diff --git a/.travis.yml b/.travis.yml index 312dfda58eb..0f65b297ea8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -28,7 +28,7 @@ services: language: go go: - - 1.11.x + - 1.12.x go_import_path: vitess.io/vitess env: global: diff --git a/bootstrap.sh b/bootstrap.sh index 0476dc1e7eb..afcd975ccd6 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -20,24 +20,15 @@ # 0. Initialization and helper methods. # 1. Installation of dependencies. # 2. Installation of Go tools and vendored Go dependencies. -# 3. Detection of installed MySQL and setting MYSQL_FLAVOR. -# 4. Installation of development related steps e.g. creating Git hooks. BUILD_TESTS=${BUILD_TESTS:-1} +BUILD_PYTHON=${BUILD_PYTHON:-1} +BUILD_JAVA=${BUILD_JAVA:-1} # # 0. Initialization and helper methods. # -# Run parallel make, based on number of cores available. -case $(uname) in - Linux) NB_CORES=$(grep -c '^processor' /proc/cpuinfo);; - Darwin) NB_CORES=$(sysctl hw.ncpu | awk '{ print $2 }');; -esac -if [ -n "$NB_CORES" ]; then - export MAKEFLAGS="-j$((NB_CORES+1)) -l${NB_CORES}" -fi - function fail() { echo "ERROR: $1" exit 1 @@ -48,6 +39,20 @@ function fail() { go version &>/dev/null || fail "Go is not installed or is not on \$PATH" [[ "$(go version 2>&1)" =~ go1\.[1-9][1-9] ]] || fail "Go is not version 1.11+" +# Create main directories. +mkdir -p "$VTROOT/dist" +mkdir -p "$VTROOT/bin" +mkdir -p "$VTROOT/lib" +mkdir -p "$VTROOT/vthook" + +# Install git hooks. +echo "creating git hooks" +mkdir -p "$VTTOP/.git/hooks" +ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit" +ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg" +(cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks") + + # Set up the proper GOPATH for go get below. if [ "$BUILD_TESTS" == 1 ] ; then source ./dev.env @@ -55,12 +60,6 @@ else source ./build.env fi -# Create main directories. 
-mkdir -p "$VTROOT/dist" -mkdir -p "$VTROOT/bin" -mkdir -p "$VTROOT/lib" -mkdir -p "$VTROOT/vthook" - if [ "$BUILD_TESTS" == 1 ] ; then # Set up required soft links. # TODO(mberlin): Which of these can be deleted? @@ -144,12 +143,13 @@ function install_grpc() { PIP=$grpc_virtualenv/bin/pip $PIP install --upgrade pip $PIP install --upgrade --ignore-installed virtualenv + $PIP install mysql-connector-python grpcio_ver=$version $PIP install --upgrade grpcio=="$grpcio_ver" grpcio-tools=="$grpcio_ver" } -if [ "$BUILD_TESTS" == 1 ] ; then +if [ "$BUILD_PYTHON" == 1 ] ; then install_dep "gRPC" "1.16.0" "$VTROOT/dist/grpc" install_grpc fi @@ -186,9 +186,11 @@ function install_zookeeper() { zip -d "lib/$zk-fatjar.jar" 'META-INF/*.SF' 'META-INF/*.RSA' 'META-INF/*SF' || true # needed for >=3.4.10 <3.5 rm -rf "$zk" "$zk.tar.gz" } -zk_ver=${ZK_VERSION:-3.4.14} -install_dep "Zookeeper" "$zk_ver" "$VTROOT/dist/vt-zookeeper-$zk_ver" install_zookeeper +zk_ver=${ZK_VERSION:-3.4.14} +if [ "$BUILD_JAVA" == 1 ] ; then + install_dep "Zookeeper" "$zk_ver" "$VTROOT/dist/vt-zookeeper-$zk_ver" install_zookeeper +fi # Download and install etcd, link etcd binary into our root. 
function install_etcd() { @@ -252,7 +254,7 @@ function install_pymock() { popd >/dev/null } pymock_version=1.0.1 -if [ "$BUILD_TESTS" == 1 ] ; then +if [ "$BUILD_PYTHON" == 1 ] ; then install_dep "py-mock" "$pymock_version" "$VTROOT/dist/py-mock-$pymock_version" install_pymock fi @@ -267,7 +269,7 @@ function install_selenium() { # instead of go/dist/selenium/lib/python3.5/site-packages and then can't find module 'pip._vendor.requests' PYTHONPATH='' $PIP install selenium } -if [ "$BUILD_TESTS" == 1 ] ; then +if [ "$BUILD_PYTHON" == 1 ] ; then install_dep "Selenium" "latest" "$VTROOT/dist/selenium" install_selenium fi @@ -281,10 +283,13 @@ function install_chromedriver() { unzip -o -q chromedriver_linux64.zip -d "$dist" rm chromedriver_linux64.zip } -if [ "$BUILD_TESTS" == 1 ] ; then +if [ "$BUILD_PYTHON" == 1 ] ; then install_dep "chromedriver" "73.0.3683.20" "$VTROOT/dist/chromedriver" install_chromedriver fi +if [ "$BUILD_PYTHON" == 1 ] ; then + PYTHONPATH='' $PIP install mysql-connector-python +fi # # 2. Installation of Go tools and vendored Go dependencies. @@ -324,59 +329,5 @@ go get -u $gotools || fail "Failed to download some Go tools with 'go get'. Plea echo "Updating govendor dependencies..." govendor sync || fail "Failed to download/update dependencies with govendor. Please re-run bootstrap.sh in case of transient errors." - -# -# 3. Detection of installed MySQL and setting MYSQL_FLAVOR. -# - - -# find mysql and prepare to use libmysqlclient - -if [ "$BUILD_TESTS" == 1 ] ; then - if [ -z "$MYSQL_FLAVOR" ]; then - export MYSQL_FLAVOR=MySQL56 - echo "MYSQL_FLAVOR environment variable not set. Using default: $MYSQL_FLAVOR" - fi - case "$MYSQL_FLAVOR" in - "MySQL56" | "MySQL80") - myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" - [[ "$myversion" =~ Distrib\ 5\.[67] || "$myversion" =~ Ver\ 8\. ]] || fail "Couldn't find MySQL 5.6+ in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." - echo "Found MySQL 5.6+ installation in $VT_MYSQL_ROOT." 
- ;; - - "MariaDB" | "MariaDB103") - myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" - [[ "$myversion" =~ MariaDB ]] || fail "Couldn't find MariaDB in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." - echo "Found MariaDB installation in $VT_MYSQL_ROOT." - ;; - - *) - fail "Unsupported MYSQL_FLAVOR $MYSQL_FLAVOR" - ;; - - esac - # save the flavor that was used in bootstrap, so it can be restored - # every time dev.env is sourced. - echo "$MYSQL_FLAVOR" > "$VTROOT/dist/MYSQL_FLAVOR" -fi - -# -# 4. Installation of development related steps e.g. creating Git hooks. -# - -if [ "$BUILD_TESTS" == 1 ] ; then - # Create the Git hooks. - echo "creating git hooks" - mkdir -p "$VTTOP/.git/hooks" - ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit" - ln -sf "$VTTOP/misc/git/prepare-commit-msg.bugnumber" "$VTTOP/.git/hooks/prepare-commit-msg" - ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg" - (cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks") - echo - echo "bootstrap finished - run 'source dev.env' in your shell before building." -else - echo - echo "bootstrap finished - run 'source build.env' in your shell before building." -fi - - +echo +echo "bootstrap finished - run 'source dev.env' or 'source build.env' in your shell before building." diff --git a/build.env b/build.env index a9d46a99b92..29e0d992dbb 100644 --- a/build.env +++ b/build.env @@ -36,4 +36,3 @@ export VTROOT if [[ "$VTTOP" == "${VTTOP/\/src\/vitess.io\/vitess/}" ]]; then echo "WARNING: VTTOP($VTTOP) does not contain src/vitess.io/vitess" fi - diff --git a/config/mycnf/master_mariadb.cnf b/config/mycnf/master_mariadb.cnf index 83f6f318e4c..1e41cd8f3ce 100644 --- a/config/mycnf/master_mariadb.cnf +++ b/config/mycnf/master_mariadb.cnf @@ -1,16 +1,8 @@ +# This file is auto-included when MariaDB (any version) is detected. + # enable strict mode so it's safe to compare sequence numbers across different server IDs. 
gtid_strict_mode = 1 innodb_stats_persistent = 0 -innodb_support_xa = 0 - -# Semi-sync replication is required for automated unplanned failover -# (when the master goes away). Here we just load the plugin so it's -# available if desired, but it's disabled at startup. -# -# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync -# at the proper time when replication is set up, or when masters are -# promoted or demoted. -plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so # When semi-sync is enabled, don't allow fallback to async # if you get no ack, or have no slaves. This is necessary to diff --git a/config/mycnf/master_mariadb100.cnf b/config/mycnf/master_mariadb100.cnf new file mode 100644 index 00000000000..ce85c641c13 --- /dev/null +++ b/config/mycnf/master_mariadb100.cnf @@ -0,0 +1,12 @@ +# This file is auto-included when MariaDB 10.0 is detected. + +innodb_support_xa = 0 + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so diff --git a/config/mycnf/master_mariadb101.cnf b/config/mycnf/master_mariadb101.cnf new file mode 100644 index 00000000000..d613b155d68 --- /dev/null +++ b/config/mycnf/master_mariadb101.cnf @@ -0,0 +1,12 @@ +# This file is auto-included when MariaDB 10.1 is detected. + +innodb_support_xa = 0 + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. 
+# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so diff --git a/config/mycnf/master_mariadb102.cnf b/config/mycnf/master_mariadb102.cnf new file mode 100644 index 00000000000..487baa9bf87 --- /dev/null +++ b/config/mycnf/master_mariadb102.cnf @@ -0,0 +1,12 @@ +# This file is auto-included when MariaDB 10.2 is detected. + +innodb_support_xa = 0 + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so diff --git a/config/mycnf/master_mariadb103.cnf b/config/mycnf/master_mariadb103.cnf index a4dfb50ab18..ac8b38404fd 100644 --- a/config/mycnf/master_mariadb103.cnf +++ b/config/mycnf/master_mariadb103.cnf @@ -1,3 +1,5 @@ +# This file is auto-included when MariaDB 10.3 is detected. + # enable strict mode so it's safe to compare sequence numbers across different server IDs. gtid_strict_mode = 1 innodb_stats_persistent = 0 diff --git a/config/mycnf/master_mysql56.cnf b/config/mycnf/master_mysql56.cnf index 2c802b2332a..dcb8a4e113f 100644 --- a/config/mycnf/master_mysql56.cnf +++ b/config/mycnf/master_mysql56.cnf @@ -1,3 +1,5 @@ +# This file is auto-included when MySQL 5.6 is detected. 
+ # Options for enabling GTID # https://dev.mysql.com/doc/refman/5.6/en/replication-gtids-howto.html gtid_mode = ON diff --git a/config/mycnf/master_mysql57.cnf b/config/mycnf/master_mysql57.cnf new file mode 100644 index 00000000000..381b05ac14c --- /dev/null +++ b/config/mycnf/master_mysql57.cnf @@ -0,0 +1,32 @@ +# This file is auto-included when MySQL 5.7 is detected. + +# Options for enabling GTID +# https://dev.mysql.com/doc/refman/5.6/en/replication-gtids-howto.html +gtid_mode = ON +log_bin +log_slave_updates +enforce_gtid_consistency +innodb_use_native_aio = 0 + +# Crash-safe replication settings. +master_info_repository = TABLE +relay_log_info_repository = TABLE +relay_log_purge = 1 +relay_log_recovery = 1 + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + diff --git a/config/mycnf/master_mysql80.cnf b/config/mycnf/master_mysql80.cnf index f81761ad906..e92b794ef9b 100644 --- a/config/mycnf/master_mysql80.cnf +++ b/config/mycnf/master_mysql80.cnf @@ -1,3 +1,5 @@ +# This file is auto-included when MySQL 8.0 is detected. 
+ # Options for enabling GTID # https://dev.mysql.com/doc/refman/5.6/en/replication-gtids-howto.html gtid_mode = ON diff --git a/dev.env b/dev.env index e31c16167e2..ca7bc608721 100644 --- a/dev.env +++ b/dev.env @@ -95,16 +95,6 @@ if [[ "$VT_MYSQL_ROOT" == "" ]]; then fi fi -# restore MYSQL_FLAVOR, saved by bootstrap.sh -if [ -r "$VTROOT/dist/MYSQL_FLAVOR" ]; then - MYSQL_FLAVOR=$(cat "$VTROOT/dist/MYSQL_FLAVOR") - export MYSQL_FLAVOR -fi - -# mysql cgo library config -if [ -z "$MYSQL_FLAVOR" ]; then - export MYSQL_FLAVOR=MariaDB -fi PKG_CONFIG_PATH=$(prepend_path "$PKG_CONFIG_PATH" "$VTROOT/lib") export PKG_CONFIG_PATH diff --git a/doc/AdvancedFeaturesIndex.md b/doc/AdvancedFeaturesIndex.md deleted file mode 100644 index c2fe3a753d2..00000000000 --- a/doc/AdvancedFeaturesIndex.md +++ /dev/null @@ -1,12 +0,0 @@ -# Advanced Features - -The pages below this navigation entry `Advanced Features` can be understood as an addition to the `User Guide`. Here we describe advanced Vitess features which you may want to enable or tune in a production setup. - -As of October 2017, many of these features are not documented yet. We plan to add pages for them later. - -Examples for undocumented features: - -* hot row protection in vttablet -* vtgate buffer for lossless failovers -* vttablet consolidator (avoids duplicated read queries to MySQL, turned on by default) -* [vtexplain](https://github.com/vitessio/vitess/blob/master/doc/VtExplain.md) diff --git a/doc/BackupAndRestore.md b/doc/BackupAndRestore.md deleted file mode 100644 index 00f700e3111..00000000000 --- a/doc/BackupAndRestore.md +++ /dev/null @@ -1,250 +0,0 @@ -This document explains how to create and restore data backups with -Vitess. Vitess uses backups for two purposes: - -* Provide a point-in-time backup of the data on a tablet. -* Bootstrap new tablets in an existing shard. 
- -## Prerequisites - -Vitess stores data backups on a Backup Storage service, which is -a -[pluggable interface](https://github.com/vitessio/vitess/blob/master/go/vt/mysqlctl/backupstorage/interface.go). - -Currently, we have plugins for: - -* A network-mounted path (e.g. NFS) -* Google Cloud Storage -* Amazon S3 -* Ceph - -Before you can back up or restore a tablet, you need to ensure that the -tablet is aware of the Backup Storage system that you are using. To do so, -use the following command-line flags when starting a vttablet that has -access to the location where you are storing backups. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Flags
backup_storage_implementationSpecifies the implementation of the Backup Storage interface to - use.

- Current plugin options available are: -
    -
  • file: NFS or any other filesystem-mounted network - drive.
  • -
  • gcs: Google Cloud Storage.
  • -
  • s3: Amazon S3.
  • -
  • ceph: Ceph Object Gateway S3 API.
  • -
-
backup_storage_hookIf set, the contents of every file to backup is sent to a hook. The - hook receives the data for each file on stdin. It should echo the - transformed data to stdout. Anything the hook prints to stderr will - be printed in the vttablet logs.
- Hooks should be located in the vthook subdirectory of the - VTROOT directory.
- The hook receives a -operation write or a - -operation read parameter depending on the direction - of the data processing. For instance, write would be for - encryption, and read would be for decryption.
-
backup_storage_compressThis flag controls if the backups are compressed by the Vitess code. - By default it is set to true. Use - -backup_storage_compress=false to disable.
- This is meant to be used with a -backup_storage_hook - hook that already compresses the data, to avoid compressing the data - twice. -
file_backup_storage_rootFor the file plugin, this identifies the root directory - for backups. -
gcs_backup_storage_bucketFor the gcs plugin, this identifies the - bucket - to use.
s3_backup_aws_regionFor the s3 plugin, this identifies the AWS region.
s3_backup_storage_bucketFor the s3 plugin, this identifies the AWS S3 - bucket.
ceph_backup_storage_configFor the ceph plugin, this identifies the path to a text - file with a JSON object as configuration. The JSON object requires the - following keys: accessKey, secretKey, - endPoint and useSSL. Bucket name is computed - from keyspace name and shard name and is separate for different - keyspaces / shards.
restore_from_backupIndicates that, when started with an empty MySQL instance, the - tablet should restore the most recent backup from the specified - storage plugin.
- -### Authentication - -Note that for the Google Cloud Storage plugin, we currently only -support -[Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). -It means that access to Cloud Storage is automatically granted by virtue of -the fact that you're already running within Google Compute Engine or Container -Engine. - -For this to work, the GCE instances must have been created with -the [scope](https://cloud.google.com/compute/docs/authentication#using) that -grants read-write access to Cloud Storage. When using Container Engine, you can -do this for all the instances it creates by adding `--scopes storage-rw` to the -`gcloud container clusters create` command as shown in the [Vitess on Kubernetes -guide]({% link getting-started/index.md %}#start-a-container-engine-cluster). - -## Creating a backup - -Run the following vtctl command to create a backup: - -``` sh -vtctl Backup -``` - -In response to this command, the designated tablet performs the following -sequence of actions: - -1. Switches its type to `BACKUP`. After this step, the tablet is no - longer used by vtgate to serve any query. - -1. Stops replication, get the current replication position (to be saved in the - backup along with the data). - -1. Shuts down its mysqld process. - -1. Copies the necessary files to the Backup Storage implementation that was - specified when the tablet was started. Note if this fails, we still keep - going, so the tablet is not left in an unstable state because of a storage - failure. - -1. Restarts mysqld. - -1. Restarts replication (with the right semi-sync flags corresponding to its - original type, if applicable). - -1. Switches its type back to its original type. After this, it will most likely - be behind on replication, and not used by vtgate for serving until it catches - up. 
- -## Restoring a backup - -When a tablet starts, Vitess checks the value of the -`-restore_from_backup` command-line flag to determine whether -to restore a backup to that tablet. - -* If the flag is present, Vitess tries to restore the most recent backup from - the Backup Storage system when starting the tablet. -* If the flag is absent, Vitess does not try to restore a backup to the - tablet. This is the equivalent of starting a new tablet in a new shard. - -As noted in the [Prerequisites](#prerequisites) section, the flag is -generally enabled all of the time for all of the tablets in a shard. -By default, if Vitess cannot find a backup in the Backup Storage system, -the tablet will start up empty. This behavior allows you to bootstrap a new -shard before any backups exist. - -If the `-wait_for_backup_interval` flag is set to a value greater than zero, -the tablet will instead keep checking for a backup to appear at that interval. -This can be used to ensure tablets launched concurrently while an initial backup -is being seeded for the shard (e.g. uploaded from cold storage or created by -another tablet) will wait until the proper time and then pull the new backup -when it's ready. - -``` sh -vttablet ... -backup_storage_implementation=file \ - -file_backup_storage_root=/nfs/XXX \ - -restore_from_backup -``` - -## Managing backups - -**vtctl** provides two commands for managing backups: - -* [ListBackups]({% link reference/vtctl.md %}#listbackups) displays the - existing backups for a keyspace/shard in chronological order. - - ``` sh - vtctl ListBackups - ``` - -* [RemoveBackup]({% link reference/vtctl.md %}#removebackup) deletes a - specified backup for a keyspace/shard. - - ``` sh - RemoveBackup - ``` - -## Bootstrapping a new tablet - -Bootstrapping a new tablet is almost identical to restoring an existing tablet. 
-The only thing you need to be cautious about is that the tablet specifies its -keyspace, shard and tablet type when it registers itself at the topology. -Specifically, make sure that the following vttablet parameters are set: - -``` sh - -init_keyspace - -init_shard - -init_tablet_type replica|rdonly -``` - -The bootstrapped tablet will restore the data from the backup and then apply -changes, which occurred after the backup, by restarting replication. - - -## Backup Frequency - -We recommend to take backups regularly e.g. you should set up a cron -job for it. - -To determine the proper frequency for creating backups, consider -the amount of time that you keep replication logs and allow enough -time to investigate and fix problems in the event that a backup -operation fails. - -For example, suppose you typically keep four days of replication logs -and you create daily backups. In that case, even if a backup fails, -you have at least a couple of days from the time of the failure to -investigate and fix the problem. - -## Concurrency - -The back-up and restore processes simultaneously copy and either -compress or decompress multiple files to increase throughput. You -can control the concurrency using command-line flags: - -* The vtctl [Backup]({% link reference/vtctl.md %}#backup) command uses the - `-concurrency` flag. -* vttablet uses the `-restore_concurrency` flag. - -If the network link is fast enough, the concurrency matches the CPU -usage of the process during the backup or restore process. - diff --git a/doc/CodeReviews.md b/doc/CodeReviews.md deleted file mode 100644 index 61e13385320..00000000000 --- a/doc/CodeReviews.md +++ /dev/null @@ -1,72 +0,0 @@ -# Code Reviews - -Every GitHub pull request must go through a code review and get approved before it will be merged into the master branch. - -## What to look for in a Review - -Both authors and reviewers need to answer these general questions: - -* Does this change match an existing design / bug? 
-* Is there proper unit test coverage for this change? All changes should - increase coverage. We need at least integration test coverage when unit test - coverage is not possible. -* Is this change going to log too much? (Error logs should only happen when - the component is in bad shape, not because of bad transient state or bad - user queries) -* Does this change match our coding conventions / style? Linter was run and is - happy? -* Does this match our current patterns? Example include RPC patterns, - Retries / Waits / Timeouts patterns using Context, ... - -Additionally, we recommend every author to look over your own reviews just before committing them and check if you are following the recommendations below. -We usually check these kinds of things while skimming through `git diff --cached` just before committing. - -* Scan the diffs as if you're the reviewer. - * Look for files that shouldn't be checked in (temporary/generated files). - * Googlers only: Remove Google confidential info (e.g. internal URLs). - * Look for temporary code/comments you added while debugging. - * Example: fmt.Println(`AAAAAAAAAAAAAAAAAA`) - * Look for inconsistencies in indentation. - * Use 2 spaces in everything except Go. - * In Go, just use goimports. -* Commit message format: - * ``` - : This is a short description of the change. - - If necessary, more sentences follow e.g. to explain the intent of the change, how it fits into the bigger picture or which implications it has (e.g. other parts in the system have to be adapted.) - - Sometimes this message can also contain more material for reference e.g. benchmark numbers to justify why the change was implemented in this way. - ``` -* Comments - * `// Prefer complete sentences when possible.` - * Leave a space after the comment marker `//`. - -During the review make sure you address all comments. Click Done (reviewable.io) or reply with "Done." (GitHub Review) to mark comments as addressed. 
There should be 0 unresolved discussions when it's ready to merge. - -## Assigning a Pull Request - -If you want to address your review to a particular set of teammates, add them as Assignee (righthand side on the pull request). -They'll receive an email. - -During discussions, you can also refer to somebody using the *@username* syntax and they'll receive an email as well. - -If you want to receive notifications even when you aren't mentioned, you can go to the [repository page](https://github.com/vitessio/vitess) and click *Watch*. - -## Approving a Pull Request - -As a reviewer you can approve a pull request through two ways: - -* Approve the pull request via GitHub's new code review system -* reply with a comment that contains the word *LGTM* (Looks Good To Me) - -## Merging a Pull Request - -Pull requests can be merged after they were approved and the Travis tests have passed. -External contributions will be merged by a team member. -Internal team members can merge their **own** pull requests. - -## Internal Bug Numbers - -Most of the bugs the team is working on are tracked internally. -We reference to them as `b/########` or `BUG=########` in commit messages and comments. -External users can ignore these. diff --git a/doc/Concepts.md b/doc/Concepts.md deleted file mode 100644 index acb87277522..00000000000 --- a/doc/Concepts.md +++ /dev/null @@ -1,221 +0,0 @@ -This document defines common Vitess concepts and terminology. - -## Keyspace - -A *keyspace* is a logical database. In the unsharded case, it maps directly -to a MySQL database name. If [sharded](https://en.wikipedia.org/wiki/Shard_(database_architecture)), -a keyspace maps to multiple MySQL databases. However, it appears as a single -database to the application. - -Reading data from a keyspace is like reading from a MySQL database. However, -depending on the consistency requirements of the read operation, Vitess -might fetch the data from a master database or from a replica. 
By routing -each query to the appropriate database, Vitess allows your code to be -structured as if it were reading from a single MySQL database. - - -## Keyspace ID - -The *keyspace ID* is the value that is used to decide on which shard a given -row lives. [Range-based Sharding]({% link user-guide/sharding.md %}#range-based-sharding) -refers to creating shards that each cover a particular range of keyspace IDs. - -Using this technique means you can split a given shard by replacing it with two -or more new shards that combine to cover the original range of keyspace IDs, -without having to move any records in other shards. - -The keyspace ID itself is computed using a function of some column in your data, -such as the user ID. Vitess allows you to choose from a variety of functions -([vindexes]({% link user-guide/vschema.md %}#vindex)) -to perform this mapping. This allows you to choose the right one to achieve optimal -distribution of the data across shards. - -## VSchema - -A [VSchema]({% link user-guide/vschema.md %}) allows you to describe how data is organized -within keyspaces and shards. This information is used for routing queries, and also during -resharding operations. - -For a Keyspace, you can specify if it's sharded or not. For sharded keyspaces, you can specify -the list of vindexes for each table. - -Vitess also supports [sequence generators]({% link user-guide/vschema.md %}#sequences) -that can be used to generate new ids that work like MySQL auto increment columns. -The VSchema allows you to associate table columns to sequence tables. -If no value is specified for such a column, then VTGate will know to use -the sequence table to generate a new value for it. - - -## Shard - -A *shard* is a division within a keyspace. A shard typically contains one MySQL -master and many MySQL slaves. - -Each MySQL instance within a shard has the same data (excepting some replication -lag). 
The slaves can serve read-only traffic (with eventual consistency guarantees), -execute long-running data analysis tools, or perform administrative tasks -(backup, restore, diff, etc.). - -An unsharded keyspace has effectively one shard. -Vitess names the shard `0` by convention. When sharded, a keyspace has `N` -shards with non-overlapping data. - -### Resharding - -Vitess supports [dynamic resharding]({% link user-guide/sharding.md %}#resharding), -in which the number of shards is changed on a live cluster. This can be either -splitting one or more shards into smaller pieces, or merging neighboring shards -into bigger pieces. - -During dynamic resharding, the data in the source shards is copied into the -destination shards, allowed to catch up on replication, and then compared -against the original to ensure data integrity. Then the live serving -infrastructure is shifted to the destination shards, and the source shards are -deleted. - -## Tablet - -A *tablet* is a combination of a `mysqld` process and a corresponding `vttablet` -process, usually running on the same machine. - -Each tablet is assigned a *tablet type*, which specifies what role it currently -performs. - -### Tablet Types - -* **master** - A *replica* tablet that happens to currently be the MySQL master - for its shard. -* **replica** - A MySQL slave that is eligible to be promoted to *master*. - Conventionally, these are reserved for serving live, user-facing - requests (like from the website's frontend). -* **rdonly** - A MySQL slave that cannot be promoted to *master*. - Conventionally, these are used for background processing jobs, - such as taking backups, dumping data to other systems, heavy - analytical queries, MapReduce, and resharding. -* **backup** - A tablet that has stopped replication at a consistent snapshot, - so it can upload a new backup for its shard. After it finishes, - it will resume replication and return to its previous type. 
-* **restore** - A tablet that has started up with no data, and is in the process - of restoring itself from the latest backup. After it finishes, - it will begin replicating at the GTID position of the backup, - and become either *replica* or *rdonly*. -* **drained** - A tablet that has been reserved by a Vitess background - process (such as rdonly tablets for resharding). - - - -## Keyspace Graph - -The *keyspace graph* allows Vitess to decide which set of shards to use for a -given keyspace, cell, and tablet type. - -### Partitions - -During horizontal resharding (splitting or merging shards), there can be shards -with overlapping key ranges. For example, the source shard of a split may serve -`c0-d0` while its destination shards serve `c0-c8` and `c8-d0` respectively. - -Since these shards need to exist simultaneously during the migration, -the keyspace graph maintains a list (called a *partitioning* or just a *partition*) -of shards whose ranges cover all possible keyspace ID values, while being -non-overlapping and contiguous. Shards can be moved in and out of this list to -determine whether they are active. - -The keyspace graph stores a separate partitioning for each `(cell, tablet type)` pair. -This allows migrations to proceed in phases: first migrate *rdonly* and -*replica* requests, one cell at a time, and finally migrate *master* requests. - -### Served From - -During vertical resharding (moving tables out from one keyspace to form a new -keyspace), there can be multiple keyspaces that contain the same table. - -Since these multiple copies of the table need to exist simultaneously during -the migration, the keyspace graph supports keyspace redirects, called -`ServedFrom` records. That enables a migration flow like this: - -1. Create `new_keyspace` and set its `ServedFrom` to point to `old_keyspace`. -1. Update the app to look for the tables to be moved in `new_keyspace`. - Vitess will automatically redirect these requests to `old_keyspace`. -1. 
Perform a vertical split clone to copy data to the new keyspace and start - filtered replication. -1. Remove the `ServedFrom` redirect to begin actually serving from `new_keyspace`. -1. Drop the now unused copies of the tables from `old_keyspace`. - -There can be a different `ServedFrom` record for each `(cell, tablet type)` pair. -This allows migrations to proceed in phases: first migrate *rdonly* and -*replica* requests, one cell at a time, and finally migrate *master* requests. - -## Replication Graph - -The *replication graph* identifies the relationships between master -databases and their respective replicas. During a master failover, -the replication graph enables Vitess to point all existing replicas -to a newly designated master database so that replication can continue. - -## Topology Service - -The *[Topology Service]({% link user-guide/topology-service.md %})* -is a set of backend processes running on different servers. -Those servers store topology data and provide a distributed locking service. - -Vitess uses a plug-in system to support various backends for storing topology -data, which are assumed to provide a distributed, consistent key-value store. -By default, our [local example]({% link getting-started/local-instance.md %}) -uses the ZooKeeper plugin, and the [Kubernetes example]({% link getting-started/index.md %}) -uses etcd. - -The topology service exists for several reasons: - -* It enables tablets to coordinate among themselves as a cluster. -* It enables Vitess to discover tablets, so it knows where to route queries. -* It stores Vitess configuration provided by the database administrator that is - needed by many different servers in the cluster, and that must persist between - server restarts. - -A Vitess cluster has one global topology service, and a local topology service -in each cell. 
Since *cluster* is an overloaded term, and one Vitess cluster is -distinguished from another by the fact that each has its own global topology -service, we refer to each Vitess cluster as a **toposphere**. - -### Global Topology - -The global topology stores Vitess-wide data that does not change frequently. -Specifically, it contains data about keyspaces and shards as well as the -master tablet alias for each shard. - -The global topology is used for some operations, including reparenting and -resharding. By design, the global topology server is not used a lot. - -In order to survive any single cell going down, the global topology service -should have nodes in multiple cells, with enough to maintain quorum in the -event of a cell failure. - -### Local Topology - -Each local topology contains information related to its own cell. -Specifically, it contains data about tablets in the cell, the keyspace graph -for that cell, and the replication graph for that cell. - -The local topology service must be available for Vitess to discover tablets -and adjust routing as tablets come and go. However, no calls to the topology -service are made in the critical path of serving a query at steady state. -That means queries are still served during temporary unavailability of topology. - -## Cell (Data Center) - -A *cell* is a group of servers and network infrastructure collocated in an area, -and isolated from failures in other cells. It is typically either a full data -center or a subset of a data center, sometimes called a *zone* or *availability zone*. -Vitess gracefully handles cell-level failures, such as when a cell is cut off the network. - -Each cell in a Vitess implementation has a [local topology service](#topology-service), -which is hosted in that cell. The topology service contains most of the -information about the Vitess tablets in its cell. -This enables a cell to be taken down and rebuilt as a unit. - -Vitess limits cross-cell traffic for both data and metadata. 
-While it may be useful to also have the ability to route read traffic to -individual cells, Vitess currently serves reads only from the local cell. -Writes will go cross-cell when necessary, to wherever the master for that shard -resides. diff --git a/doc/Contributing.md b/doc/Contributing.md deleted file mode 100644 index 70bf486e408..00000000000 --- a/doc/Contributing.md +++ /dev/null @@ -1,53 +0,0 @@ -# Contributing to Vitess - -You want to contribute to Vitess? That's awesome! - -We're looking forward to any contribution! Before you start larger contributions, make sure to reach out first and discuss your plans with us. - -This page describes for new contributors how to make yourself familiar with Vitess and the programming language Go. - -## Learning Go - -Vitess was one of the early adopters of [Google's programming language Go](https://golang.org/). - -We love it for its simplicity (e.g. compared to C++ or Java) and performance (e.g. compared to Python). - -Contributing to our server code will require you to learn Go. We recommend reading the following resources. - -### Go Tour - -https://tour.golang.org/ - -The Go tour is a browser based tutorial which explains the different concepts of the programming language. -It's interactive i.e. you can change and run all examples on the right side. -The later steps also have specific exercises which you're supposed to implement yourself. -It's a lot of fun and demonstrates how simple it is to write Go code. - -### Go Readability - -While there's no Go style guide, there is a set of recommendations in the Go community which add up to an implicit style guide. -To make sure you're writing idiomatic Go code, please read the following documents: - -* Go Readability slides: https://talks.golang.org/2014/readability.slide - * Talk about Go readability with many specific examples. - * `Effective Go`: https://golang.org/doc/effective_go.html - * Recommendations for writing good Go code. 
-* Go Code Review Comments: https://github.com/golang/go/wiki/CodeReviewComments - * The closest thing to a style guide. - -### Other Resources - -If you're unsure about Go's behavior or syntax, we recommend to look it up in the specification: https://golang.org/ref/spec -It is well written and easy to understand. - -### Appreciating Go - -After using Go for several weeks, we hope that you'll start to love Go as much as we do. - -In our opinion, the song "Write in Go" from ScaleAbility, a Google acapella band, perfectly captures what's so special about Go. Watch it and enjoy that you learnt Go: www.youtube.com/watch?v=LJvEIjRBSDA - -## Learning Vitess - -Vitess is a complex distributed system. There are a few design docs in the `/doc` section. The best way to ramp up on vitess is by starting to use it. -Then, you can dive into the code to see how the various parts work. For questions, the best place to get them answered is by asking on the slack channel. -You can sign up to the channel by clicking on the top right link at vitess.io. diff --git a/doc/FAQ.md b/doc/FAQ.md deleted file mode 100644 index d34acbc4b0a..00000000000 --- a/doc/FAQ.md +++ /dev/null @@ -1,77 +0,0 @@ -## Does the application need to know about the sharding scheme underneath Vitess? - -The application does not need to know about how the data is sharded. This information is stored in a VSchema which the VTGates use to automatically route your queries. This allows the application to connect to vitess and use it as if it’s a single giant database server. - -## Can I address a specific shard if I want to? - -If necessary, you can access a specific shard by connecting to it using the shard specific database name. For a keyspace `ks` and shard `-80`, you would connect to `ks:-80`. - -## How do I choose between master vs. replica for queries? - -You can qualify the keyspace name with the desired tablet type using the `@` suffix. 
This can be specified as part of the connection as the database name, or can be changed on the fly through the `USE` command. - -For example, `ks@master` will select `ks` as the default keyspace with all queries being sent to the master. Consequently `ks@replica` will load balance requests across all `REPLICA` tablet types, and `ks@rdonly` will choose `RDONLY`. - -You can also specify the database name as `@master`, etc, which instructs vitess that no default keyspace was specified, but that the requests are for the specified tablet type. - -If no tablet type was specified, then VTGate chooses its default, which can be overridden with the `-default_tablet_type` command line argument. - -## There seems to be a 10,000 row limit per query. What if I want to do a full table scan? - -Vitess supports different modes. In OLTP mode, the result size is typically limited to a preset number (10,000 rows by default). This limit can be adjusted based on your needs. - -However, OLAP mode has no limit to the number of rows returned. In order to change to this mode, you may issue the following command before executing your query: - -``` -set workload='olap' -``` - -You can also set the workload to `dba` mode, which allows you to override the implicit timeouts that exist in vttablet. However, this mode should be used judiciously as it supersedes shutdown and reparent commands. - -The general convention is to send OLTP queries to `REPLICA` tablet types, and OLAP queries to `RDONLY`. - -## Is there a list of supported/unsupported queries? - -The list of unsupported constructs is currently in the form of test cases contained in this [test file](https://github.com/vitessio/vitess/blob/master/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt). However, contrary to the test cases, there is limited support for SET, DDL and DBA constructs. This will be documented soon. - - -## If I have a log of all queries from my app. 
Is there a way I can try them against vitess to see how they’ll work? - -Yes. The [vtexplain tool]({% link user-guide/vtexplain.md %}) can be used to preview how your queries will be executed by vitess. It can also be used to try different sharding scenarios before deciding on one. - -## Does the Primary Vindex for a tablet have to be the same as its Primary Key. - -It is not necessary that a Primary Vindex be the same as the Primary Key. In fact, there are many use cases where you would not want this. For example, if there are tables with one-to-many relationships, the Primary Vindex of the main table is likely to be the same as the Primary Key. However, if you want the rows of the secondary table to reside in the same shard as the parent row, the Primary Vindex for that table must be the foreign key that points to the main table. A typical example is a `user` and `order` table. In this case, the order table has the `user_id` as a foreign key to the `id` of the `user` table. The `order_id` may be the primary key for `order`, but you may still want to choose `user_id` as Primary Vindex, which will make a user's orders live in the same shard as the user. - -## How do I connect to vtgate using mysql protocol? - -If you look at the example [vtgate-up.sh script](https://github.com/vitessio/vitess/blob/master/examples/local/vtgate-up.sh), you'll see the following lines: - - -``` - -mysql_server_port $mysql_server_port \ - -mysql_server_socket_path $mysql_server_socket_path \ - -mysql_auth_server_static_file "./mysql_auth_server_static_creds.json" \ -``` - -In that example, vtgate accepts mysql connections on port 15306, and the authentication info is stored in the json file. So, you should be able to connect to it using the following command: - -``` -mysql -h 127.0.0.1 -P 15306 -u mysql_user --password=mysql_password -``` - -## Can I override the default db name from `vt_xxx` to my own? - -Yes. 
You can start vttablet with the `-init_db_name_override` command line option to specify a different db name. There is no downside to performing this override. - -## I cannot start a cluster, and see these errors in the logs: Could not open required defaults file: /path/to/my.cnf - -Most likely this means that apparmor is running on your server and is preventing vitess processes from accessing the my.cnf file. The workaround is to uninstall apparmor: - -``` -sudo service apparmor stop -sudo service apparmor teardown -sudo update-rc.d -f apparmor remove -``` - -You may also need to reboot the machine after this. Many programs automatically install apparmor, so you may need to uninstall again. diff --git a/doc/GettingStarted.md b/doc/GettingStarted.md deleted file mode 100644 index f8e42628b5e..00000000000 --- a/doc/GettingStarted.md +++ /dev/null @@ -1,673 +0,0 @@ -You can build Vitess using either [Docker](#docker-build) or a -[manual](#manual-build) build process. - -If you run into issues or have questions, please post on our -[forum](https://groups.google.com/forum/#!forum/vitess). - -## Docker Build - -To run Vitess in Docker, you can either use our pre-built images on [Docker Hub](https://hub.docker.com/u/vitess/), or build them yourself. - -### Docker Hub Images - -* The [vitess/base](https://hub.docker.com/r/vitess/base/) image contains a full - development environment, capable of building Vitess and running integration tests. - -* The [vitess/lite](https://hub.docker.com/r/vitess/lite/) image contains only - the compiled Vitess binaries, excluding ZooKeeper. It can run Vitess, but - lacks the environment needed to build Vitess or run tests. It's primarily used - for the [Vitess on Kubernetes]({% link getting-started/index.md %}) guide. 
- -For example, you can directly run `vitess/base`, and Docker will download the -image for you: - -``` sh -$ sudo docker run -ti vitess/base bash -vitess@32f187ef9351:/vt/src/vitess.io/vitess$ make build -``` - -Now you can proceed to [start a Vitess cluster](#start-a-vitess-cluster) inside -the Docker container you just started. Note that if you want to access the -servers from outside the container, you'll need to expose the ports as described -in the [Docker Engine Reference Guide](https://docs.docker.com/engine/reference/run/#/expose-incoming-ports). - -For local testing, you can also access the servers on the local IP address -created for the container by Docker: - -``` sh -$ docker inspect 32f187ef9351 | grep IPAddress -### example output: -# "IPAddress": "172.17.3.1", -``` - -### Custom Docker Image - -You can also build Vitess Docker images yourself to include your -own patches or configuration data. The -[Dockerfile](https://github.com/vitessio/vitess/blob/master/Dockerfile) -in the root of the Vitess tree builds the `vitess/base` image. -The [docker](https://github.com/vitessio/vitess/tree/master/docker) -subdirectory contains scripts for building other images, such as `vitess/lite`. - -Our `Makefile` also contains rules to build the images. For example: - -``` sh -# Create vitess/bootstrap, which prepares everything up to ./bootstrap.sh -vitess$ make docker_bootstrap -# Create vitess/base from vitess/bootstrap by copying in your local working directory. -vitess$ make docker_base -``` - -## Manual Build - -The following sections explain the process for manually building -Vitess without Docker. - -### Install Dependencies - -We currently test Vitess regularly on Ubuntu 14.04 (Trusty) and Debian 8 (Jessie). -OS X 10.11 (El Capitan) should work as well, the installation instructions are below. - -#### Ubuntu and Debian - -In addition, Vitess requires the software and libraries listed below. - -1. [Install Go 1.11+](https://golang.org/doc/install). - -2. 
Install [MariaDB 10.0](https://downloads.mariadb.org/) or - [MySQL 5.6](https://dev.mysql.com/downloads/mysql). You can use any - installation method (src/bin/rpm/deb), but be sure to include the client - development headers (`libmariadbclient-dev` or `libmysqlclient-dev`). - - The Vitess development team currently tests against MariaDB 10.0.21 - and MySQL 5.6.27. - - If you are installing MariaDB, note that you must install version 10.0 or - higher. If you are using `apt-get`, confirm that your repository - offers an option to install that version. You can also download the source - directly from [mariadb.org](https://downloads.mariadb.org/mariadb/). - - If you are using Ubuntu 14.04 with MySQL 5.6, the default install may be - missing a file too, `/usr/share/mysql/my-default.cnf`. It would show as an - error like `Could not find my-default.cnf`. If you run into this, just add - it with the following contents: - - ``` - [mysqld] - sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES - ``` - -3. Uninstall or disable `AppArmor`. Some versions of MySQL come with default - AppArmor configurations that the Vitess tools don't recognize yet. This causes - various permission failures when Vitess initializes MySQL instances through - the `mysqlctl` tool. This is only an issue for a test environment. If AppArmor - is necessary in production, you can configure the MySQL instances appropriately - without going through mysqlctl. - - ``` sh - $ sudo service apparmor stop - $ sudo service apparmor teardown - $ sudo update-rc.d -f apparmor remove - ``` - - Reboot, just to be sure that `AppArmor` is fully disabled. - - -4. Select a lock service from the options listed below. It is technically - possible to use another lock server, but plugins currently exist only - for ZooKeeper, etcd and consul. - - ZooKeeper 3.4.14 is included by default. - - [Install etcd v3.0+](https://github.com/coreos/etcd/releases). - If you use etcd, remember to include the `etcd` command - on your path. 
- - [Install Consul](https://www.consul.io/). - If you use consul, remember to include the `consul` command - on your path. - -5. Install the following other tools needed to build and run Vitess: - - make - - automake - - libtool - - python-dev - - python-virtualenv - - python-mysqldb - - libssl-dev - - g++ - - git - - pkg-config - - bison - - curl - - unzip - - These can be installed with the following apt-get command: - - ``` sh - $ sudo apt-get install make automake libtool python-dev python-virtualenv python-mysqldb libssl-dev g++ git pkg-config bison curl unzip - ``` - -6. If you decided to use ZooKeeper in step 4, you also need to install a - Java Runtime, such as OpenJDK. - - ``` sh - $ sudo apt-get install openjdk-7-jre - ``` - -#### OS X - -1. [Install Homebrew](https://brew.sh/). If your /usr/local directory is not empty and you never used Homebrew before, - it will be - [mandatory](https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/El_Capitan_and_Homebrew.md) - to run the following command: - - ``` sh - sudo chown -R $(whoami):admin /usr/local - ``` - -2. On OS X, MySQL 5.6 has to be used, MariaDB doesn't work for some reason yet. It should be installed from Homebrew - (`install steps are below`). - -3. If Xcode is installed (with Console tools, which should be bundled automatically since the 7.1 version), all - the dev dependencies should be satisfied in this step. If no Xcode is present, it is necessary to install pkg-config. - - ``` sh - brew install pkg-config - ``` - -4. ZooKeeper is used as lock service. - -5. Run the following commands: - - ``` sh - brew install go automake libtool python git bison curl wget mysql56 - pip install --upgrade pip setuptools - pip install virtualenv - pip install MySQL-python - pip install tox - - ``` - -6. 
The Vitess bootstrap script makes some checks for the go runtime, so it is recommended to have the following - commands in your ~/.profile or ~/.bashrc or ~/.zshrc or ~/.bash_profile: - - ``` sh - export PATH="/usr/local/opt/mysql@5.6/bin:$PATH" - export PATH=/usr/local/go/bin:$PATH - export GOROOT=/usr/local/go - ``` - -7. For the Vitess hostname resolving functions to work correctly, a new entry has to be added into the /etc/hosts file - with the current LAN IP address of the computer (preferably IPv4) and the current hostname, which you get by - typing the 'hostname' command in the terminal. - - It is also a good idea to put the following line to [force the Go DNS resolver](https://golang.org/doc/go1.5#net) - in your ~/.profile or ~/.bashrc or ~/.zshrc: - - ``` - export GODEBUG=netdns=go - ``` - -### Build Vitess - -1. Navigate to the directory where you want to download the Vitess - source code and clone the Vitess Github repo. After doing so, - navigate to the `src/vitess.io/vitess` directory. For go to work - correctly, you should create a symbolic link to this inside your ${HOME}/go/src - - ``` sh - cd $WORKSPACE - git clone https://github.com/vitessio/vitess.git \ - src/vitess.io/vitess - ln -s src/vitess.io ${HOME}/go/src/vitess.io - cd ${HOME}/go/src/vitess.io/vitess - ``` - -1. Set the `MYSQL_FLAVOR` environment variable. Choose the appropriate - value for your database. This value is case-sensitive. - - ``` sh - # export MYSQL_FLAVOR=MariaDB - # or (mandatory for OS X) - export MYSQL_FLAVOR=MySQL56 - ``` - -1. If your selected database is installed in a location other than `/usr/bin`, - set the `VT_MYSQL_ROOT` variable to the root directory of your - MariaDB installation. For example, if mysql is installed in - `/usr/local/mysql`, run the following command. 
- - ``` sh - # export VT_MYSQL_ROOT=/usr/local/mysql - - # on OS X, this is the correct value: - export VT_MYSQL_ROOT=/usr/local/opt/mysql@5.6 - ``` - - Note that the command indicates that the `mysql` executable should - be found at `/usr/local/opt/mysql@5.6/bin/mysql`. - -1. Run `mysqld --version` and confirm that you - are running the correct version of MariaDB or MySQL. The value should - be 10 or higher for MariaDB and 5.6.x for MySQL. - -1. Build Vitess using the commands below. Note that the - `bootstrap.sh` script needs to download some dependencies. - If your machine requires a proxy to access the Internet, you will need - to set the usual environment variables (e.g. `http_proxy`, - `https_proxy`, `no_proxy`). - - Run the bootstrap.sh script: - - ``` sh - ./bootstrap.sh - ### example output: - # skipping zookeeper build - # go install golang.org/x/tools/cmd/cover ... - # Found MariaDB installation in ... - # creating git pre-commit hooks - # - # source dev.env in your shell before building - ``` - - ``` sh - # Remaining commands to build Vitess - source ./dev.env - make build - ``` - -### Run Tests - -**Note:** If you are using etcd, set the following environment variable: - -``` sh -export VT_TEST_FLAGS='--topo-server-flavor=etcd2' -``` - -**Note:** If you are using consul, set the following environment variable: - -``` sh -export VT_TEST_FLAGS='--topo-server-flavor=consul' -``` - -The default targets when running `make test` contain a full set of -tests intended to help Vitess developers to verify code changes. Those tests -simulate a small Vitess cluster by launching many servers on the local -machine. To do so, they require a lot of resources; a minimum of 8GB RAM -and SSD is recommended to run the tests. - -Some tests require extra packages. 
For example, on Ubuntu: - -``` sh -$ sudo apt-get install chromium-browser mvn xvfb -``` - -If you want only to check that Vitess is working in your environment, -you can run a lighter set of tests: - -``` sh -make site_test -``` - -#### Common Test Issues - -Attempts to run the full developer test suite (`make test`) -on an underpowered machine often results in failure. If you still see -the same failures when running the lighter set of tests (`make site_test`), -please let the development team know in the -[vitess@googlegroups.com](https://groups.google.com/forum/#!forum/vitess) -discussion forum. - -##### Node already exists, port in use, etc. - -A failed test can leave orphaned processes. If you use the default -settings, you can use the following commands to identify and kill -those processes: - -``` sh -pgrep -f -l '(vtdataroot|VTDATAROOT)' # list Vitess processes -pkill -f '(vtdataroot|VTDATAROOT)' # kill Vitess processes -``` - -##### Too many connections to MySQL, or other timeouts - -This error often means your disk is too slow. If you don't have access -to an SSD, you can try [testing against a -ramdisk](https://github.com/vitessio/vitess/blob/master/doc/TestingOnARamDisk.md). - -##### Connection refused to tablet, MySQL socket not found, etc. - -These errors might indicate that the machine ran out of RAM and a server -crashed when trying to allocate more RAM. Some of the heavier tests -require up to 8GB RAM. - -##### Connection refused in zkctl test - -This error might indicate that the machine does not have a Java Runtime -installed, which is a requirement if you are using ZooKeeper as the lock server. - -##### Running out of disk space - -Some of the larger tests use up to 4GB of temporary space on disk. - - -## Start a Vitess cluster - -After completing the instructions above to [build Vitess](#build-vitess), -you can use the example scripts in the Github repo to bring up a Vitess -cluster on your local machine. 
These scripts use ZooKeeper as the -lock service. ZooKeeper is included in the Vitess distribution. - -1. **Check system settings** - - Some Linux distributions ship with default file descriptor limits - that are too low for database servers. This issue could show up - as the database crashing with the message `too many open files`. - - Check the system-wide `file-max` setting as well as user-specific - `ulimit` values. We recommend setting them above 100K to be safe. - The exact [procedure](https://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/) - may vary depending on your Linux distribution. - -1. **Configure environment variables** - - If you are still in the same terminal window that - you used to run the build commands, you can skip to the next - step since the environment variables will already be set. - - If you're adapting this example to your own deployment, the only environment - variables required before running the scripts are `VTROOT` and `VTDATAROOT`. - - Set `VTROOT` to the parent of the Vitess source tree. For example, if you - ran `make build` while in `$HOME/vt/src/vitess.io/vitess`, - then you should set: - - ``` sh - export VTROOT=$HOME/vt - ``` - - Set `VTDATAROOT` to the directory where you want data files and logs to - be stored. For example: - - ``` sh - export VTDATAROOT=$HOME/vtdataroot - ``` - -1. **Start ZooKeeper or Etcd** - - Servers in a Vitess cluster find each other by looking for - dynamic configuration data stored in a distributed lock - service. The following script creates a small ZooKeeper cluster: - - ``` sh - $ cd $VTROOT/src/vitess.io/vitess/examples/local - vitess/examples/local$ ./zk-up.sh - ### example output: - # Starting zk servers... - # Waiting for zk servers to be ready... - ``` - - After the ZooKeeper cluster is running, we only need to tell each - Vitess process how to connect to ZooKeeper. Then, each process can - find all of the other Vitess processes by coordinating via ZooKeeper. 
- - Each of our scripts automatically uses the `TOPOLOGY_FLAGS` environment - variable to point to the global ZooKeeper instance. The global instance in - turn is configured to point to the local instance. In our sample scripts, - they are both hosted in the same ZooKeeper service. - - If you want to use Etcd as a distributed lock service, The following script - creates a Etcd instance: - - ``` sh - $ cd $VTROOT/src/vitess.io/vitess/examples/local - vitess/examples/local$ source ./topo-etcd2.sh - vitess/examples/local$ ./etcd-up.sh - ### example output: - # enter etcd2 env - # etcdmain: etcd Version: 3.X.X - # ... - # etcd start done... - ``` - -1. **Start vtctld** - - The *vtctld* server provides a web interface that - displays all of the coordination information stored in ZooKeeper. - - ``` sh - vitess/examples/local$ ./vtctld-up.sh - # Starting vtctld - # Access vtctld web UI at http://localhost:15000 - # Send commands with: vtctlclient -server localhost:15999 ... - ``` - - Open `http://localhost:15000` to verify that - *vtctld* is running. There won't be any information - there yet, but the menu should come up, which indicates that - *vtctld* is running. - - The *vtctld* server also accepts commands from the `vtctlclient` tool, - which is used to administer the cluster. Note that the port for RPCs - (in this case `15999`) is different from the web UI port (`15000`). - These ports can be configured with command-line flags, as demonstrated - in `vtctld-up.sh`. - - For convenience, we'll use the `lvtctl.sh` script in example commands, - to avoid having to type the *vtctld* address every time. - - ``` sh - # List available commands - vitess/examples/local$ ./lvtctl.sh help - ``` - -1. **Start vttablets** - - The `vttablet-up.sh` script brings up three vttablets, and assigns them to - a [keyspace]({% link overview/concepts.md %}#keyspace) and [shard]({% link overview/concepts.md %}#shard) - according to the variables set at the top of the script file. 
- - ``` sh - vitess/examples/local$ ./vttablet-up.sh - # Output from vttablet-up.sh is below - # Starting MySQL for tablet test-0000000100... - # Starting vttablet for test-0000000100... - # Access tablet test-0000000100 at http://localhost:15100/debug/status - # Starting MySQL for tablet test-0000000101... - # Starting vttablet for test-0000000101... - # Access tablet test-0000000101 at http://localhost:15101/debug/status - # Starting MySQL for tablet test-0000000102... - # Starting vttablet for test-0000000102... - # Access tablet test-0000000102 at http://localhost:15102/debug/status - ``` - - After this command completes, refresh the *vtctld* web UI, and you should - see a keyspace named `test_keyspace` with a single shard named `0`. - This is what an unsharded keyspace looks like. - - If you click on the shard box, you'll see a list of [tablets]({% link overview/concepts.md %}#tablet) - in that shard. Note that it's normal for the tablets to be unhealthy at this point, since - you haven't initialized them yet. - - You can also click the **STATUS** link on each tablet to be taken to its - status page, showing more details on its operation. Every Vitess server has - a status page served at `/debug/status` on its web port. - -1. **Initialize MySQL databases** - - Next, designate one of the tablets to be the initial master. - Vitess will automatically connect the other slaves' mysqld instances so - that they start replicating from the master's mysqld. - This is also when the default database is created. Since our keyspace is - named `test_keyspace`, the MySQL database will be named `vt_test_keyspace`. 
- - ``` sh - vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/0 test-100 - ### example output: - # master-elect tablet test-0000000100 is not the shard master, proceeding anyway as -force was used - # master-elect tablet test-0000000100 is not a master in the shard, proceeding anyway as -force was used - ``` - - **Note:** Since this is the first time the shard has been started, - the tablets are not already doing any replication, and there is no - existing master. The `InitShardMaster` command above uses the `-force` flag - to bypass the usual sanity checks that would apply if this wasn't a - brand new shard. - - After running this command, go back to the **Shard Status** page - in the *vtctld* web interface. When you refresh the - page, you should see that one *vttablet* is the master, - two are replicas and two are rdonly. - - You can also see this on the command line: - - ``` sh - vitess/examples/local$ ./lvtctl.sh ListAllTablets test - ### example output: - # test-0000000100 test_keyspace 0 master localhost:15100 localhost:17100 [] - # test-0000000101 test_keyspace 0 replica localhost:15101 localhost:17101 [] - # test-0000000102 test_keyspace 0 replica localhost:15102 localhost:17102 [] - # test-0000000103 test_keyspace 0 rdonly localhost:15103 localhost:17103 [] - # test-0000000104 test_keyspace 0 rdonly localhost:15104 localhost:17104 [] - ``` - -1. **Create a table** - - The `vtctlclient` tool can be used to apply the database schema across all - tablets in a keyspace. The following command creates the table defined in - the `create_test_table.sql` file: - - ``` sh - # Make sure to run this from the examples/local dir, so it finds the file. 
- vitess/examples/local$ ./lvtctl.sh ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace - ``` - - The SQL to create the table is shown below: - - ``` sql - CREATE TABLE messages ( - page BIGINT(20) UNSIGNED, - time_created_ns BIGINT(20) UNSIGNED, - message VARCHAR(10000), - PRIMARY KEY (page, time_created_ns) - ) ENGINE=InnoDB - ``` - -1. **Take a backup** - - Now that the initial schema is applied, it's a good time to take the first - [backup]({% link user-guide/backup-and-restore.md %}). This backup - will be used to automatically restore any additional replicas that you run, - before they connect themselves to the master and catch up on replication. - If an existing tablet goes down and comes back up without its data, it will - also automatically restore from the latest backup and then resume replication. - - ``` sh - vitess/examples/local$ ./lvtctl.sh Backup test-0000000102 - ``` - - After the backup completes, you can list available backups for the shard: - - ``` sh - vitess/examples/local$ ./lvtctl.sh ListBackups test_keyspace/0 - ### example output: - # 2016-05-06.072724.test-0000000102 - ``` - - **Note:** In this single-server example setup, backups are stored at - `$VTDATAROOT/backups`. In a multi-server deployment, you would usually mount - an NFS directory there. You can also change the location by setting the - `-file_backup_storage_root` flag on *vtctld* and *vttablet*, as demonstrated - in `vtctld-up.sh` and `vttablet-up.sh`. - -1. **Initialize Vitess Routing Schema** - - In the examples, we are just using a single database with no specific - configuration. So we just need to make that (empty) configuration visible - for serving. This is done by running the following command: - - ``` sh - vitess/examples/local$ ./lvtctl.sh RebuildVSchemaGraph - ``` - - (As it works, this command will not display any output.) - -1. **Start vtgate** - - Vitess uses *vtgate* to route each client query to - the correct *vttablet*. 
This local example runs a - single *vtgate* instance, though a real deployment - would likely run multiple *vtgate* instances to share - the load. - - ``` sh - vitess/examples/local$ ./vtgate-up.sh - ``` - -### Run a Client Application - -The `client.py` file is a simple sample application -that connects to *vtgate* and executes some queries. -To run it, you need to either: - -* Add the Vitess Python packages to your `PYTHONPATH`. - - or - -* Use the `client.sh` wrapper script, which temporarily - sets up the environment and then runs `client.py`. - - ``` sh - vitess/examples/local$ ./client.sh - ### example output: - # Inserting into master... - # Reading from master... - # (5L, 1462510331910124032L, 'V is for speed') - # (15L, 1462519383758071808L, 'V is for speed') - # (42L, 1462510369213753088L, 'V is for speed') - # ... - ``` - -There are also sample clients in the same directory for Java, PHP, and Go. -See the comments at the top of each sample file for usage instructions. - -### Try Vitess resharding - -Now that you have a full Vitess stack running, you may want to go on to the -[Horizontal Sharding workflow guide]({% link user-guide/horizontal-sharding-workflow.md %}) -or [Horizontal Sharding codelab]({% link user-guide/horizontal-sharding.md %}) -(if you prefer to run each step manually through commands) to try out -[dynamic resharding]({% link user-guide/sharding.md %}#resharding). - -If so, you can skip the tear-down since the sharding guide picks up right here. -If not, continue to the clean-up steps below. - -### Tear down the cluster - -Each `-up.sh` script has a corresponding `-down.sh` script to stop the servers. - -``` sh -vitess/examples/local$ ./vtgate-down.sh -vitess/examples/local$ ./vttablet-down.sh -vitess/examples/local$ ./vtctld-down.sh -vitess/examples/local$ ./zk-down.sh # If you use Etcd, run ./etcd-down.sh -``` - -Note that the `-down.sh` scripts will leave behind any data files created. 
-If you're done with this example data, you can clear out the contents of `VTDATAROOT`: - -``` sh -$ cd $VTDATAROOT -/path/to/vtdataroot$ rm -rf * -``` - -## Troubleshooting - -If anything goes wrong, check the logs in your `$VTDATAROOT/tmp` directory -for error messages. There are also some tablet-specific logs, as well as -MySQL logs in the various `$VTDATAROOT/vt_*` directories. - -If you need help diagnosing a problem, send a message to our -[mailing list](https://groups.google.com/forum/#!forum/vitess). -In addition to any errors you see at the command-line, it would also help to -upload an archive of your `VTDATAROOT` directory to a file sharing service -and provide a link to it. - diff --git a/doc/GettingStartedKubernetes.md b/doc/GettingStartedKubernetes.md deleted file mode 100644 index d38e7946283..00000000000 --- a/doc/GettingStartedKubernetes.md +++ /dev/null @@ -1,747 +0,0 @@ -This page explains how to run Vitess on [Kubernetes](https://kubernetes.io). -It also gives the steps to start a Kubernetes cluster with -[Google Container Engine](https://cloud.google.com/container-engine/). - -If you already have Kubernetes v1.0+ running in one of the other -[supported platforms](https://kubernetes.io/docs/setup/pick-right-solution/), -you can skip the `gcloud` steps. -The `kubectl` steps will apply to any Kubernetes cluster. - -## Prerequisites - -To complete the exercise in this guide, you must -[install etcd-operator](https://github.com/coreos/etcd-operator/blob/master/doc/user/install_guide.md) -in the same namespace in which you plan to run Vitess. - -You also must locally install Go 1.11+, -the Vitess' `vtctlclient` tool, and `kubectl`. -The following sections explain how to set these up in your environment. - -### Install Go 1.11+ - -You need to install [Go 1.11+](https://golang.org/doc/install) to build the -`vtctlclient` tool, which issues commands to Vitess. 
- -After installing Go, make sure your `GOPATH` environment -variable is set to the root of your workspace. The most common setting -is `GOPATH=$HOME/go`, and the value should identify a -directory to which your non-root user has write access. - -In addition, make sure that `$GOPATH/bin` is included in -your `$PATH`. More information about setting up a Go -workspace can be found at -[How to Write Go Code](https://golang.org/doc/code.html#Organization). - -### Build and install vtctlclient - -The `vtctlclient` tool issues commands to Vitess. - -``` sh -$ go get vitess.io/vitess/go/cmd/vtctlclient -``` - -This command downloads and builds the Vitess source code at: - -``` sh -$GOPATH/src/vitess.io/vitess/ -``` - -It also copies the built `vtctlclient` binary into `$GOPATH/bin`. - -### Set up Google Compute Engine, Container Engine, and Cloud tools - -**Note:** If you are running Kubernetes elsewhere, skip to -[Locate kubectl](#locate-kubectl). - -To run Vitess on Kubernetes using Google Compute Engine (GCE), -you must have a GCE account with billing enabled. The instructions -below explain how to enable billing and how to associate a billing -account with a project in the Google Developers Console. - -1. Log in to the Google Developers Console to [enable billing](https://console.developers.google.com/billing). - 1. Click the **Billing** pane if you are not there already. - 1. Click **New billing account**. - 1. Assign a name to the billing account -- e.g. "Vitess on - Kubernetes." Then click **Continue**. You can sign up - for the [free trial](https://cloud.google.com/free-trial/) - to avoid any charges. - -1. Create a project in the Google Developers Console that uses - your billing account: - 1. At the top of the Google Developers Console, click the **Projects** dropdown. - 1. Click the Create a Project... link. - 1. Assign a name to your project. Then click the **Create** button. - Your project should be created and associated with your - billing account. 
(If you have multiple billing accounts, - confirm that the project is associated with the correct account.) - 1. After creating your project, click **API Manager** in the left menu. - 1. Find **Google Compute Engine** and **Google Container Engine API**. - (Both should be listed under "Google Cloud APIs".) - For each, click on it, then click the **"Enable API"** button. - -1. Follow the [Google Cloud SDK quickstart instructions](https://cloud.google.com/sdk/#Quick_Start) - to set up and test the Google Cloud SDK. You will also set your default project - ID while completing the quickstart. - - **Note:** If you skip the quickstart guide because you've previously set up - the Google Cloud SDK, just make sure to set a default project ID by running - the following command. Replace `PROJECT` with the project ID assigned to - your [Google Developers Console](https://console.developers.google.com/) - project. You can [find the ID](https://cloud.google.com/compute/docs/projects#projectids) - by navigating to the **Overview** page for the project in the Console. - - ``` sh - $ gcloud config set project PROJECT - ``` - -1. Install or update the `kubectl` tool: - - ``` sh - $ gcloud components update kubectl - ``` - -### Locate kubectl - -Check if `kubectl` is on your `PATH`: - -``` sh -$ which kubectl -### example output: -# ~/google-cloud-sdk/bin/kubectl -``` - -If `kubectl` isn't on your `PATH`, you can tell our scripts where -to find it by setting the `KUBECTL` environment variable: - -``` sh -$ export KUBECTL=/example/path/to/google-cloud-sdk/bin/kubectl -``` - -## Start a Container Engine cluster - -**Note:** If you are running Kubernetes elsewhere, skip to -[Start a Vitess cluster](#start-a-vitess-cluster). - -1. Set the [zone](https://cloud.google.com/compute/docs/zones#overview) - that your installation will use: - - ``` sh - $ gcloud config set compute/zone us-central1-b - ``` - -1. 
Create a Container Engine cluster: - - ``` sh - $ gcloud container clusters create example --machine-type n1-standard-4 --num-nodes 5 --scopes storage-rw - ### example output: - # Creating cluster example...done. - # Created [https://container.googleapis.com/v1/projects/vitess/zones/us-central1-b/clusters/example]. - # kubeconfig entry generated for example. - ``` - - **Note:** The `--scopes storage-rw` argument is necessary to allow - [built-in backup/restore]({% link user-guide/backup-and-restore.md %}) - to access [Google Cloud Storage](https://cloud.google.com/storage/). - -1. Create a Cloud Storage bucket: - - To use the Cloud Storage plugin for built-in backups, first create a - [bucket](https://cloud.google.com/storage/docs/concepts-techniques#concepts) - for Vitess backup data. See the - [bucket naming guidelines](https://cloud.google.com/storage/docs/bucket-naming) - if you're new to Cloud Storage. - - ``` sh - $ gsutil mb gs://my-backup-bucket - ``` - -## Start a Vitess cluster - -1. **Navigate to your local Vitess source code** - - This directory would have been created when you installed - `vtctlclient`: - - ``` sh - $ cd $GOPATH/src/vitess.io/vitess/examples/kubernetes - ``` - -1. **Configure site-local settings** - - Run the `configure.sh` script to generate a `config.sh` file, which will be - used to customize your cluster settings. - - Currently, we have out-of-the-box support for storing - [backups]({% link user-guide/backup-and-restore.md %}) in - [Google Cloud Storage](https://cloud.google.com/storage/). If you're using - GCS, fill in the fields requested by the configure script, including the - name of the bucket you created above. - - ``` sh - vitess/examples/kubernetes$ ./configure.sh - ### example output: - # Backup Storage (file, gcs) [gcs]: - # Google Developers Console Project [my-project]: - # Google Cloud Storage bucket for Vitess backups: my-backup-bucket - # Saving config.sh... 
- ``` - - For other platforms, you'll need to choose the `file` backup storage plugin, - and mount a read-write network volume into the `vttablet` and `vtctld` pods. - For example, you can mount any storage service accessible through NFS into a - [Kubernetes volume](https://kubernetes.io/docs/concepts/storage/volumes#nfs). - Then provide the mount path to the configure script here. - - Direct support for other cloud blob stores like Amazon S3 can be added by - implementing the Vitess [BackupStorage plugin interface](https://github.com/vitessio/vitess/blob/master/go/vt/mysqlctl/backupstorage/interface.go). - Let us know on the [discussion forum](https://groups.google.com/forum/#!forum/vitess) - if you have any specific plugin requests. - -1. **Start an etcd cluster** - - The Vitess [topology service]({% link overview/concepts.md %}#topology-service) - stores coordination data for all the servers in a Vitess cluster. - It can store this data in one of several consistent storage systems. - In this example, we'll use [etcd](https://github.com/coreos/etcd). - Note that we need our own etcd clusters, separate from the one used by - Kubernetes itself. We will use etcd-operator to manage these clusters. - - If you haven't done so already, make sure you - [install etcd-operator](https://github.com/coreos/etcd-operator/blob/master/doc/user/install_guide.md) - in the same namespace in which you plan to run Vitess - before continuing. - - ``` sh - vitess/examples/kubernetes$ ./etcd-up.sh - ### example output: - # Creating etcd service for 'global' cell... - # etcdcluster "etcd-global" created - # Creating etcd service for 'test' cell... - # etcdcluster "etcd-test" created - # ... - ``` - - This command creates two clusters. One is for the - [global cell]({% link user-guide/topology-service.md %}#global-vs-local), - and the other is for a - [local cell]({% link overview/concepts.md %}#cell-data-center) - called *test*. 
You can check the status of the - [pods](https://kubernetes.io/docs/concepts/workloads/pods/) - in the cluster by running: - - ``` sh - $ kubectl get pods - ### example output: - # NAME READY STATUS RESTARTS AGE - # etcd-global-0000 1/1 Running 0 1m - # etcd-global-0001 1/1 Running 0 1m - # etcd-global-0002 1/1 Running 0 1m - # etcd-operator-857677187-rvgf5 1/1 Running 0 28m - # etcd-test-0000 1/1 Running 0 1m - # etcd-test-0001 1/1 Running 0 1m - # etcd-test-0002 1/1 Running 0 1m - ``` - - It may take a while for each Kubernetes node to download the - Docker images the first time it needs them. While the images - are downloading, the pod status will be Pending. - - **Note:** In this example, each script that has a name ending in - `-up.sh` also has a corresponding `-down.sh` - script, which can be used to stop certain components of the - Vitess cluster without bringing down the whole cluster. For - example, to tear down the `etcd` deployment, run: - - ``` sh - vitess/examples/kubernetes$ ./etcd-down.sh - ``` - -1. **Start vtctld** - - The `vtctld` server provides a web interface to inspect the state of the - Vitess cluster. It also accepts RPC commands from `vtctlclient` to modify - the cluster. - - ``` sh - vitess/examples/kubernetes$ ./vtctld-up.sh - ### example output: - # Creating vtctld ClusterIP service... - # service "vtctld" created - # Creating vtctld replicationcontroller... - # replicationcontroller "vtctld" created - ``` - -1. **Access vtctld web UI** - - To access vtctld from outside Kubernetes, use [kubectl proxy](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) - to create an authenticated tunnel on your workstation: - - **Note:** The proxy command runs in the foreground, - so you may want to run it in a separate terminal. 
- - ``` sh - $ kubectl proxy --port=8001 - ### example output: - # Starting to serve on localhost:8001 - ``` - - You can then load the vtctld web UI on `localhost`: - - http://localhost:8001/api/v1/namespaces/default/services/vtctld:web/proxy - - You can also use this proxy to access the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/), - where you can monitor nodes, pods, and services: - - http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/. - -1. **Use vtctlclient to send commands to vtctld** - - You can now run `vtctlclient` locally to issue commands - to the `vtctld` service on your Kubernetes cluster. - - To enable RPC access into the Kubernetes cluster, we'll again use - `kubectl` to set up an authenticated tunnel. Unlike the HTTP proxy - we used for the web UI, this time we need raw [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) - for vtctld's [gRPC](https://grpc.io) port. - - Since the tunnel needs to target a particular vtctld pod name, - we've provided the `kvtctl.sh` script, which uses `kubectl` to - discover the pod name and set up the tunnel before running `vtctlclient`. - - Now, running `kvtctl.sh help` will test your connection to - `vtctld` and also list the `vtctlclient` - commands that you can use to administer the Vitess cluster. - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh help - ### example output: - # Available commands: - # - # Tablets: - # InitTablet ... - # ... - ``` - - You can also use the `help` command to get more details about each command: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh help ListAllTablets - ``` - - See the [vtctl reference]({% link reference/vtctl.md %}) for a - web-formatted version of the `vtctl help` output. - -1. 
**Setup the cell in the topology** - - The global etcd cluster is configured from command-line parameters, - specified in the Kubernetes configuration files. The per-cell etcd cluster - however needs to be configured, so it is reachable by Vitess. The following - command sets it up: - - ``` sh - ./kvtctl.sh AddCellInfo --root /test -server_address http://etcd-test-client:2379 test - ``` - - -1. **Start vttablets** - - A Vitess [tablet]({% link overview/concepts.md %}#tablet) is the - unit of scaling for the database. A tablet consists of the - `vttablet` and `mysqld` processes, running on the same - host. We enforce this coupling in Kubernetes by putting the respective - containers for vttablet and mysqld inside a single - [pod](https://kubernetes.io/docs/concepts/workloads/pods/). - - Run the following script to launch the vttablet pods, which also include - mysqld: - - ``` sh - vitess/examples/kubernetes$ ./vttablet-up.sh - ### example output: - # Creating test_keyspace.shard-0 pods in cell test... - # Creating pod for tablet test-0000000100... - # pod "vttablet-100" created - # Creating pod for tablet test-0000000101... - # pod "vttablet-101" created - # Creating pod for tablet test-0000000102... - # pod "vttablet-102" created - # Creating pod for tablet test-0000000103... - # pod "vttablet-103" created - # Creating pod for tablet test-0000000104... - # pod "vttablet-104" created - ``` - - In the vtctld web UI, you should soon see a - [keyspace]({% link overview/concepts.md %}#keyspace) named `test_keyspace` - with a single [shard]({% link overview/concepts.md %}#shard) named `0`. - Click on the shard name to see the list of tablets. When all 5 tablets - show up on the shard status page, you're ready to continue. Note that it's - normal for the tablets to be unhealthy at this point, since you haven't - initialized the databases on them yet. 
- - It can take some time for the tablets to come up for the first time if a pod - was scheduled on a node that hasn't downloaded the [Vitess Docker image](https://hub.docker.com/u/vitess/) - yet. You can also check the status of the - tablets from the command line using `kvtctl.sh`: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh ListAllTablets test - ### example output: - # test-0000000100 test_keyspace 0 spare 10.64.1.6:15002 10.64.1.6:3306 [] - # test-0000000101 test_keyspace 0 spare 10.64.2.5:15002 10.64.2.5:3306 [] - # test-0000000102 test_keyspace 0 spare 10.64.0.7:15002 10.64.0.7:3306 [] - # test-0000000103 test_keyspace 0 spare 10.64.1.7:15002 10.64.1.7:3306 [] - # test-0000000104 test_keyspace 0 spare 10.64.2.6:15002 10.64.2.6:3306 [] - ``` - -1. **Initialize MySQL databases** - - Once all the tablets show up, you're ready to initialize the underlying - MySQL databases. - - **Note:** Many `vtctlclient` commands produce no output on success. - - First, designate one of the tablets to be the initial master. Vitess will - automatically connect the other slaves' mysqld instances so that they start - replicating from the master's mysqld. This is also when the default database - is created. Since our keyspace is named `test_keyspace`, the MySQL database - will be named `vt_test_keyspace`. - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh InitShardMaster -force test_keyspace/0 test-0000000100 - ### example output: - # master-elect tablet test-0000000100 is not the shard master, proceeding anyway as -force was used - # master-elect tablet test-0000000100 is not a master in the shard, proceeding anyway as -force was used - ``` - - **Note:** Since this is the first time the shard has been started, the - tablets are not already doing any replication, and there is no existing - master. The `InitShardMaster` command above uses the `-force` flag to bypass - the usual sanity checks that would apply if this wasn't a brand new shard. 
- - After the tablets finish updating, you should see one **master**, and - several **replica** and **rdonly** tablets: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh ListAllTablets test - ### example output: - # test-0000000100 test_keyspace 0 master 10.64.1.6:15002 10.64.1.6:3306 [] - # test-0000000101 test_keyspace 0 replica 10.64.2.5:15002 10.64.2.5:3306 [] - # test-0000000102 test_keyspace 0 replica 10.64.0.7:15002 10.64.0.7:3306 [] - # test-0000000103 test_keyspace 0 rdonly 10.64.1.7:15002 10.64.1.7:3306 [] - # test-0000000104 test_keyspace 0 rdonly 10.64.2.6:15002 10.64.2.6:3306 [] - ``` - - The **replica** tablets are used for serving live web traffic, while the - **rdonly** tablets are used for offline processing, such as batch jobs and backups. - The amount of each [tablet type]({% link overview/concepts.md %}#tablet) - that you launch can be configured in the `vttablet-up.sh` script. - -1. **Create a table** - - The `vtctlclient` tool can be used to apply the database schema - across all tablets in a keyspace. The following command creates - the table defined in the `create_test_table.sql` file: - - ``` sh - # Make sure to run this from the examples/kubernetes dir, so it finds the file. 
- vitess/examples/kubernetes$ ./kvtctl.sh ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace - ``` - - The SQL to create the table is shown below: - - ``` sql - CREATE TABLE messages ( - page BIGINT(20) UNSIGNED, - time_created_ns BIGINT(20) UNSIGNED, - message VARCHAR(10000), - PRIMARY KEY (page, time_created_ns) - ) ENGINE=InnoDB - ``` - - You can run this command to confirm that the schema was created - properly on a given tablet, where `test-0000000100` - is a tablet alias as shown by the `ListAllTablets` command: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh GetSchema test-0000000100 - ### example output: - # { - # "DatabaseSchema": "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", - # "TableDefinitions": [ - # { - # "Name": "messages", - # "Schema": "CREATE TABLE `messages` (\n `page` bigint(20) unsigned NOT NULL DEFAULT '0',\n `time_created_ns` bigint(20) unsigned NOT NULL DEFAULT '0',\n `message` varchar(10000) DEFAULT NULL,\n PRIMARY KEY (`page`,`time_created_ns`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8", - # "Columns": [ - # "page", - # "time_created_ns", - # "message" - # ], - # ... - ``` - -1. **Take a backup** - - Now that the initial schema is applied, it's a good time to take the first - [backup]({% link user-guide/backup-and-restore.md %}). This backup - will be used to automatically restore any additional replicas that you run, - before they connect themselves to the master and catch up on replication. - If an existing tablet goes down and comes back up without its data, it will - also automatically restore from the latest backup and then resume replication. - - Select one of the **rdonly** tablets and tell it to take a backup. We use a - **rdonly** tablet instead of a **replica** because the tablet will pause - replication and stop serving during data copy to create a consistent snapshot. 
- - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh Backup test-0000000104 - ``` - - After the backup completes, you can list available backups for the shard: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh ListBackups test_keyspace/0 - ### example output: - # 2015-10-21.042940.test-0000000104 - ``` - -1. **Initialize Vitess Routing Schema** - - In the examples, we are just using a single database with no specific - configuration. So we just need to make that (empty) configuration visible - for serving. This is done by running the following command: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh RebuildVSchemaGraph - ``` - - (As it works, this command will not display any output.) - -1. **Start vtgate** - - Vitess uses [vtgate]({% link overview/index.md %}#vtgate) to route each client - query to the correct `vttablet`. In Kubernetes, a `vtgate` service - distributes connections to a pool of `vtgate` pods. The pods are curated by - a [replication controller](https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/). - - ``` sh - vitess/examples/kubernetes$ ./vtgate-up.sh - ### example output: - # Creating vtgate service in cell test... - # service "vtgate-test" created - # Creating vtgate replicationcontroller in cell test... - # replicationcontroller "vtgate-test" created - ``` - -## Test your cluster with a client app - -The GuestBook app in the example is ported from the -[Kubernetes GuestBook example](https://github.com/kubernetes/kubernetes/tree/master/examples/guestbook-go). -The server-side code has been rewritten in Python to use Vitess as the storage -engine. The client-side code (HTML/JavaScript) has been modified to support -multiple Guestbook pages, which will be useful to demonstrate Vitess sharding in -a later guide. - -``` sh -vitess/examples/kubernetes$ ./guestbook-up.sh -### example output: -# Creating guestbook service... -# services "guestbook" created -# Creating guestbook replicationcontroller... 
-# replicationcontroller "guestbook" created -``` - -As with the `vtctld` service, by default the GuestBook app is not accessible -from outside Kubernetes. In this case, since this is a user-facing frontend, -we set `type: LoadBalancer` in the GuestBook service definition, -which tells Kubernetes to create a public -[load balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -using the API for whatever platform your Kubernetes cluster is in. - -You also need to [allow access through your platform's firewall](https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/). - -``` sh -# For example, to open port 80 in the GCE firewall: -$ gcloud compute firewall-rules create guestbook --allow tcp:80 -``` - -**Note:** For simplicity, the firewall rule above opens the port on **all** -GCE instances in your project. In a production system, you would likely -limit it to specific instances. - -Then, get the external IP of the load balancer for the GuestBook service: - -``` sh -$ kubectl get service guestbook -### example output: -# NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# guestbook 10.67.242.247 3.4.5.6 80/TCP 1m -``` - -If the `EXTERNAL-IP` is still empty, give it a few minutes to create -the external load balancer and check again. - -Once the pods are running, the GuestBook app should be accessible -from the load balancer's external IP. In the example above, it would be at -`http://3.4.5.6`. - -You can see Vitess' replication capabilities by opening the app in -multiple browser windows, with the same Guestbook page number. -Each new entry is committed to the master database. -In the meantime, JavaScript on the page continuously polls -the app server to retrieve a list of GuestBook entries. The app serves -read-only requests by querying Vitess in 'replica' mode, confirming -that replication is working. 
- -You can also inspect the data stored by the app: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages" -### example output: -# +------+---------------------+---------+ -# | page | time_created_ns | message | -# +------+---------------------+---------+ -# | 42 | 1460771336286560000 | Hello | -# +------+---------------------+---------+ -``` - -The [GuestBook source code](https://github.com/vitessio/vitess/tree/master/examples/kubernetes/guestbook) -provides more detail about how the app server interacts with Vitess. - -## Try Vitess resharding - -Now that you have a full Vitess stack running, you may want to go on to the -[Sharding in Kubernetes workflow guide]({% link user-guide/sharding-kubernetes.md %}) -or [Sharding in Kubernetes codelab]({% link user-guide/sharding-kubernetes.md %}) -(if you prefer to run each step manually through commands) to try out -[dynamic resharding]({% link user-guide/sharding.md %}#resharding). - -If so, you can skip the tear-down since the sharding guide picks up right here. -If not, continue to the clean-up steps below. - -## Tear down and clean up - -Before stopping the Container Engine cluster, you should tear down the Vitess -services. Kubernetes will then take care of cleaning up any entities it created -for those services, like external load balancers. 
- -``` sh -vitess/examples/kubernetes$ ./guestbook-down.sh -vitess/examples/kubernetes$ ./vtgate-down.sh -vitess/examples/kubernetes$ ./vttablet-down.sh -vitess/examples/kubernetes$ ./vtctld-down.sh -vitess/examples/kubernetes$ ./etcd-down.sh -``` - -Then tear down the Container Engine cluster itself, which will stop the virtual -machines running on Compute Engine: - -``` sh -$ gcloud container clusters delete example -``` - -It's also a good idea to remove any firewall rules you created, unless you plan -to use them again soon: - -``` sh -$ gcloud compute firewall-rules delete guestbook -``` - -## Troubleshooting - -### Server logs - -If a pod enters the `Running` state, but the server -doesn't respond as expected, use the `kubectl logs` -command to check the pod output: - -``` sh -# show logs for container 'vttablet' within pod 'vttablet-100' -$ kubectl logs vttablet-100 vttablet - -# show logs for container 'mysql' within pod 'vttablet-100' -# Note that this is NOT MySQL error log. -$ kubectl logs vttablet-100 mysql -``` - -Post the logs somewhere and send a link to the [Vitess -mailing list](https://groups.google.com/forum/#!forum/vitess) -to get more help. - -### Shell access - -If you want to poke around inside a container, you can use `kubectl exec` to run -a shell. 
- -For example, to launch a shell inside the `vttablet` container of the -`vttablet-100` pod: - -``` sh -$ kubectl exec vttablet-100 -c vttablet -t -i -- bash -il -root@vttablet-100:/# ls /vt/vtdataroot/vt_0000000100 -### example output: -# bin-logs innodb my.cnf relay-logs -# data memcache.sock764383635 mysql.pid slow-query.log -# error.log multi-master.info mysql.sock tmp -``` - -### Root certificates - -If you see in the logs a message like this: - -``` -x509: failed to load system roots and no roots provided -``` - -It usually means that your Kubernetes nodes are running a host OS -that puts root certificates in a different place than our configuration -expects by default (for example, Fedora). See the comments in the -[etcd controller template](https://github.com/kubernetes/examples/blob/master/staging/storage/vitess/etcd-controller-template.yaml) -for examples of how to set the right location for your host OS. -You'll also need to adjust the same certificate path settings in the -`vtctld` and `vttablet` templates. - -### Status pages for vttablets - -Each `vttablet` serves a set of HTML status pages on its primary port. -The `vtctld` interface provides a **STATUS** link for each tablet. - -If you access the vtctld web UI through the kubectl proxy as described above, -it will automatically link to the vttablets through that same proxy, -giving you access from outside the cluster. - -You can also use the proxy to go directly to a tablet. For example, -to see the status page for the tablet with ID `100`, you could navigate to: - -http://localhost:8001/api/v1/proxy/namespaces/default/pods/vttablet-100:15002/debug/status - -### Direct connection to mysqld - -Since the `mysqld` within the `vttablet` pod is only meant to be accessed -via vttablet, our default bootstrap settings only allow connections from -localhost. 
- -If you want to check or manipulate the underlying mysqld, you can issue -simple queries or commands through `vtctlclient` like this: - -``` sh -# Send a query to tablet 100 in cell 'test'. -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT VERSION()" -### example output: -# +------------+ -# | VERSION() | -# +------------+ -# | 5.7.13-log | -# +------------+ -``` - -If you need a truly direct connection to mysqld, you can [launch a shell](#shell-access) -inside the mysql container, and then connect with the `mysql` -command-line client: - -``` sh -$ kubectl exec vttablet-100 -c mysql -t -i -- bash -il -root@vttablet-100:/# export TERM=ansi -root@vttablet-100:/# mysql -S /vt/vtdataroot/vt_0000000100/mysql.sock -u vt_dba -``` - diff --git a/doc/GitHubWorkflow.md b/doc/GitHubWorkflow.md deleted file mode 100644 index c81aefb69b0..00000000000 --- a/doc/GitHubWorkflow.md +++ /dev/null @@ -1,137 +0,0 @@ -# GitHub Workflow - -If you are new to Git and GitHub, we recommend to read this page. Otherwise, you may skip it. - -Our GitHub workflow is a so called triangular workflow: - -visualization of the GitHub triangular workflow - -*Image Source:* https://github.com/blog/2042-git-2-5-including-multiple-worktrees-and-triangular-workflows - -The Vitess code is hosted on GitHub (https://github.com/vitessio/vitess). -This repository is called *upstream*. -You develop and commit your changes in a clone of our upstream repository (shown as *local* in the image above). -Then you push your changes to your forked repository (*origin*) and send us a pull request. -Eventually, we will merge your pull request back into the *upstream* repository. 
- -## Remotes - -Since you should have cloned the repository from your fork, the `origin` remote -should look like this: - -``` -$ git remote -v -origin git@github.com:/vitess.git (fetch) -origin git@github.com:/vitess.git (push) -``` - -To help you keep your fork in sync with the main repo, add an `upstream` remote: - -``` -$ git remote add upstream git@github.com:vitessio/vitess.git -$ git remote -v -origin git@github.com:/vitess.git (fetch) -origin git@github.com:/vitess.git (push) -upstream git@github.com:vitessio/vitess.git (fetch) -upstream git@github.com:vitessio/vitess.git (push) -``` - -Now to sync your local `master` branch, do this: - -``` -$ git checkout master -(master) $ git pull upstream master -``` - -Note: In the example output above we prefixed the prompt with `(master)` to -stress the fact that the command must be run from the branch `master`. - -You can omit the `upstream master` from the `git pull` command when you let your -`master` branch always track the main `vitessio/vitess` repository. To achieve -this, run this command once: - -``` -(master) $ git branch --set-upstream-to=upstream/master -``` - -Now the following command syncs your local `master` branch as well: - -``` -(master) $ git pull -``` - -## Topic Branches - -Before you start working on changes, create a topic branch: - -``` -$ git checkout master -(master) $ git pull -(master) $ git checkout -b new-feature -(new-feature) $ # You are now in the new-feature branch. -``` - -Try to commit small pieces along the way as you finish them, with an explanation -of the changes in the commit message. -Please see the [Code Review page]({% link contributing/code-reviews.md %}) for more guidance. - -As you work in a package, you can run just -the unit tests for that package by running `go test` from within that package. - -When you're ready to test the whole system, run the full test suite with `make -test` from the root of the Git tree. 
-If you haven't installed all dependencies for `make test`, you can rely on the Travis CI test results as well. -These results will be linked on your pull request. - -## Committing your work - -When running `git commit` use the `-s` option to add a Signed-off-by line. -This is needed for [the Developer Certificate of Origin](https://github.com/apps/dco). - -## Sending Pull Requests - -Push your branch to the repository (and set it to track with `-u`): - -``` -(new-feature) $ git push -u origin new-feature -``` - -You can omit `origin` and `-u new-feature` parameters from the `git push` -command with the following two Git configuration changes: - -``` -$ git config remote.pushdefault origin -$ git config push.default current -``` - -The first setting saves you from typing `origin` every time. And with the second -setting, Git assumes that the remote branch on the GitHub side will have the -same name as your local branch. - -After this change, you can run `git push` without arguments: - -``` -(new-feature) $ git push -``` - -Then go to the [repository page](https://github.com/vitessio/vitess) and it -should prompt you to create a Pull Request from a branch you recently pushed. -You can also [choose a branch manually](https://github.com/vitessio/vitess/compare). - -## Addressing Changes - -If you need to make changes in response to the reviewer's comments, just make -another commit on your branch and then push it again: - -``` -$ git checkout new-feature -(new-feature) $ git commit -(new-feature) $ git push -``` - -That is because a pull request always mirrors all commits from your topic branch which are not in the master branch. 
- -Once your pull request is merged: - -* close the GitHub issue (if it wasn't automatically closed) -* delete your local topic branch (`git branch -d new-feature`) diff --git a/doc/HorizontalReshardingGuide.md b/doc/HorizontalReshardingGuide.md deleted file mode 100644 index f38c5563ef4..00000000000 --- a/doc/HorizontalReshardingGuide.md +++ /dev/null @@ -1,262 +0,0 @@ -This guide walks you through the process of sharding an existing unsharded -Vitess [keyspace]({% link overview/concepts.md %}#keyspace). - -## Prerequisites - -We begin by assuming you've completed the -[Getting Started]({% link getting-started/local-instance.md %}) guide, -and have left the cluster running. - -## Overview - -The sample clients in the `examples/local` folder use the following schema: - -``` sql -CREATE TABLE messages ( - page BIGINT(20) UNSIGNED, - time_created_ns BIGINT(20) UNSIGNED, - message VARCHAR(10000), - PRIMARY KEY (page, time_created_ns) -) ENGINE=InnoDB -``` - -The idea is that each page number represents a separate guestbook in a -multi-tenant app. Each guestbook page consists of a list of messages. - -In this guide, we'll introduce sharding by page number. -That means pages will be randomly distributed across shards, -but all records for a given page are always guaranteed to be on the same shard. -In this way, we can transparently scale the database to support arbitrary growth -in the number of pages. - -## Configure sharding information - -The first step is to tell Vitess how we want to partition the data. -We do this by providing a VSchema definition as follows: - -``` json -{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "messages": { - "column_vindexes": [ - { - "column": "page", - "name": "hash" - } - ] - } - } -} -``` - -This says that we want to shard the data by a hash of the `page` column. -In other words, keep each page's messages together, but spread pages around -the shards randomly. 
- -We can load this VSchema into Vitess like this: - -``` sh -vitess/examples/local$ ./lvtctl.sh ApplyVSchema -vschema "$(cat vschema.json)" test_keyspace -``` - -## Bring up tablets for new shards - -In the unsharded example, you started tablets for a shard -named *0* in *test_keyspace*, written as *test_keyspace/0*. -Now you'll start tablets for two additional shards, -named *test_keyspace/-80* and *test_keyspace/80-*: - -``` sh -vitess/examples/local$ ./sharded-vttablet-up.sh -``` - -Since the sharding key is the page number, -this will result in half the pages going to each shard, -since *0x80* is the midpoint of the -[sharding key range]({% link user-guide/sharding.md %}#key-ranges-and-partitions). - -These new shards will run in parallel with the original shard during the -transition, but actual traffic will be served only by the original shard -until we tell it to switch over. - -Check the *vtctld* web UI, or the output of `lvtctl.sh ListAllTablets test`, -to see when the tablets are ready. There should be 5 tablets in each shard. - -Once the tablets are ready, initialize replication by electing the first master -for each of the new shards: - -``` sh -vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/-80 test-0000000200 -vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/80- test-0000000300 -``` - -Now there should be a total of 15 tablets, with one master for each shard: - -``` sh -vitess/examples/local$ ./lvtctl.sh ListAllTablets test -### example output: -# test-0000000100 test_keyspace 0 master 10.64.3.4:15002 10.64.3.4:3306 [] -# ... -# test-0000000200 test_keyspace -80 master 10.64.0.7:15002 10.64.0.7:3306 [] -# ... -# test-0000000300 test_keyspace 80- master 10.64.0.9:15002 10.64.0.9:3306 [] -# ... 
-``` - -## Copy data from original shard - -The new tablets start out empty, so we need to copy everything from the -original shard to the two new ones, starting with the schema: - -``` sh -vitess/examples/local$ ./lvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/-80 -vitess/examples/local$ ./lvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/80- -``` - -Next we copy the data. Since the amount of data to copy can be very large, -we use a special batch process called *vtworker* to stream the data from a -single source to multiple destinations, routing each row based on its -*keyspace_id*: - -``` sh -vitess/examples/local$ ./sharded-vtworker.sh SplitClone test_keyspace/0 -### example output: -# I0416 02:08:59.952805 9 instance.go:115] Starting worker... -# ... -# State: done -# Success: -# messages: copy done, copied 11 rows -``` - -Notice that we've only specified the source shard, *test_keyspace/0*. -The *SplitClone* process will automatically figure out which shards to use -as the destinations based on the key range that needs to be covered. -In this case, shard *0* covers the entire range, so it identifies -*-80* and *80-* as the destination shards, since they combine to cover the -same range. - -Next, it will pause replication on one *rdonly* (offline processing) tablet -to serve as a consistent snapshot of the data. The app can continue without -downtime, since live traffic is served by *replica* and *master* tablets, -which are unaffected. Other batch jobs will also be unaffected, since they -will be served only by the remaining, un-paused *rdonly* tablets. - -## Check filtered replication - -Once the copy from the paused snapshot finishes, *vtworker* turns on -[filtered replication]({% link user-guide/sharding.md %}#filtered-replication) -from the source shard to each destination shard. This allows the destination -shards to catch up on updates that have continued to flow in from the app since -the time of the snapshot. 
- -When the destination shards are caught up, they will continue to replicate -new updates. You can see this by looking at the contents of each shard as -you add new messages to various pages in the Guestbook app. Shard *0* will -see all the messages, while the new shards will only see messages for pages -that live on that shard. - -``` sh -# See what's on shard test_keyspace/0: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages" -``` - -You can run the client script again to add some messages on various pages -and see how they get routed. - -## Check copied data integrity - -The *vtworker* batch process has another mode that will compare the source -and destination to ensure all the data is present and correct. -The following commands will run a diff for each destination shard: - -``` sh -vitess/examples/local$ ./sharded-vtworker.sh SplitDiff test_keyspace/-80 -vitess/examples/local$ ./sharded-vtworker.sh SplitDiff test_keyspace/80- -``` - -If any discrepancies are found, they will be printed. -If everything is good, you should see something like this: - -``` -I0416 02:10:56.927313 10 split_diff.go:496] Table messages checks out (4 rows processed, 1072961 qps) -``` - -## Switch over to new shards - -Now we're ready to switch over to serving from the new shards. -The [MigrateServedTypes]({% link reference/vtctl.md %}#migrateservedtypes) -command lets you do this one -[tablet type]({% link overview/concepts.md %}#tablet) at a time, -and even one [cell]({% link overview/concepts.md %}#cell-data-center) -at a time. The process can be rolled back at any point *until* the master is -switched over. 
- -``` sh -vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 rdonly -vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 replica -vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 master -``` - -During the *master* migration, the original shard master will first stop -accepting updates. Then the process will wait for the new shard masters to -fully catch up on filtered replication before allowing them to begin serving. -Since filtered replication has been following along with live updates, there -should only be a few seconds of master unavailability. - -When the master traffic is migrated, the filtered replication will be stopped. -Data updates will be visible on the new shards, but not on the original shard. -See it for yourself: Add a message to the guestbook page and then inspect -the database content: - -``` sh -# See what's on shard test_keyspace/0 -# (no updates visible since we migrated away from it): -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages" -``` - -## Remove original shard - -Now that all traffic is being served from the new shards, we can remove the -original one. To do that, we use the `vttablet-down.sh` script from the -unsharded example: - -``` sh -vitess/examples/local$ ./vttablet-down.sh -``` - -Then we can delete the now-empty shard: - -``` sh -vitess/examples/local$ ./lvtctl.sh DeleteShard -recursive test_keyspace/0 -``` - -You should then see in the vtctld **Topology** page, or in the output of -`lvtctl.sh ListAllTablets test` that the tablets for shard *0* are gone. 
- -## Tear down and clean up - -Since you already cleaned up the tablets from the original unsharded example by -running `./vttablet-down.sh`, that step has been replaced with -`./sharded-vttablet-down.sh` to clean up the new sharded tablets. - -``` sh -vitess/examples/local$ ./vtgate-down.sh -vitess/examples/local$ ./sharded-vttablet-down.sh -vitess/examples/local$ ./vtctld-down.sh -vitess/examples/local$ ./zk-down.sh -``` - diff --git a/doc/HorizontalReshardingWorkflowGuide.md b/doc/HorizontalReshardingWorkflowGuide.md deleted file mode 100644 index ff3eb8b0809..00000000000 --- a/doc/HorizontalReshardingWorkflowGuide.md +++ /dev/null @@ -1,262 +0,0 @@ -This guide shows you an example about how to apply range-based sharding -process in an existing unsharded Vitess [keyspace]({% link overview/concepts.md %}#keyspace) -using the horizontal resharding workflow. In this example, we will reshard -from 1 shard "0" into 2 shards "-80" and "80-". - -## Overview - -The horizontal resharding process mainly contains the following steps -(each step is a phase in the workflow): - -1. Copy schema from original shards to destination shards. - (**Phase: CopySchemaShard**) -1. Copy the data with a batch process called *vtworker* - (**Phase: SplitClone**). - [more details](#details-in-splitclone-phase) -1. Check filtered replication (**Phase: WaitForFilteredReplication**). - [more details](#details-in-waitforfilteredreplication-phase) -1. Check copied data integrity using *vtworker* batch process in the mode - to compare the source and destination data. (**Phase: SplitDiff**) -1. Migrate all the serving rdonly tablets in the original shards. - (**Phase: MigrateServedTypeRdonly**) -1. Migrate all the serving replica tablets in the original shards. - (**Phase: MigrateServedTypeReplica**) -1. Migrate all the serving master tablets in the original shards. 
- (**Phase: MigrateServedTypeMaster**) - [more details](#details-in-migrateservedtypemaste-phase) - -## Prerequisites - -You should complete the [Getting Started]({% link getting-started/local-instance.md %}) guide -(please finish all the steps before Try Vitess resharding) and have left -the cluster running. Then, please follow these steps before running -the resharding process: - -1. Configure sharding information. By running the command below, we tell - Vitess to shard the data using the page column through the provided VSchema. - - ``` sh - vitess/examples/local$ ./lvtctl.sh ApplyVSchema -vschema "$(cat vschema.json)" test_keyspace - ``` - -1. Bring up tablets for 2 additional shards: *test_keyspace/-80* and - *test_keyspace/80-* (you can learn more about sharding key range - [here]({% link user-guide/sharding.md %}#key-ranges-and-partitions)): - - ``` sh - vitess/examples/local$ ./sharded-vttablet-up.sh - ``` - - Initialize replication by electing the first master for each of the new shards: - - ``` sh - vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/-80 test-200 - vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/80- test-300 - ``` - - After this set up, you should see the shards on Dashboard page of vtctld UI - (http://localhost:15000). There should be 1 serving shard named "0" and - 2 non-serving shards named "-80" and "80-". Click the shard node, you can - inspect all its tablets information. - -1. Bring up a vtworker process, which can be connected through port 15033. - (The number of *vtworker* should be the same of original shards, - we start one vtworker process here since we have only one original shard - in this example.) - - ``` sh - vitess/examples/local$ ./vtworker-up.sh - ``` - - You can verify this *vtworker* process set up through http://localhost:15032/Debugging. - It should be pinged successfully. After you ping the vtworker, please click - "Reset Job". 
Otherwise, the vtworker is not ready for executing other tasks. - -## Horizontal resharding workflow - -### Create the workflow - -1. Open the *Workflows* section on the left menu of vtctld UI (http://localhost:15000). - Click the "+" button in the top right corner to open the "Create - a new Workflow" dialog. -1. Fill in the "Create a new Workflow" dialogue following the instructions - below (you can checkout our example [here](https://cloud.githubusercontent.com/assets/23492389/24314500/27f27988-109f-11e7-8e10-630bad14a286.png)): - * Select the "Skip Start" checkbox if you don't want to start the workflow - immediately after creation. If so, you need to click a "Start" button in - the workflow bar later to run the workflow. - * Open the "Factory Name" menu and select "Horizontal Resharding". This field - defines the type of workflow you want to create. - * Fill in *test_keyspace* in the "Keyspace" slot. - * Fill in *localhost:15033* in the "vtworker Addresses" slot. - * Unselect the "enable_approvals" checkbox if you don't want to manually - approve task executions for canarying. (We suggest you to keep the default - selected choice since this will enable the canary feature) -1. Click "Create" button at the bottom of the dialog. You will see a workflow - node created in the *Workflows* page if the creation succeeds. - The workflow has started running now if "Skip Start" is not selected. - -Another way to start the workflow is through the vtctlclient command, you can -also visualize the workflow on vtctld UI *Workflows* section after executing -the command: - -``` sh -vitess/examples/local$ ./lvtctl.sh WorkflowCreate -skip_start=false horizontal_resharding -keyspace=test_keyspace -vtworkers=localhost:15033 -enable_approvals=true -``` - -When creating the resharding workflow, the program automatically detect the -source shards and destination shards and create tasks for the resharding -process. 
After the creation, click the workflow node, you can see a list of -child nodes. Each child node represents a phase in the workflow (each phase -represents a step mentioned in [Overview]({% link user-guide/horizontal-sharding-workflow.md %}#overview)). -Further click a phase node, you can inspect tasks in this phase. -For example, in the "CopySchemaShard" phase, it includes tasks to copy schema -to 2 destination shards, therefore you can see task node "Shard -80" and -"Shard 80-". You should see a page similar to -[this](https://cloud.githubusercontent.com/assets/23492389/24313539/71c9c8ae-109a-11e7-9e4a-0c3e8ee8ba85.png). - -### Approvals of Tasks Execution (Canary feature) - -Once the workflow start to run (click the "Start" button if you selected -"Skip Start" and the workflow hasn't started yet.), you need to approve the -task execution for each phase if "enable_approvals" is selected. The approvals -include 2 stages. The first stage approves only the first task, which runs as -canarying. The second stage approves the remaining tasks. - -The resharding workflow runs through phases sequentially. Once the phase starts, -you can see the approval buttons for all the stages under the phase node (click -the phase node if you didn't see the approval buttons, you should see a page -like [this](https://cloud.githubusercontent.com/assets/23492389/24313613/c9508ef0-109a-11e7-8848-75a1ae18a6c5.png)). The -button is enabled when the corresponding task(s) are ready to run. Click the -enabled button to approve task execution, then you can see approved message -on the clicked button. The approval buttons are cleared after the phase has -finished. The next phase will only starts if its previous phase has finished -successfully. - -If the workflow is restored from a checkpoint, you will still see the -approval button with approved message when there are running tasks under this -approval. But you don't need to approve the same tasks again for a restarted -workflow. 
- -### Retry - -A "Retry" button will be enabled under the task node if the task failed (click -the task node if your job get stuck but don't see the Retry button). Click this -button if you have fixed the bugs and want to retry the failed task. You can -retry as many times as you want if the task continually failed. The workflow -can continue from your failure point once it is fixed. - -For example, you might forget to bring up a vtworker process. The task which -requires that vtworker process in SplitClone phase will fail. After you fix -this, click the retry button on the task node and the workflow will continue -to run. - -When a task failed, the execution of other tasks under this phase won't be -affected if this phase runs tasks in parallel (applied to phase -"CopySchemaShard", "SplitClone", "WaitForFilteredReplication"). For phases -that runs tasks sequentially, remaining unstarted tasks under this phase will -no long be executed. The phases afterwards will no longer be executed. - -### Checkpoint and Recovery - -The resharding workflow tracks the status for every task and checkpoint these -status into topology server whenever there is a status update. When a workflow -is stopped and restarted by loading the checkpoint in the topology, it can -continue to run all the unfinished tasks. - - -## Verify Results and Clean up - -After the resharding process, data in the original shard is identically copied -to new shards. Data updates will be visible on the new shards, but not on the -original shard. You should then see in the vtctld UI *Dashboard* page that shard -*0* becomes non-serving and shard *-80* and shard *80-* are serving shards. 
-Verify this for yourself: inspect the database content using following commands, -then add messages to the guestbook page (you can use script client.sh mentioned -[here]({% link getting-started/local-instance.md %}#run-a-client-application)) -and inspect using same commands: - -``` sh -# See what's on shard test_keyspace/0 -# (no updates visible since we migrated away from it): -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-300 "SELECT * FROM messages" -``` - -You can also checkout the *Topology* browser on vtctl UI. It shows you the -information of the keyrange of shard and their serving status. Each shard -should look like this - -[shard 0](https://cloud.githubusercontent.com/assets/23492389/24313876/072f61e6-109c-11e7-938a-23b8398958aa.png) - -[shard -80](https://cloud.githubusercontent.com/assets/23492389/24313813/bd11c824-109b-11e7-83d4-cca3f6093360.png) - -[shard 80-](https://cloud.githubusercontent.com/assets/23492389/24313743/7f9ae1c4-109b-11e7-997a-774f4f16e473.png) - -After you verify the result, we can remove the -original shard since all traffic is being served from the new shards: - -``` sh -vitess/examples/local$ ./vttablet-down.sh -``` - -Then we can delete the now-empty shard: - -``` sh -vitess/examples/local$ ./lvtctl.sh DeleteShard -recursive test_keyspace/0 -``` - -You should then see in the vtctld UI *Dashboard* page that shard *0* is gone. - -## Tear down and clean up - -Since you already cleaned up the tablets from the original unsharded example by -running `./vttablet-down.sh`, that step has been replaced with -`./sharded-vttablet-down.sh` to clean up the new sharded tablets. 
- -``` sh -vitess/examples/local$ ./vtworker-down.sh -vitess/examples/local$ ./vtgate-down.sh -vitess/examples/local$ ./sharded-vttablet-down.sh -vitess/examples/local$ ./vtctld-down.sh -vitess/examples/local$ ./zk-down.sh -``` - -## Reference - -You can checkout the old version tutorial [here]({% link user-guide/horizontal-sharding.md %}). -It walks you through the resharding process by manually executing commands. - -### Details in SplitClone phase - -*vtworker* copies data from a paused snapshot. It will pause replication on -one rdonly (offline processing) tablet to serve as a consistent snapshot of -the data. The app can continue without downtime, since live traffic is served -by replica and master tablets, which are unaffected. Other batch jobs will -also be unaffected, since they will be served only by the remaining, un-paused -rdonly tablets. - -During the data copying, *vtworker* streams the data from a single source to -multiple destinations, routing each row based on its *keyspace_id*. It can -automatically figure out which shards to use as the destinations based on the -key range that needs to be covered. In our example, shard 0 covers the entire -range, so it identifies -80 and 80- as the destination shards, since they -combine to cover the same range. - -### Details in WaitForFilteredReplication phase - -Once the copying from a paused snapshot (phase SplitClone) has finished, -*vtworker* turns on [filtered replication]({% link user-guide/sharding.md %}#filtered-replication), -which allows the destination shards to catch up on updates that have continued -to flow in from the app since the time of the snapshot. After the destination -shards are caught up, they will continue to replicate new updates. - -### Details in MigrateServedTypeMaster phase - -During the *master* migration, the original shard masters will first stop -accepting updates. 
Then the process will wait for the new shard masters to -fully catch up on filtered replication before allowing them to begin serving. -After the master traffic is migrated, the filtered replication will be stopped. -Data updates will be visible on the new shards, but not on the original shards. diff --git a/doc/Messaging.md b/doc/Messaging.md deleted file mode 100644 index f605002c49d..00000000000 --- a/doc/Messaging.md +++ /dev/null @@ -1,209 +0,0 @@ -# Vitess Messaging - -## Overview - -Vitess messaging gives the application an easy way to schedule and manage work -that needs to be performed asynchronously. Under the covers, messages are -stored in a traditional MySQL table and therefore enjoy the following -properties: - -* **Scalable**: Because of vitess's sharding abilities, messages can scale to - very large QPS or sizes. -* **Guaranteed delivery**: A message will be indefinitely retried until a - successful ack is received. -* **Non-blocking**: If the sending is backlogged, new messages continue to be - accepted for eventual delivery. -* **Adaptive**: Messages that fail delivery are backed off exponentially. -* **Analytics**: The retention period for messages is dictated by the - application. One could potentially choose to never delete any messages and - use the data for performing analytics. -* **Transactional**: Messages can be created or acked as part of an existing - transaction. The action will complete only if the commit succeeds. - -The properties of a message are chosen by the application. However, every -message needs a uniquely identifiable key. If the messages are stored in a -sharded table, the key must also be the primary vindex of the table. - -Although messages will generally be delivered in the order they're created, -this is not an explicit guarantee of the system. The focus is more on keeping -track of the work that needs to be done and ensuring that it was performed. -Messages are good for: - -* Handing off work to another system. 
-* Recording potentially time-consuming work that needs to be done - asynchronously. -* Scheduling for future delivery. -* Accumulating work that could be done during off-peak hours. - -Messages are not a good fit for the following use cases: - -* Broadcasting of events to multiple subscribers. -* Ordered delivery. -* Real-time delivery. - -## Creating a message table - -The current implementation requires a fixed schema. This will be made more -flexible in the future. There will also be a custom DDL syntax. For now, a -message table must be created like this: - -``` -create table my_message( - time_scheduled bigint, - id bigint, - time_next bigint, - epoch bigint, - time_created bigint, - time_acked bigint, - message varchar(128), - primary key(time_scheduled, id), - unique index id_idx(id), - index next_idx(time_next, epoch) -) comment 'vitess_message,vt_ack_wait=30,vt_purge_after=86400,vt_batch_size=10,vt_cache_size=10000,vt_poller_interval=30' -``` - -The application-related columns are as follows: - -* `id`: can be any type. Must be unique. -* `message`: can be any type. -* `time_scheduled`: must be a bigint. It will be used to store unix time in - nanoseconds. If unspecified, the `Now` value is inserted. - -The above indexes are recommended for optimum performance. However, some -variation can be allowed to achieve different performance trade-offs. - -The comment section specifies additional configuration parameters. The fields -are as follows: - -* `vitess_message`: Indicates that this is a message table. -* `vt_ack_wait=30`: Wait for 30s for the first message ack. If one is not - received, resend. -* `vt_purge_after=86400`: Purge acked messages that are older than 86400 - seconds (1 day). -* `vt_batch_size=10`: Send up to 10 messages per RPC packet. -* `vt_cache_size=10000`: Store up to 10000 messages in the cache. If the demand - is higher, the rest of the items will have to wait for the next poller cycle. 
-* `vt_poller_interval=30`: Poll every 30s for messages that are due to be sent. - -If any of the above fields are missing, vitess will fail to load the table. No -operation will be allowed on a table that has failed to load. - -## Enqueuing messages - -The application can enqueue messages using an insert statement: - -``` -insert into my_message(id, message) values(1, 'hello world') -``` - -These inserts can be part of a regular transaction. Multiple messages can be -inserted to different tables. Avoid accumulating too many big messages within a -transaction as it consumes memory on the VTTablet side. At the time of commit, -memory permitting, all messages are instantly enqueued to be sent. - -Messages can also be created to be sent in the future: - -``` -insert into my_message(id, message, time_scheduled) values(1, 'hello world', :future_time) -``` - -`future_time` must be the unix time expressed in nanoseconds. - -## Receiving messages - -Processes can subscribe to receive messages by sending a `MessageStream` -request to VTGate. If there are multiple subscribers, the messages will be -delivered in a round-robin fashion. Note that this is not a broadcast; Each -message will be sent to at most one subscriber. - -The format for messages is the same as a vitess `Result`. This means that -standard database tools that understand query results can also be message -recipients. Currently, there is no SQL format for subscribing to messages, but -one will be provided soon. - -### Subsetting - -It's possible that you may want to subscribe to specific shards or groups of -shards while requesting messages. This is useful for partitioning or load -balancing. The `MessageStream` API allows you to specify these constraints. The -request parameters are as follows: - -* `Name`: Name of the message table. -* `Keyspace`: Keyspace where the message table is present. -* `Shard`: For unsharded keyspaces, this is usually "0". However, an empty - shard will also work. 
For sharded keyspaces, a specific shard name can be - specified. -* `KeyRange`: If the keyspace is sharded, streaming will be performed only from - the shards that match the range. This must be an exact match. - -## Acknowledging messages - -A received (or processed) message can be acknowledged using the `MessageAck` -API call. This call accepts the following parameters: - -* `Name`: Name of the message table. -* `Keyspace`: Keyspace where the message table is present. This field can be - empty if the table name is unique across all keyspaces. -* `Ids`: The list of ids that need to be acked. - -Once a message is successfully acked, it will never be resent. - -## Exponential backoff - -A message that was successfully sent will wait for the specified ack wait time. -If no ack is received by then, it will be resent. The next attempt will be 2x -the previous wait, and this delay is doubled for every attempt. - -## Purging - -Messages that have been successfully acked will be deleted after their age -exceeds the time period specified by `vt_purge_after`. - -## Advanced usage - -The `MessageAck` functionality is currently an API call and cannot be used -inside a transaction. However, you can ack messages using a regular DML. It -should look like this: - -``` -update my_message set time_acked = :time_acked, time_next = null where id in ::ids and time_acked is null -``` - -You can manually change the schedule of existing messages with a statement like -this: - -``` -update my_message set time_next = :time_next, epoch = :epoch where id in ::ids and time_acked is null -``` - -This comes in handy if a bunch of messages had chronic failures and got -postponed to the distant future. If the root cause of the problem was fixed, -the application could reschedule them to be delivered immediately. You can also -optionally change the epoch. Lower epoch values increase the priority of the -message and the back-off is less aggressive. 
- -You can also view messages using regular `select` queries. - -## Undocumented features - -These are features that were previously known limitations, but have since been supported -and are awaiting further documentation. - -* Flexible columns: Allow any number of application defined columns to be in - the message table. -* No ACL check for receivers: To be added. -* Monitoring support: To be added. -* Dropped tables: The message engine does not currently detect dropped tables. - -## Known limitations - -The message feature is currently in alpha, and can be improved. Here is the -list of possible limitations/improvements: - -* Proactive scheduling: Upcoming messages can be proactively scheduled for - timely delivery instead of waiting for the next polling cycle. -* Changed properties: Although the engine detects new message tables, it does - not refresh properties of an existing table. -* A `SELECT` style syntax for subscribing to messages. -* No rate limiting. -* Usage of partitions for efficient purging. diff --git a/doc/Monitoring.md b/doc/Monitoring.md deleted file mode 100644 index aa1a163d17a..00000000000 --- a/doc/Monitoring.md +++ /dev/null @@ -1,43 +0,0 @@ -# Vitess Monitoring - -This page explains the current state of Vitess metrics monitoring, and potential future work in the area. - -## Current state of monitoring - -There are currently three main ways that a Vitess cluster can be monitored. Depending on your needs, you can use any of the following methods: - -### 1. Vitess status pages - -The status HTML pages of various Vitess components can be accessed by pointing your browser to `http://:/debug/status`. The status pages will often display some basic, but useful, information for monitoring. For example, the status page of a vttablet will show the QPS graph for the past few minutes. - -Viewing a status page can be useful since it works out of the box, but it only provides very basic monitoring capabilities. - -### 2. 
Pull-based metrics system - -Vitess uses Go’s [expvar package](https://golang.org/pkg/expvar/) to expose various metrics, with the expectation that a user can configure a pull-based metrics system to ingest those metrics. Metrics are published to `http://:/debug/vars` as JSON key-value pairs, which should be easy for any metrics system to parse. - -Scraping Vitess variables is a good way to integrate Vitess into an existing monitoring system, and is useful for building up detailed monitoring dashboards. It is also the officially supported way for monitoring Vitess. - -### 3. Push-based metrics system - -Vitess also includes support for push-based metrics systems via plug-ins. Each Vitess component would need to be run with the `--emit_stats` flag. - -By default, the stats_emit_period is 60s, so each component will push stats to the selected backend every minute. This is configurable via the `--stats_emit_period` flag. - -Vitess has preliminary plug-ins to support OpenTSDB as a push-based metrics backend. - -It should be fairly straightforward to write your own plug-in, if you want to support a different backend. The plug-in package simply needs to implement the `PushBackend` interface of the `stats` package. For an example, you can see the [OpenTSDB plugin](https://github.com/vitessio/vitess/blob/master/go/stats/opentsdb/opentsdb.go). - -Once you’ve written the backend plug-in, you also need to register the plug-in from within all the relevant Vitess binaries. An example of how to do this can be seen in [this pull request](https://github.com/vitessio/vitess/pull/469). - -You can then specify that Vitess should publish stats to the backend that you’re targeting by using the `--stats_backend` flag. - -Connecting Vitess to a push-based metrics system can be useful if you’re already running a push-based system that you would like to integrate into. 
More discussion on using a push vs pull based monitoring system can be seen here: [http://www.boxever.com/push-vs-pull-for-monitoring](https://www.boxever.com/push-vs-pull-for-monitoring) - -## Monitoring with Kubernetes - -The existing methods for integrating metrics are not supported in a Kubernetes environment by the Vitess team yet, but are on the roadmap for the future. However, it should be possible to get the Prometheus backend working with Kubernetes, similar to how [Heapster for Kubernetes works](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/prometheus). - -In the meantime, if you run into issues or have questions, please post on our [forum](https://groups.google.com/forum/#!forum/vitess). - - diff --git a/doc/MySQLServerProtocol.md b/doc/MySQLServerProtocol.md deleted file mode 100644 index fc52b1638b4..00000000000 --- a/doc/MySQLServerProtocol.md +++ /dev/null @@ -1,17 +0,0 @@ -# MySQL Binary Protocol - -Vitess supports MySQL binary protocol. This allows existing applications to connect to Vitess directly without any change, or without using a new driver or connector. This is now the recommended and the most popular protocol for connecting to Vitess. - -# Features of RPC protocol not supported by SQL protocol - -### Bind Variables -The RPC protocol supports bind variables which allows Vitess to cache query plans providing much better execution times. - -### Event Tokens -The RPC protocols allows you to use event tokens to get the latest binlog position. These can be used for cache invalidation. - -### Update Stream -Update stream allows you to subscribe to changing rows. - -### Query Multiplexing -Ability to multiplex multiple request/responses on the same TCP connection. 
\ No newline at end of file diff --git a/doc/Production.md b/doc/Production.md deleted file mode 100644 index 1b49fca93b9..00000000000 --- a/doc/Production.md +++ /dev/null @@ -1,34 +0,0 @@ -# Production setup -Setting up vitess in production will depend on many factors. -Here are some initial considerations: - -* *Global Transaction IDs*: Vitess requires a version of MySQL -that supports GTIDs. -We currently support MariaDB 10.0 and MySQL 5.6. -* *Firewalls*: Vitess tools and servers assume that they -can open direct TCP connection to each other. If you have -firewalls between your servers, you may have to add exceptions -to allow these communications. -* *Authentication*: If you need authentication, you -need to setup SASL, which is supported by Vitess. -* *Encryption:* Vitess RPC servers support SSL. -* *MySQL permissions*: Vitess currently assumes that all -application clients have uniform permissions. -The connection pooler opens a number of connections under -the same user (vt_app), and rotates them for all requests. -Vitess management tasks use a different user name (vt_dba), -which is assumed to have all administrative privileges. -* *Client Language*: We currently support -Python and Go. -It's not too hard to add support for more languages, -and we are open to contributions in this area. - -## Deploying in Kubernetes - -See the [Getting Started]({% link getting-started/index.md %}) guide. - -## Deploying on bare metal - -See the -[Local Setup](https://github.com/vitessio/vitess/tree/master/examples/local) -scripts for examples of how to bring up a Vitess cluster manually. diff --git a/doc/ProductionPlanning.md b/doc/ProductionPlanning.md deleted file mode 100644 index 4f0ab2d9d15..00000000000 --- a/doc/ProductionPlanning.md +++ /dev/null @@ -1,73 +0,0 @@ -## Provisioning - -### Estimating total resources - -Although Vitess helps you scale indefinitely, the various layers do consume CPU and memory. 
Currently, the cost of Vitess servers is dominated by the RPC framework which we use: gRPC (gRPC is a relatively young product). So, Vitess servers are expected to get more efficient over time as there are improvements in gRPC as well as the Go runtime. For now, you can use the following rules of thumb to budget resources for Vitess: - -Every MySQL instance that serves traffic requires one VTTablet, which is in turn expected to consume an equal amount of CPU. So, if MySQL consumes 8 CPUs, VTTablet is likely going to consume another 8. - -The memory consumed by VTTablet depends on QPS and result size, but you can start off with the rule of thumb of requesting 1 GB/CPU. - -As for VTGate, double the total number of CPUs you’ve allocated for VTTablet. That should be approximately how much the VTGates are expected to consume. In terms of memory, you should again budget about 1 GB/CPU (needs verification). - -Vitess servers will use disk space for their logs. A smoothly running server should create very little log spam. However, log files can grow big very quickly if there are too many errors. It will be wise to run a log purger daemon if you’re concerned about filling up disk. - -Vitess servers are also likely to add about 2 ms of round-trip latency per MySQL call. This may result in some hidden costs that may or may not be negligible. On the app side, if a significant time is spent making database calls, then you may have to run additional threads or workers to compensate for the delay, which may result in additional memory requirements. - -The client driver CPU usage may be different from a normal MySQL driver. That may require you to allocate more CPU per app thread. - -On the server side, this could result in longer running transactions, which could weigh down MySQL. - -With the above numbers as starting point, the next step will be to set up benchmarks that generate production representative load. 
If you cannot afford this luxury, you may have to go into production with some over-provisioning, just in case. - -### Mapping topology to hardware - -The different Vitess components have different resource requirements e.g. vtgate requires little disk in comparison to vttablet. Therefore, the components should be mapped to different machine classes for optimal resource usage. If you’re using a cluster manager (such as Kubernetes), the automatic scheduler will do this for you. Otherwise, you have to allocate physical machines and plan out how you’re going to map servers onto them. - -Machine classes needed: - -#### MySQL + vttablet - -You’ll need database-class machines that are likely to have SSDs, and enough RAM to fit the MySQL working set in buffer cache. Make sure that there will be sufficient CPU left for VTTablet to run on them. - -The VTTablet provisioning will be dictated by the MySQL instances they run against. However, soon after launch, it’s recommended to shard these instances to a data size of 100-300 GB. This should also typically reduce the per-MySQL CPU usage to around 2-4 CPUS depending on the load pattern. - -#### VTGate - -For VTGates, you’ll need a class of machines that would be CPU heavy, but may be light on memory usage, and should require normal hard disks, for binary and logs only. - -It’s advisable to run more instances than there are machines. VTGates are happiest when they’re consuming between 2-4 CPUs. So, if your total requirement was 400 CPUs, and your VTGate class machine has 48 cores each, you’ll need about 10 such machines and you’ll be running about 10 VTGates per box. - -You may have to add a few more app class machines to absorb any additional CPU and latency overheads. - -## Lock service setup - -The Lock Service should be running, and both the global and local instances -should be up. See the -[Topology Service]({% link user-guide/topology-service.md %}) -document for more information. 
- -Each lock service implementation supports a couple configuration command line -parameters, they need to be specified for each Vitess process. - -For sizing purposes, the Vitess processes do not access the lock service very -much. Each *vtgate* process keeps a few watches on a few local nodes (VSchema -and SrvKeyspace). Each *vttablet* process will keep its own Tablet record up to -date, but it usually doesn't change. The *vtctld* process will access it a lot -more, but only on demand to display web pages. - -As mentioned previously, if the setup is only in one cell, the global and local -instances can be combined. Just use different top-level directories. - -## Production testing - -Before running Vitess in production, please make yourself comfortable first with the different operations. We recommend to go through the following scenarios on a non-production system. - -Here is a short list of all the basic workflows Vitess supports: - -* [Failover / Reparents]({% link user-guide/reparenting.md %}) -* [Backup/Restore]({% link user-guide/backup-and-restore.md %}) -* [Schema Management]({% link user-guide/schema-management.md %}) / [Schema Swap]({% link user-guide/schema-swap.md %}) -* [Resharding]({% link user-guide/sharding.md %}) / [Horizontal Resharding Tutorial]({% link user-guide/horizontal-sharding.md %}) -* [Upgrading]({% link user-guide/upgrading.md %}) - diff --git a/doc/ReferenceTables.md b/doc/ReferenceTables.md deleted file mode 100644 index ea262f5d695..00000000000 --- a/doc/ReferenceTables.md +++ /dev/null @@ -1,153 +0,0 @@ -# Reference Tables - -This document describes a proposed design and implementation guidelines for -the `Reference Tables` Vitess feature. - -The idea is to have a `reference keyspace` that contains a small number of -`reference tables`, and replicate these tables to every shard of another -keyspace, the `destination keyspace`. Any update to the reference tables will be -propagated to the destination keyspace. 
The reference tables in the destination -keyspace can then be used directly, in read-only mode (in `JOIN` queries for -instance). This provides for much better performance than cross-keyspace joins. - -Since the data is replicated to every shard on the destination keyspace, the -write QPS on the reference keyspace is also applied to every shard on the -destination keyspace. So the change rate in the reference keyspace cannot be -very high, and so let's also assume it is not sharded. - -Vitess already has all the right components to support this scenario, it's just -a matter of plumbing it the right way. Let's explore the required changes. - -## Replication Setup - -We can copy all the data and then setup `Filtered Replication` between the -reference keyspace and each shard of the destination keyspace. This is really -just a corner case of the vertical splits Vitess already supports. - -Action items: - -* First, this setup probably needs to be explicitly mentioned somewhere in the - topology, not just as SourceShard objects in the destination keyspace, so - Vitess can know about this setup at a higher level. Let's add a `repeated - ReferenceKeyspace` field to the Keyspace object. Each `ReferenceKeyspace` - object contains the name of the reference keyspace, the list of tables to - copy, and the UID of the SourceShard entry (the same UID in all shards). By - making this a repeated field, the destination keyspace should be able to - support multiple reference keyspaces to copy data from, if necessary. - -* `vtctl CopySchemaShard` can already be used to copy the schema from the - reference keyspace to each destination shard. - -* A new vtworker data copy job needs to be added. `vtworker VerticalSplitClone` - would be a good start, but the new copy has a few special requirements: the - destination keyspace needs the data in all its shards, and the write rate - cannot cause the destination shards to be overloaded (or lag behind on - replication). 
This job would also populate an entry in the - `\_vt/blp\_checkpoint` table in the destination shards. - -* Setting up Filtered Replication after the copy is easy, each destination Shard - just needs to have a SourceShard with the proper data, and after a - RefreshTablet, the destination masters will start the replication. - -All these steps can be supported by a vtctld workflow. - -## Supporting Horizontal Resharding in the Destination Keyspace - -We still need to support horizontal resharding in the Destination Keyspace while -the Reference Tables feature is enabled. - -Action items: - -* Each step of the process would know what to do because of the - `ReferenceKeyspace` entries in the destination keyspace. - -* `vtctl CopySchemaShard` needs to also copy the schema of the reference tables. - -* `vtworker SplitClone` needs to also copy all of the reference table data, and - the `\_vt/blp\_checkpoint` entry for the reference keyspace. It needs to do - that copy from the first source shard to each destination shard only once. So - in case of a split, the source shard data is copied to each destination - shard. In case of a merge, only the first source shard data is copied to the - destination shard. - -* Enabling filtered replication on the destination shards needs to not use the - same UID for replication as the reference keyspace entries. Right now, their - UID are hardcoded to start at 0. But since the reserved UIDs are documented in - the `ReferenceKeyspace` entries, it's easy. - -* At this point, the destination shards will also replicate from the reference - keyspace. When the `vtctl MigrateServedType master` command is issued, it - needs to just remove the horizontal resharding Filtered Replication entries, - not the `ReferenceKeyspace` entries entries. 
- -## Other Use Cases - -Other scenarios might also need to be supported, or explicitly disabled: - -* Simple schema changes, or complicated Schema Swap in the reference keyspace: - They would also need to be applied to the destination keyspace, the same way. - -* Vertical Split of the reference keyspace: Since it is replicated, splitting it - will be more complicated. - -## Query Routing - -This would be handled by the vtgate and the VSchema. Once the reference tables -are documented in the VSchema, vtgate will know to do the following: - -* DMLs on the reference tables are routed to the reference keyspace. - -* Select queries on the reference tables only are also routed to the reference - keyspace. - -* JOIN queries between reference tables and destination keyspace tables can be - routed only to the right destination keyspace (based on that keyspace sharding - situation). - -Note this introduces some corner cases: for instance, if the client is asking -for a JOIN between reference tables and destination keyspace tables, with tablet -type `master`. Routing this to the destination keyspace would satisfy the -critical read for the destination tables, but not for the reference -tables. vtgate may need to perform the JOIN to both masters at this point. - -Action Items: - -* Find the right way to represent reference tables in the VSchema. - -* Implement corresponding query routing. - -## Notes - -### Vitess Keyspace vs MySQL Database - -This may force us to revisit the use of databases in our tablets. The current -assumption is that a keyspace only has one MySQL database (with a name usually -derived from the keyspace name with a `vt_` prefix, but that can also be -changed): - -* When vttablet connects to MySQL for data queries, it uses that database name - by default. - -* The VSchema also maps tables to keyspaces, so it can just send queries that - have no keyspace to the right shard (which in turns is configured properly for - that database). 
- -* Vitess' Filtered Replication only replicates data related to that single - database. The database name has to be the same when we horizontally split a - keyspace, so statements from the source shards can be applied on the - destination shards. - -* Vitess' Query Service only loads the schema for that single database. - -Maybe it's time to change this assumption: - -* A keyspace could be defined as a group of databases, each having a group of - tables. - -* When addressing a table, we could support the `keyspace.database.table` - syntax. - -* We could support moving databases from one keyspace to another. - -But maybe this is too many indirections for nothing? Saying one keyspace is one -database may be just the complexity we need. diff --git a/doc/Reparenting.md b/doc/Reparenting.md deleted file mode 100644 index 5f0cf0ae35a..00000000000 --- a/doc/Reparenting.md +++ /dev/null @@ -1,185 +0,0 @@ -**Reparenting** is the process of changing a shard's master tablet -from one host to another or changing a slave tablet to have a -different master. Reparenting can be initiated manually -or it can occur automatically in response to particular database -conditions. As examples, you might reparent a shard or tablet during -a maintenance exercise or automatically trigger reparenting when -a master tablet dies. - -This document explains the types of reparenting that Vitess supports: - -* *[Active reparenting](#active-reparenting)* occurs when the Vitess - toolchain manages the entire reparenting process. -* *[External reparenting](#external-reparenting)* occurs when another tool - handles the reparenting process, and the Vitess toolchain just updates its - topology server, replication graph, and serving graph to accurately reflect - master-slave relationships. - -**Note:** The InitShardMaster command defines the initial -parenting relationships within a shard. 
That command makes the specified -tablet the master and makes the other tablets in the shard slaves that -replicate from that master. - -## MySQL requirements - -### GTIDs -Vitess requires the use of global transaction identifiers -([GTIDs](https://dev.mysql.com/doc/refman/5.6/en/replication-gtids-concepts.html)) for its operations: - -* During active reparenting, Vitess uses GTIDs to initialize the - replication process and then depends on the GTID stream to be - correct when reparenting. (During external reparenting, Vitess - assumes the external tool manages the replication process.) -* During resharding, Vitess uses GTIDs for - [filtered replication]({% link user-guide/sharding.md %}#filtered-replication), - the process by which source tablet data is transferred to the proper - destination tablets. - -### Semisynchronous replication - -Vitess does not depend on -[semisynchronous replication](https://dev.mysql.com/doc/refman/5.6/en/replication-semisync.html) but does work if it is implemented. -Larger Vitess deployments typically do implement semisynchronous replication. - -## Active Reparenting - -You can use the following [vtctl]({% link reference/vtctl.md %}) -commands to perform reparenting operations: - -* PlannedReparentShard -* EmergencyReparentShard - -Both commands lock the Shard record in the global topology server. The two commands -cannot run in parallel, nor can either command run in parallel with the -InitShardMaster command. - -The two commands are both dependent on the global topology server being -available, and they both insert rows in the topology server's -\_vt.reparent\_journal table. As such, you can review -your database's reparenting history by inspecting that table. - -### PlannedReparentShard: Planned reparenting - -The PlannedReparentShard command reparents a healthy master -tablet to a new master. The current and new master must both be up and -running. - -This command performs the following actions: - -1. 
Puts the current master tablet in read-only mode. -1. Shuts down the current master's query service, which is the part of - the system that handles user SQL queries. At this point, Vitess does - not handle any user SQL queries until the new master is configured - and can be used a few seconds later. -1. Retrieves the current master's replication position. -1. Instructs the master-elect tablet to wait for replication data and - then begin functioning as the new master after that data is fully - transferred. -1. Ensures replication is functioning properly via the following steps: - 1. On the master-elect tablet, insert an entry in a test table - and then update the global Shard object's - MasterAlias record. - 1. In parallel on each slave, including the old master, set the new - master and wait for the test entry to replicate to the slave tablet. - (Slave tablets that had not been replicating before the command was - called are left in their current state and do not start replication - after the reparenting process.) - 1. Start replication on the old master tablet so it catches up to the - new master. - -In this scenario, the old master's tablet type transitions to -spare. If health checking is enabled on the old master, -it will likely rejoin the cluster as a replica on the next health -check. To enable health checking, set the -target\_tablet\_type parameter when starting a tablet. -That parameter indicates what type of tablet that tablet tries to be -when healthy. When it is not healthy, the tablet type changes to -spare. - -### EmergencyReparentShard: Emergency reparenting - -The EmergencyReparentShard command is used to force -a reparent to a new master when the current master is unavailable. -The command assumes that data cannot be retrieved from the current -master because it is dead or not working properly. - -As such, this command does not rely on the current master at all -to replicate data to the new master. 
Instead, it makes sure that -the master-elect is the most advanced in replication within all -of the available slaves. - -**Important:** Before calling this command, you must first identify -the slave with the most advanced replication position as that slave -must be designated as the new master. You can use the -[vtctl ShardReplicationPositions]({% link reference/vtctl.md %}#shardreplicationpositions) -command to determine the current replication positions of a shard's slaves. - -This command performs the following actions: - -1. Determines the current replication position on all of the slave - tablets and confirms that the master-elect tablet has the most - advanced replication position. -1. Promotes the master-elect tablet to be the new master. In addition to - changing its tablet type to master, the master-elect - performs any other changes that might be required for its new state. -1. Ensures replication is functioning properly via the following steps: - 1. On the master-elect tablet, Vitess inserts an entry in a test table - and then updates the MasterAlias record of the global - Shard object. - 1. In parallel on each slave, excluding the old master, Vitess sets the - master and waits for the test entry to replicate to the slave tablet. - (Slave tablets that had not been replicating before the command was - called are left in their current state and do not start replication - after the reparenting process.) - -## External Reparenting - -External reparenting occurs when another tool handles the process -of changing a shard's master tablet. After that occurs, the tool -needs to call the -[vtctl TabletExternallyReparented]({% link reference/vtctl.md %}#tabletexternallyreparented) -command to ensure that the topology server, replication graph, and serving -graph are updated accordingly. - -That command performs the following operations: - -1. Locks the shard in the global topology server. -1. Reads the Shard object from the global topology server. -1. 
Reads all of the tablets in the replication graph for the shard. - Vitess does allow partial reads in this step, which means that Vitess - will proceed even if a data center is down as long as the data center - containing the new master is available. -1. Ensures that the new master's state is updated correctly and that the - new master is not a MySQL slave of another server. It runs the MySQL - show slave status command, ultimately aiming to confirm - that the MySQL reset slave command already executed on - the tablet. -1. Updates, for each slave, the topology server record and replication - graph to reflect the new master. If the old master does not return - successfully in this step, Vitess changes its tablet type to - spare to ensure that it does not interfere with ongoing - operations. -1. Updates the Shard object to specify the new master. - -The TabletExternallyReparented command fails in the following -cases: - -* The global topology server is not available for locking and - modification. In that case, the operation fails completely. - -Active reparenting might be a dangerous practice in any system -that depends on external reparents. You can disable active reparents -by starting vtctld with the ---disable\_active\_reparents flag set to true. -(You cannot set the flag after vtctld is started.) - -## Fixing Replication - -A tablet can be orphaned after a reparenting if it is unavailable -when the reparent operation is running but then recovers later on. -In that case, you can manually reset the tablet's master to the -current shard master using the -[vtctl ReparentTablet]({% link reference/vtctl.md %}#reparenttablet) -command. You can then restart replication on the tablet if it was stopped -by calling the [vtctl StartSlave]({% link reference/vtctl.md %}#startslave) -command. 
diff --git a/doc/RowBasedReplication.md b/doc/RowBasedReplication.md deleted file mode 100644 index f4bba3611d2..00000000000 --- a/doc/RowBasedReplication.md +++ /dev/null @@ -1,168 +0,0 @@ -# Row Based Replication - -In Vitess 2.2, we are adding preliminary support for Row Based Replication. This -document explains how we are managing it and how it affects various Vitess -features. - -See the [Vitess and Replication]({% link user-guide/vitess-replication.md %}) document -for an introduction on various types of replication and how it affects Vitess. - -## MySQL Row Based Replication - -With Row Based replication, a more compact binary version of the rows affected -are sent through the replication stream, instead of the SQL statements. The -slaves then do not spend any time parsing the SQL, or performing any complex SQL -operations (like `where` clauses). They can just apply the new rows directly. - -A few binlog events are used: - -* Table Map event: describes a table that is affected by the next - events. Contains the database and table name, the number of columns, and the - Type for each column. It does not contain the individual column names, nor the - flags for each column (so it is impossible to differentiate signed vs unsigned - integers for instance). - -* Write Rows: equivalent of Insert. - -* Update Rows: change the values of some rows. - -* Delete Rows: delete the provided rows. - -The -[`binlog-row-image` option](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image) can -be used to control which rows are used to identify the columns for the Update -and Delete Rows events. The default setting for that option is to log all -columns. - -## Vitess Use of MySQL Replication Stream - -Vitess uses the Replication Stream in a number of places. This part explains how -we use RBR for these. 
- -### vttablet Replication Stream Watcher - -This is enabled by the `watch_replication_stream` option, and is used -by [Update Stream]({% link user-guide/update-stream.md %}). It only cares about the -GTIDs for the events, so it is unaffected by the use of RBR. - -*Note*: the current vttablet also reloads the schema when it sees a DDL in the -stream. See below for more information on this. DDLs are however not represented -in RBR, so this is an orthogonal issue. - -### Update Stream - -The current implementation uses comments in the original SQL (in SQR) to provide -the primary key of the column that is being changed. - -We are changing this to also parse the RBR events, and extract the primary key -value. - -*Note*: this means we need accurate schema information. See below. - -### Filtered Replication - -This is used during horizontal and vertical resharding, to keep source and -destination shards up to date. - -We need to transform the RBR events into SQL statements, filter them based -either on keyspace_id (horizontal resharding) or table name (vertical -resharding), and apply them. - -For horizontal splits, we need to understand the VSchema to be able to find the -primary VIndex used for sharding. - -*Note*: this again means we need accurate schema information. We could do one of -two things: - -* Send all statements to all destination shards, and let them do the - filtering. They can have accurate schema information if they receive and apply - all schema changes through Filtered Replication. - -* Have the filtering be done on the stream server side, and assume the schema - doesn't change in incompatible ways. As this is simpler for now, that's the - option we're going with. - -## Database Schema Considerations - -### Interpreting RBR Events - -A lot of the work to interpret RBR events correctly requires knowledge of the -table's schema. 
However, this introduces the possibility of inconsistencies -during schema changes: the current schema for a table might be newer than the -schema an older replication stream event was using. - -For the short term, Vitess will not deal very gracefully with this scenario: we -will only support the case where the current schema for a table has exactly the -same columns as all events in the binlog, plus some other optional columns that -are then unused. That way, it is possible to add columns to tables without -breaking anything. - -Note if the main use case is Filtered Replication for resharding, this -limitation only exists while the resharding process is running. It is somewhat -easy to not change the schema at the same time as resharding is on-going. - -### Applying Schema Changes - -When using -RBR, [Schema Swap]({% link user-guide/vitess-replication.md %}#vitess-schema-swap) -becomes useless, as replication between hosts with different schemas will most -likely break. This is however an existing limitation that is already known and -handled by MySQL DBAs. - -Vitess at this point does not provide an integrated way of applying involved -schema changes through RBR. A number of external tools however already exist to -handle this case, like [gh-ost](https://github.com/github/gh-ost). - -We have future plans to: - -* Integrate with a tool like gh-ost to provide a seamless schema change story. - -* Maintain a history of the schema changes that happen on all shards, so events - can be parsed correctly in all cases. - -## Unsupported Features - -This part describes the features that are not supported for RBR in Vitess as of -March 2017: - -* *Fractional timestamps for MariaDB*: not supported. This affects the objects - of type `TIMESTAMP`, `TIME` and `DATETIME`. The way that feature is - implemented in MariaDB, the binary logs do not contain enough information to - be parsed, but instead MariaDB relies on the schema knowledge. This is very - fragile. 
MySQL 5.6+ added new data types, and these are supported. - -* *JSON type in MySQL 5.7+*: the representation of these in the binlogs is a - blob containing indexed binary data. Re-building the SQL version of the data, - so it can be re-inserted during resharding, is not supported yet. It wouldn't - however be a lot of work, with other libraries also supporting this, and the - C++ MySQL code being well written and easy to read. See for instance - https://github.com/shyiko/mysql-binlog-connector-java/pull/119 - -* *Timezones support*: the binary logs store timestamps in UTC. When converting - these to SQL, we print the UTC value. If the server is not in UTC, that will - result in data corruption. *Note*: we are working on a fix for that one. - -## Update Stream Extensions - -[Update Stream]({% link user-guide/update-stream.md %}) can be changed to contain both -old and new values of the rows being changed. Again the values will depend on -the schema. We will also make this feature optional, so if the client is using -this for Primary Key based cache invalidation for instance, no extra unneeded -data is sent. - -This can be used to re-populate a cache with Update Stream, instead of -invalidating it, by putting the new values directly in there. - -Then, using this in conjunction with `binlog-row-image` would help provide a -feature-complete way of always getting all changes on rows. It would also help -handle Update Stream corner cases that replay events during resharding, when -switching traffic from old to new shards. - -## Vttablet Simplifications - -A lot of the work done by vttablet now is to find the Primary Key of the -modified rows, to rewrite the queries in an efficient way and tag each statement -with the Primary Key. None of this may be necessary with RBR. - -We plan to eventually add a `rbr_mode` flag to vttablet to disable all the -things it can skip if RBR is used. 
diff --git a/doc/ScalabilityPhilosophy.md b/doc/ScalabilityPhilosophy.md deleted file mode 100644 index f9ad7b5a6c2..00000000000 --- a/doc/ScalabilityPhilosophy.md +++ /dev/null @@ -1,247 +0,0 @@ -Scalability problems can be solved using many approaches. This document describes Vitess’ approach to address these problems. - -## Small instances - -When deciding to shard or break databases up into smaller parts, it’s tempting to break them just enough that they fit in one machine. In the industry, it’s common to run only one MySQL instance per host. - -Vitess recommends that instances be broken up to be even smaller, and not to shy away from running multiple instances per host. The net resource usage would be about the same. But the manageability greatly improves when MySQL instances are small. There is the complication of keeping track of ports, and separating the paths for the MySQL instances. However, everything else becomes simpler once this hurdle is crossed. - -There are fewer lock contentions to worry about, replication is a lot happier, production impact of outages become smaller, backups and restores run faster, and a lot more secondary advantages can be realized. For example, you can shuffle instances around to get better machine or rack diversity leading to even smaller production impact on outages, and improved resource usage. - -### Cloud Vs Baremetal - -Although Vitess is designed to run in the cloud, it is entirely possible to -run it on baremetal configs, and many users still do. If deploying in a cloud, -the assignment of servers and ports is abstracted away from the administrator. -On baremetal, the operator still has these responsibilities. - -We provide sample configs to help you [get started on Kubernetes](https://vitess.io/docs/tutorials/kubernetes/) -since it's the most similar to Borg (the [predecessor to Kubernetes](https://kubernetes.io/blog/2015/04/borg-predecessor-to-kubernetes/) -on which Vitess now runs in YouTube). 
-If you're more familiar with alternatives like Mesos, Swarm, Nomad, or DC/OS, -we'd welcome your contribution of sample configs for Vitess. - -These orchestration systems typically use [containers](https://en.wikipedia.org/wiki/Software_container) -to isolate small instances so they can be efficiently packed onto machines -without contention on ports, paths, or compute resources. -Then an automated scheduler does the job of shuffling instances around for -failure resilience and optimum utilization. - -## Durability through replication - -Traditional data storage software treated data as durable as soon as it was flushed to disk. However, this approach is impractical in today’s world of commodity hardware. Such an approach also does not address disaster scenarios. - -The new approach to durability is achieved by copying the data to multiple machines, and even geographical locations. This form of durability addresses the modern concerns of device failures and disasters. - -Many of the workflows in Vitess have been built with this approach in mind. For example, turning on semi-sync replication is highly recommended. This allows Vitess to failover to a new replica when a master goes down, with no data loss. Vitess also recommends that you avoid recovering a crashed database. Instead, create a fresh one from a recent backup and let it catch up. - -Relying on replication also allows you to loosen some of the disk-based durability settings. For example, you can turn off sync\_binlog, which greatly reduces the number of IOPS to the disk thereby increasing effective throughput. - -## Consistency model - -Before sharding or moving tables to different keyspaces, the application needs to be verified (or changed) such that it can tolerate the following changes: - -* Cross-shard reads may not be consistent with each other. Conversely, the sharding decision should also attempt to minimize such occurrences because cross-shard reads are more expensive. 
-* In "best-effort mode", cross-shard transactions can fail in the middle and result in partial commits. You could instead use "2PC mode" transactions that give you distributed atomic guarantees. However, choosing this option increases the write cost by approximately 50%. - -Single shard transactions continue to remain ACID, just like MySQL supports it. - -If there are read-only code paths that can tolerate slightly stale data, the queries should be sent to REPLICA tablets for OLTP, and RDONLY tablets for OLAP workloads. This allows you to scale your read traffic more easily, and gives you the ability to distribute them geographically. - -This tradeoff allows for better throughput at the expense of stale or possible inconsistent reads, since the reads may be lagging behind the master, as data changes (and possibly with varying lag on different shards). To mitigate this, VTGates are capable of monitoring replica lag and can be configured to avoid serving data from instances that are lagging beyond X seconds. - -For true snapshot, the queries must be sent to the master within a transaction. For read-after-write consistency, reading from the master without a transaction is sufficient. - -To summarize, these are the various levels of consistency supported: - -* REPLICA/RDONLY read: Servers be scaled geographically. Local reads are fast, but can be stale depending on replica lag. -* MASTER read: There is only one worldwide master per shard. Reads coming from remote locations will be subject to network latency and reliability, but the data will be up-to-date (read-after-write consistency). The isolation level is READ\_COMMITTED. -* MASTER transactions: These exhibit the same properties as MASTER reads. However, you get REPEATABLE\_READ consistency and ACID writes for a single shard. Support is underway for cross-shard Atomic transactions. - -As for atomicity, the following levels are supported: - -* SINGLE: disallow multi-db transactions. 
-* MULTI: multi-db transactions with best effort commit. -* TWOPC: multi-db transactions with 2pc commit. - -### No multi-master - -Vitess doesn’t support multi-master setup. It has alternate ways of addressing most of the use cases that are typically solved by multi-master: - -* Scalability: There are situations where multi-master gives you a little bit of additional runway. However, since the statements have to eventually be applied to all masters, it’s not a sustainable strategy. Vitess addresses this problem through sharding, which can scale indefinitely. -* High availability: Vitess integrates with Orchestrator, which is capable of performing a failover to a new master within seconds of failure detection. This is usually sufficient for most applications. -* Low-latency geographically distributed writes: This is one case that is not addressed by Vitess. The current recommendation is to absorb the latency cost of long-distance round-trips for writes. If the data distribution allows, you still have the option of sharding based on geographic affinity. You can then setup masters for different shards to be in different geographic location. This way, most of the master writes can still be local. - -### Big data queries - -There are two main ways to access the data for offline data processing (as -opposed to online web or direct access to the live data): sending queries to -rdonly servers, or using a Map Reduce framework. - -#### Batch queries - -These are regular queries, but they can consume a lot of data. Typically, the -streaming APIs are used, to consume large quantities of data. - -These queries are just sent to the *rdonly* servers (also known as *batch* -servers). They can take as much resources as they want without affecting live -traffic. - -#### MapReduce - -Vitess supports MapReduce access to the data. Vitess provides a Hadoop -connector, that can also be used with Apache Spark. 
See the [Hadoop package -documentation](https://github.com/vitessio/vitess/tree/master/java/hadoop/src/main/java/io/vitess/hadoop) -for more information. - -With a MapReduce framework, Vitess does not support very complicated -queries. In part because it would be difficult and not very efficient, but also -because the MapReduce frameworks are usually very good at data processing. So -instead of doing very complex SQL queries and have processed results, it is -recommended to just dump the input data out of Vitess (with simple *select* -statements), and process it with a MapReduce pipeline. - -## Multi-cell - -Vitess is meant to run in multiple data centers / regions / cells. In this part, -we'll use *cell* as a set of servers that are very close together, and share the -same regional availability. - -A cell typically contains a set of tablets, a vtgate pool, and app servers that -use the Vitess cluster. With Vitess, all components can be configured and -brought up as needed: - -* The master for a shard can be in any cell. If cross-cell master access is - required, vtgate can be configured to do so easily (by passing the cell that - contains the master as a cell to watch). -* It is not uncommon to have the cells that can contain the master be more - provisioned than read-only serving cells. These *master-capable* cells may - need one more replica to handle a possible failover, while still maintaining - the same replica serving capacity. -* Failing over from one master in one cell to a master in a different cell is no - different than a local failover. It has an implication on traffic and latency, - but if the application traffic also gets re-directed to the new cell, the end - result is stable. -* It is also possible to have some shards with a master in one cell, and some - other shards with their master in another cell. vtgate will just route the - traffic to the right place, incurring extra latency cost only on the remote - access. For instance, creating U.S. 
user records in a database with masters in - the U.S. and European user records in a database with masters in Europe is - easy to do. Replicas can exist in every cell anyway, and serve the replica - traffic quickly. -* Replica serving cells are a good compromise to reduce user-visible latency: - they only contain *replica* servers, and master access is always done - remotely. If the application profile is mostly reads, this works really well. -* Not all cells need *rdonly* (or batch) instances. Only the cells that run - batch jobs, or MapReduce jobs, really need them. - -Note Vitess uses local-cell data first, and is very resilient to any cell going -down (most of our processes handle that case gracefully). - -## Lock server - -Vitess is a highly available service, and Vitess itself needs to store a small -amount of metadata very reliably. For that purpose, Vitess needs a highly -available and consistent data store. - -Lock servers were built for this exact purpose, and Vitess needs one such -cluster to be setup to run smoothly. Vitess can be customized to utilize any -lock server, and by default it supports Zookeeper, etcd and Consul. We call this -component [Topology Service]({% link user-guide/topology-service.md %}). - -As Vitess is meant to run in multiple data centers / regions (called cells -below), it relies on two different lock servers: - -* global instance: it contains global meta data, like the list of Keyspaces / - Shards, the VSchema, ... It should be reliable and distributed across multiple - cells. Running Vitess processes almost never access the global instance. -* per-cell instance (local): It should be running only in the local cell. It - contains aggregates of all the global data, plus local running tablet - information. Running Vitess processes get most of their topology data from the - local instance. - -This separation is key to higher reliability. 
A single cell going bad is never -critical for Vitess, as the global instance is configured to survive it, and -other cells can take over the production traffic. The global instance can be -unavailable for minutes and not affect serving at all (it would affect VSchema -changes for instance, but these are not critical, they can wait for the global -instance to be back). - -If Vitess is only running in one cell, both global and local instances can share -the same lock service instance. It is always possible to split them later when -expanding to multiple cells. - -## Monitoring - -The most stressful part of running a production system is the situation where one is trying to troubleshoot an ongoing outage. You have to be able to get to the root cause quickly and find the correct remedy. This is one area where monitoring becomes critical and Vitess has been battle-tested. A large number of internal state variables and counters are continuously exported by Vitess through the /debug/vars and other URLs. There’s also work underway to integrate with third party monitoring tools like Prometheus. - -Vitess errs on the side of over-reporting, but you can be picky about which of these variables you want to monitor. It’s important and recommended to plot graphs of this data because it’s easy to spot the timing and magnitude of a change. It’s also essential to set up various threshold-based alerts that can be used to proactively prevent outages. - -## Development workflow - -Vitess provides binaries and scripts to make unit testing of the application -code very easy. With these tools, we recommend to unit test all the application -features if possible. - -A production environment for a Vitess cluster involves a topology service, -multiple database instances, a vtgate pool and at least one vtctld process, -possibly in multiple data centers. The vttest library uses the *vtcombo* binary -to combine all the Vitess processes into just one. 
The various databases are -also combined into a single MySQL instance (using different database names for -each shard). The database schema is initialized at startup. The (optional) -VSchema is also initialized at startup. - -A few things to consider: - -* Use the same database schema in tests as the production schema. -* Use the same VSchema in tests as the production VSchema. -* When a production keyspace is sharded, use a sharded test keyspace as - well. Just two shards is usually enough, to minimize test startup time, while - still re-producing the production environment. -* *vtcombo* can also start the *vtctld* component, so the test environment is - visible with the Vitess UI. -* See - [vttest.proto](https://github.com/vitessio/vitess/blob/master/proto/vttest.proto) - for more information. - -## Application query patterns - -Although Vitess strives to minimize the app changes required to scale, -there are some important considerations for application queries. - -### Commands specific to single MySQL instances - -Since vitess represents a combined view of all MySQL instances, there -are some operations it cannot reasonably perform in a backward compatible -manner. For example: - -* SET GLOBAL -* SHOW -* Binary log commands -* Other single keyspace administrative commands - -However, Vitess allows you to target a single MySQL instance through -an extended syntax of the USE statement. If so, it will -allow you to execute some of these statements as pass-through. - -### Connecting to Vitess - -If your application previously connected to master or replica -instances through different hosts and ports, those parts will -have to be changed to connect to a single load-balanced IP. - -Instead, the database type will be specified as part of the -db name. For example, to connect to a master, you would specify -the dbname as db@master. For a replica, it would be -db@replica. - -### Query support - -A sharded Vitess is not 100% backward compatible with MySQL. 
-Some queries that used to work will cease to work. -It’s important that you run all your queries on a sharded test environment -- see the [Development workflow](#development-workflow) section above -- to make sure none will fail on production. - -Our goal is to expand query support based on the needs of users. -If you encounter an important construct that isn't supported, -please create or comment on an existing feature request so we -know how to prioritize. diff --git a/doc/ScalingMySQL.md b/doc/ScalingMySQL.md deleted file mode 100644 index deef192c8b3..00000000000 --- a/doc/ScalingMySQL.md +++ /dev/null @@ -1,143 +0,0 @@ -Traditionally, it's been difficult to scale a MySQL-based database to an arbitrary size. Since MySQL lacks the out-of-the-box multi-instance support required to really scale an application, the process can be complex and obscure. - -As the application grows, scripts emerge to back up data, migrate a master database, or run some offline data processing. Complexity creeps into the application layer, which increasingly needs to be aware of database details. And before we know it, any change needs a big engineering effort so we can keep scaling. - -Vitess grew out of YouTube's attempt to break this cycle, and YouTube decided to open source Vitess after realizing that this is a very common problem. Vitess simplifies every aspect of managing a MySQL cluster, allowing easy scaling to any size without complicating your application layer. It ensures your database can keep up when your application takes off, leaving you with a database that is flexible, secure, and easy to mine. - -This document talks about the process of moving from a single small database to a limitless database cluster. It explains how steps in that process influenced Vitess' design, linking to relevant parts of the Vitess documentation along the way. It concludes with tips for designing a new, highly scalable application and database schema. 
- -## Getting started - -Vitess sits between your application and your MySQL database. It looks at incoming queries and routes them properly. So, instead of sending a query directly from your application to your database, you send it through Vitess, which understands your database topology and constantly monitors the health of individual database instances. - -While Vitess is designed to manage large, multi-instance databases, it offers features that simplify database setup and management at all stages of your product's lifecycle. - -Starting out, our first step is getting a simple, reliable, durable database cluster in place with a master instance and a couple of replicas. In Vitess terminology, that's a single-shard, single-keyspace database. Once that building block is in place, we can focus on scaling it up. - -### Planning for scale - -We recommend a number of best practices to facilitate scaling your database as your product evolves. You might not experience the benefits of these actions immediately, but adopting these practices from day one will make it much easier for your database and product to grow: - -* Always keep your database schema under source control and provide unit test coverage of that schema. Also check schema changes into source control and run unit tests against the newly modified schema. -* Think about code paths that can read from a replica vs. always choosing to read from the master. This will let you to scale your reads by just adding more replicas. Additionally, this will make it easy to expand into other data centers across the world. -* Avoid complicated data relationships. Although RDBMS systems can handle them very well, such relationships hinder scaling in the future. When the time comes, it will be easier to shard the data. -* Avoid pushing too much logic into the database in the form of stored procedures, foreign keys, or triggers. Such operations overly tax the database and hinder scaling. 
- -## Step 1: Setting up a database cluster - -At the outset, plan to create a database cluster that has a master instance and a couple of read-only replicas (or slaves). The replicas would be able to take over if the master became unavailable, and they might also handle read-only traffic. You'd also want to schedule regular data backups. - -It's worth noting that master management is a complex and critical challenge for data reliability. At any given time, a shard has only one master instance, and all replica instances replicate from it. Your application -- either a component in your application layer or Vitess, if you are using it -- needs to be able to easily identify the master instance for write operations, recognizing that the master might change from time to time. Similarly, your application, with or without Vitess, should be able to seamlessly adapt to new replicas coming online or old ones being unavailable. - -### Keep routing logic out of your application - -A core principle underlying Vitess' design is that your database and data management practices should always be ready to support your application's growth. So, you might not yet have an immediate need to store data in multiple data centers, shard your database, or even do regular backups. But when those needs arise, you want to be sure that you'll have an easy path to achieve them. Note that you can run Vitess in a Kubernetes cluster or on local hardware. - -With that in mind, you want to have a plan that allows your database to grow without complicating your application code. For example, if you reshard your database, your application code shouldn't need to change to identify the target shards for a particular query. - -Vitess has several components that keep this complexity out of your application: - -* Each MySQL instance is paired with a **vttablet** process, which provides features like connection pooling, query rewriting, and query de-duping. 
-* Your application sends queries to **vtgate**, a light proxy that routes traffic to the correct vttablet(s) and then returns consolidated results to the application. -* The **Topology Service** -- Vitess supports Zookeeper, etcd and Consul -- maintains configuration data for the database system. Vitess relies on the service to know where to route queries based on both the sharding scheme and the availability of individual MySQL instances. -* The **vtctl** and **vtctld** tools offer command-line and web interfaces to the system. - -
-Diagram showing Vitess implementation -
- - -Setting up these components directly -- for example, writing your own topology service or your own implementation of vtgate -- would require a lot of scripting specific to a given configuration. It would also yield a system that would be difficult and costly to support. In addition, while any one of the components on its own is useful in limiting complexity, you need all of them to keep your application as simple as possible while also optimizing performance. - -**Optional functionality to implement** - -* *Recommended*. Vitess has basic support for identifying or changing a master, but it doesn't aim to fully address this feature. As such, we recommend using another program, like [Orchestrator](https://github.com/github/orchestrator), to monitor the health of your servers and to change your master database when necessary. (In a sharded database, each shard has a master.) - - -* *Recommended*. You should have a way to monitor your database topology and set up alerts as needed. Vitess components facilitate this monitoring by exporting a lot of runtime variables, like QPS over the last few minutes, error rates, and query latency. The variables are exported in JSON format, and Vitess also supports a Prometheus plug-in. - - -* *Optional*. Using the Kubernetes scripts as a base, you could run Vitess components with other configuration management systems (like Puppet) or frameworks (like Mesos or AWS images). - -**Related Vitess documentation:** - -* [Running Vitess on Kubernetes]({% link getting-started/index.md %}) -* [Running Vitess on a local server]({% link getting-started/local-instance.md %}) -* [Backing up data]({% link user-guide/backup-and-restore.md %}) -* [Reparenting - basic assignment of master instance in Vitess]({% link user-guide/reparenting.md %}) - -## Step 2: Connect your application to your database - -Obviously, your application needs to be able to call your database. 
So, we'll jump straight to explaining how you'd modify your application to connect to your database through vtgate. - -As of Release 2.1, VTGate supports the MySQL protocol. So, the application only needs to change where it connects to. For those using Java or Go, we additionally provide libraries that can communicate to VTGate using [gRPC](https://www.grpc.io/). Using the provided libraries allow you to send queries with bind variables, which is not inherently possible through the MySQL protocol. - -#### Unit testing database interactions - -The vttest library and executables provide a unit testing environment that lets you start a fake cluster that acts as an exact replica of your production environment for testing purposes. In the fake cluster, a single DB instance hosts all of your shards. - -### Migrating production data to Vitess - -The easiest way to migrate data to your Vitess database is to take a backup of your existing data, restore it on the Vitess cluster, and go from there. However, that requires some downtime. - -Another, more complicated approach, is a live migration, which requires your application to support both direct MySQL access and Vitess access. In that approach, you'd enable MySQL replication from your source database to the Vitess master database. This would allow you to migrate quickly and with almost no downtime. - -Note that this path is highly dependent on the source setup. Thus, while Vitess provides helper tools, it does not offer a generic way to support this type of migration. - -The final option is to deploy Vitess directly onto the existing MySQL instances and slowly migrate the application traffic to move over to using Vitess. 
- -**Related Vitess documentation:** - -* [Schema Management]({% link user-guide/schema-management.md %}) -* [Transport Security Model]({% link user-guide/transport-security-model.md %}) - -## Step 3: Vertical sharding (scaling to multiple keyspaces) - -Typically, the first step in scaling up is vertical sharding, in which you identify groups of tables that belong together and move them to separate keyspaces. A keyspace is a distributed database, and, usually, the databases are unsharded at this point. That said, it's possible that you'll need to horizontally shard your data (step 4) before scaling to multiple keyspaces. - -The benefit of splitting tables into multiple keyspaces is to parallelize access to the data (increased performance), and to prepare each smaller keyspace for horizontal sharding. And, in separating data into multiple keyspaces, you should aim to reach a point where: - -* All tables inside a keyspace share a common key. This will make it more convenient to horizontally shard in the future as described in step 4. -* Joins are primarily within keyspaces. (Joins between keyspaces are costly.) -* Transactions involving data in multiple keyspaces, which are also expensive, are uncommon. - -### Scaling keyspaces with Vitess - -Several vtctl functions -- vtctl is Vitess' command-line tool for managing your database topology -- support features for vertically splitting a keyspace. In this process, a set of tables can be moved from an existing keyspace to a new keyspace with no read downtime and write downtime of just a few seconds. - -**Related Vitess documentation:** - -* [vtctl Reference guide]({% link reference/vtctl.md %}) - -## Step 4: Horizontal sharding (partitioning your data) - -The next step in scaling your data is horizontal sharding, the process of partitioning your data to improve scalability and performance. A shard is a horizontal partition of the data within a keyspace. 
Each shard has a master instance and replica instances, but data does not overlap between shards. - -In order to perform horizontal sharding, you need to identify the column that will be used to decide the target shard for each table. This is known as the Primary Vindex, which is similar to a NoSQL sharding key, but provides additional flexibility. The decision on such primary vindexes and other sharding metadata is stored in the VSchema. - -Vitess offers robust resharding support, which involves updating the sharding scheme for a keyspace and dynamically reorganizing data to match the new scheme. During resharding, Vitess copies, verifies, and keeps data up-to-date on new shards while existing shards continue serving live read and write traffic. When you're ready to switch over, the migration occurs with just a few seconds of read-only downtime. - -**Related Vitess documentation:** - -* [VSchema Reference guide]({% link user-guide/vschema.md %}) -* [Sharding]({% link user-guide/sharding.md %}) -* [Horizontal sharding (Codelab)]({% link user-guide/horizontal-sharding.md %}) -* [Sharding in Kubernetes (Codelab)]({% link user-guide/sharding-kubernetes.md %}) - -## Related tasks - -In addition to the four steps discussed above, you might also want to do some or all of the following as your application matures. - -### Data processing input / output - -Hadoop is a framework that enables distributed processing of large data sets across clusters of computers using simple programming models. - -Vitess provides a Hadoop InputSource that can be used for any Hadoop MapReduce job or even connected to Spark. The Vitess InputSource takes a simple SQL query, splits that query into small chunks, and parallelizes data reading as much as possible across database instances, shards, etc. - -### Query log analysis - -Database query logs can help you to monitor and improve your application's performance. 
- -To that end, each vttablet instance provides runtime stats, which can be accessed through the tablet’s web page, for the queries the tablet is running. These stats make it easy to detect slow queries, which are usually hampered by a missing or mismatched table index. Reviewing these queries regularly helps maintain the overall health of your large database installation. - -Each vttablet instance can also provide a stream of all the queries it is running. If the Vitess cluster is colocated with a log cluster, you can dump this data in real time and then run more advanced query analysis. diff --git a/doc/SchemaManagement.md b/doc/SchemaManagement.md deleted file mode 100644 index 701bc603658..00000000000 --- a/doc/SchemaManagement.md +++ /dev/null @@ -1,190 +0,0 @@ -Using Vitess requires you to work with two different types of schemas: - -1. The MySQL database schema. This is the schema of the individual MySQL instances. -2. The [VSchema]({% link user-guide/vschema.md %}), which describes all the keyspaces and how they're sharded. - -The workflow for the VSchema is as follows: - -1. Apply the VSchema for each keyspace using the ApplyVschema command. This saves the VSchemas in the global topo server. -2. Execute RebuildVSchemaGraph for each cell (or all cells). This command propagates a denormalized version of the combined VSchema to all the specified cells. The main purpose for this propagation is to minimize the dependency of each cell from the global topology. The ability to push a change to only specific cells allows you to canary the change to make sure that it's good before deploying it everywhere. - -This document describes the [vtctl]({% link reference/vtctl.md %}) -commands that you can use to [review](#reviewing-your-schema) or -[update](#changing-your-schema) your schema in Vitess. - -Note that this functionality is not recommended for long-running schema changes. 
In such cases, we recommend to do a [schema swap]({% link user-guide/schema-swap.md %}) instead. - -## Reviewing your schema - -This section describes the following vtctl commands, which let you look at the schema and validate its consistency across tablets or shards: - -* [GetSchema](#getschema) -* [ValidateSchemaShard](#validateschemashard) -* [ValidateSchemaKeyspace](#validateschemakeyspace) -* [GetVSchema](#getvschema) -* [GetSrvVSchema](#getsrvvschema) - -### GetSchema - -The [GetSchema]({% link reference/vtctl.md %}#getschema) command -displays the full schema for a tablet or a subset of the tablet's tables. -When you call GetSchema, you specify the tablet alias that -uniquely identifies the tablet. The \ -argument value has the format \-\. - -**Note:** You can use the -[vtctl ListAllTablets]({% link reference/vtctl.md %}#listalltablets) -command to retrieve a list of tablets in a cell and their unique IDs. - -The following example retrieves the schema for the tablet with the -unique ID test-000000100: - -``` -GetSchema test-000000100 -``` - -### ValidateSchemaShard - -The -[ValidateSchemaShard]({% link reference/vtctl.md %}#validateschemashard) -command confirms that for a given keyspace, all of the slave tablets -in a specified shard have the same schema as the master tablet in that -shard. When you call ValidateSchemaShard, you specify both -the keyspace and the shard that you are validating. - -The following command confirms that the master and slave tablets in -shard 0 all have the same schema for the user -keyspace: - -``` -ValidateSchemaShard user/0 -``` - -### ValidateSchemaKeyspace - -The [ValidateSchemaKeyspace]({% link reference/vtctl.md %}#validateschemakeyspace) -command confirms that all of the tablets in a given keyspace have -the same schema as the master tablet on shard 0 -in that keyspace. 
Thus, whereas the ValidateSchemaShard -command confirms the consistency of the schema on tablets within a shard -for a given keyspace, ValidateSchemaKeyspace confirms the -consistency across all tablets in all shards for that keyspace. - -The following command confirms that all tablets in all shards have the -same schema as the master tablet in shard 0 for the -user keyspace: - -``` -ValidateSchemaKeyspace user -``` - -### GetVSchema - -The [GetVSchema]({% link reference/vtctl.md %}#getvschema) -command displays the global VSchema for the specified keyspace. - -### GetSrvVSchema - -The [GetSrvVSchema]({% link reference/vtctl.md %}#getsrvvschema) -command displays the combined VSchema for a given cell. - -## Changing your schema - -This section describes the following commands: - -* [ApplySchema](#applyschema) -* [ApplyVSchema](#applyvschema) -* [RebuildVSchemaGraph](#rebuildvschemagraph) - -### ApplySchema - -Vitess' schema modification functionality is designed the following goals in mind: - -* Enable simple updates that propagate to your entire fleet of servers. -* Require minimal human interaction. -* Minimize errors by testing changes against a temporary database. -* Guarantee very little downtime (or no downtime) for most schema updates. -* Do not store permanent schema data in the topology server. - -Note that, at this time, Vitess only supports -[data definition statements](https://dev.mysql.com/doc/refman/5.6/en/sql-syntax-data-definition.html) -that create, modify, or delete database tables. -For instance, ApplySchema does not affect stored procedures -or grants. - -The [ApplySchema]({% link reference/vtctl.md %}#applyschema) -command applies a schema change to the specified keyspace on every -master tablet, running in parallel on all shards. Changes are then -propagated to slaves via replication. 
The command format is: -``` -ApplySchema {-sql= || -sql_file=} -``` - -When the ApplySchema action actually applies a schema -change to the specified keyspace, it performs the following steps: - -1. It finds shards that belong to the keyspace, including newly added - shards if a [resharding event]({% link user-guide/sharding.md %}#resharding) - has taken place. -1. It validates the SQL syntax and determines the impact of the schema - change. If the scope of the change is too large, Vitess rejects it. - See the [permitted schema changes](#permitted-schema-changes) section - for more detail. -1. It employs a pre-flight check to ensure that a schema update will - succeed before the change is actually applied to the live database. - In this stage, Vitess copies the current schema into a temporary - database, applies the change there to validate it, and retrieves - the resulting schema. By doing so, Vitess verifies that the change - succeeds without actually touching live database tables. -1. It applies the SQL command on the master tablet in each shard. - -The following sample command applies the SQL in the **user_table.sql** -file to the **user** keyspace: - -``` -ApplySchema -sql_file=user_table.sql user -``` - -#### Permitted schema changes - -The ApplySchema command supports a limited set of DDL -statements. In addition, Vitess rejects some schema changes because -large changes can slow replication and may reduce the availability -of your overall system. - -The following list identifies types of DDL statements that Vitess -supports: - -* CREATE TABLE -* CREATE INDEX -* CREATE VIEW -* ALTER TABLE -* ALTER VIEW -* RENAME TABLE -* DROP TABLE -* DROP INDEX -* DROP VIEW - -In addition, Vitess applies the following rules when assessing the -impact of a potential change: - -* DROP statements are always allowed, regardless of the - table's size. -* ALTER statements are only allowed if the table on the - shard's master tablet has 100,000 rows or less. 
-* For all other statements, the table on the shard's master tablet - must have 2 million rows or less. - -If a schema change gets rejected because it affects too many rows, you can specify the flag `-allow_long_unavailability` to tell `ApplySchema` to skip this check. -However, we do not recommend this. Instead, you should apply large schema changes by following the [schema swap process]({% link user-guide/schema-swap.md %}). - -### ApplyVSchema - -The [ApplyVSchema]({% link reference/vtctl.md %}#applyvschema) -command applies the specified VSchema to the keyspace. The VSchema can be specified -as a string or in a file. - -### RebuildVSchemaGraph - -The [RebuildVSchemaGraph]({% link reference/vtctl.md %}#rebuildvschemagraph) -command propagates the global VSchema to a specific cell or the list of specified cells. diff --git a/doc/SchemaSwap.md b/doc/SchemaSwap.md deleted file mode 100644 index a893761464d..00000000000 --- a/doc/SchemaSwap.md +++ /dev/null @@ -1,87 +0,0 @@ -# Schema Swap: A Tutorial - -This page describes how to apply long-running schema changes in Vitess/MySQL -without disrupting ongoing operations. Examples for long-running changes on -large databases are `ALTER TABLE` (for example to add a column), `OPTIMIZE -TABLE` or large-scale data changes (e.g. populating a column or clearing out -values). - -If a schema change is not long-running, please use the simpler [vtctl -ApplySchema]({% link user-guide/schema-management.md %}) instead. - -## Overview - -One solution to realize such long-running schema changes is to use a temporary -table and keep it in sync with triggers as [originally proposed by -Shlomi](https://code.openark.org/blog/mysql/online-alter-table-now-available-in-openark-kit) -and further refined by others ([Percona's -pt-online-schema-change](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html), -[Square's Shift](https://github.com/square/shift)). 
- -Here we describe an alternative solution which uses a combination of MySQL's -statement based replication and backups to apply the changes to all tablets. -Since the long-running schema changes are applied to an offline tablet, ongoing -operations are not affected. We called this process **schema swap** due to the -way it's done, and therefore we refer to it by this name throughout the -document. - -This tutorial outlines the necessary steps for a schema swap and is based on the -[Vitess Kubernetes Getting Started Guide]({% link getting-started/index.md %}). - -**At the high level, a schema swap comprises the following phases:** - -1. Apply the schema changes to an offline tablet. -1. Let the tablet catch up and then create a backup of it. -1. Restore all remaining tablets (excluding the master) from the backup. -1. Failover the master to a replica tablet which has the new schema. Restore - the old master from the backup. -1. At this point, all tablets have the new schema and you can start using it. - -**You may be wondering: Why does this work?** - -The key here is that the new schema is backward compatible with respect to -statements sent by the app. The replication stream remains backward compatible -as well because we use statement based replication. As a consequence, the new -schema must not be used until it has been changed on all tablets. If the schema -would have been used e.g. when an insert uses a new column, replication would -break on tablets which have the old schema. Swapping schema on all tablets first -ensures this doesn't happen. - -Also note that the changes are applied to only one tablet and then all other -tablets are restored from the backup. This is more efficient than applying the -long-running changes on every single tablet. - -Now let's carry out an actual schema swap based on our Guestbook example schema. -We'll add a column to it. 
- -## Prerequisites - -We assume that you have followed the [Vitess Kubernetes Getting Started -Guide]({% link getting-started/index.md %}) up to and including the step "9. -Create a table". - -## Schema Swap Steps - -1. Got to the Workflows section of vtctld UI (it will be at - http://localhost:8001/api/v1/proxy/namespaces/default/services/vtctld:web/app2/workflows - if you followed the Getting Started Guide as is) and press the "+" button in - the top right corner. You will be presented with "Create a new Workflow" - dialog. -1. In the "Factory Name" list select "Schema Swap". -1. In the field "Keyspace" enter "test_keyspace" (without quotes). -1. In the field "SQL" enter the statement representing the schema change you - want to execute. As an example we want to execute statement "ALTER TABLE - messages ADD views BIGINT(20) UNSIGNED NULL". -1. Click "Create" button at the bottom of the dialog. - -Another way to start the schema swap is to execute vtctlclient command: - -``` sh -vitess/examples/local$ ./lvtctl.sh WorkflowCreate schema_swap -keyspace=test_keyspace -sql='SQL statement' -``` - -From this point on all you need to do is watch how the schema swap process is -progressing. Try expanding the displayed nodes in vtctld UI and look at the logs -of all the actions that process is doing. Once the UI shows "Schema swap is -finished" you can start using the new schema, it will be propagated to all -tablets. diff --git a/doc/ServerConfiguration.md b/doc/ServerConfiguration.md deleted file mode 100644 index 737757ae726..00000000000 --- a/doc/ServerConfiguration.md +++ /dev/null @@ -1,684 +0,0 @@ -## MySQL - -Vitess has some requirements on how MySQL should be configured. These will be detailed below. - -As a reminder, semi-sync replication is highly recommended. It offers a much better durability story than relying on a disk. This will also let you relax the disk-based durability settings. 
- -### Versions - -MySQL versions supported are: MariaDB 10.0, MySQL 5.6 and MySQL 5.7. A number of custom versions based on these exist (Percona, …), Vitess most likely supports them if the version they are based on is supported. - -### Config files - -#### my.cnf - -The main `my.cnf` file is generated by -[mysqlctl init](https://github.com/vitessio/vitess/blob/312064b96ac0070d9f8990e57af6f2c0a76a45a9/examples/local/vttablet-up.sh#L66) -based primarily on -[$VTROOT/config/mycnf/default.cnf](https://github.com/vitessio/vitess/blob/master/config/mycnf/default.cnf). -Additional files will be appended to the generated `my.cnf` as specified in -a colon-separated list of absolute paths in the `EXTRA_MY_CNF` environment -variable. For example, this is typically used to include [flavor-specific -config files](https://github.com/vitessio/vitess/blob/312064b96ac0070d9f8990e57af6f2c0a76a45a9/examples/local/vttablet-up.sh#L41). - -To customize the `my.cnf`, you can either add overrides in an additional -`EXTRA_MY_CNF` file, or modify the files in `$VTROOT/config/mycnf` before -distributing to your servers. In Kubernetes, you can use a -[ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) to overwrite -the entire `$VTROOT/config/mycnf` directory with your custom versions, -rather than baking them into a custom container image. - -#### init_db.sql - -When a new instance is initialized with `mysqlctl init` (as opposed to -restarting in a previously initialized data dir with `mysqlctl start`), -the [init_db.sql](https://github.com/vitessio/vitess/blob/master/config/init_db.sql) -file is applied to the server immediately after executing `mysql_install_db`. -By default, this file contains the equivalent of running -[mysql_secure_installation](https://dev.mysql.com/doc/refman/5.7/en/mysql-secure-installation.html), -as well as the necessary tables and grants for Vitess. 
- -If you are running Vitess on top of an existing MySQL instance, -rather than using mysqlctl, you can use this file as a sample of what -grants need to be applied to enable Vitess. - -Note that changes to this file will not be reflected in shards that have -already been initialized and had at least one backup taken. -New instances in such shards will automatically restore the latest backup -upon vttablet startup, overwriting the data dir created by mysqlctl. - -### Statement-based replication (SBR) - -Vitess relies on adding comments to DMLs, which are later parsed on the other end of replication for various post-processing work. The critical ones are: - -* Redirect DMLs to the correct shard during resharding workflow. -* Identify which rows have changed for notifying downstream services that wish to subscribe to changes in vitess. -* Workflows that allow you to apply schema changes to replicas first, and rotate the masters, which improves uptime. - -In order to achieve this, Vitess also rewrites all your DMLs to be primary-key based. In a way, this also makes statement based replication almost as efficient as row-based replication (RBR). So, there should be no major loss of performance if you switched to SBR in Vitess. - -In Vitess 2.2 preliminary support for Row Based Replication (RBR) was added. See [this document]({% link user-guide/row-based-replication.md %}) for more information. - -### Data types - -Vitess supports all data types including newer data types like spatial and JSON. Additionally, the TIMESTAMP data type should not be used in a primary key or sharding column. Otherwise, Vitess cannot predict those values correctly and this may result in data corruption. - -### No side effects - -Vitess cannot guarantee data consistency if the schema contains constructs with side effects. These are triggers, stored procedures and foreign keys. 
This is because the resharding workflow and update stream cannot correctly detect what has changed by looking at a statement. - -This rule is not strictly enforced. You are allowed to add these things, but at your own risk. As long as you’ve ensured that a certain side-effect will not break Vitess, you can add it to the schema. - -Similar guidelines should be used when deciding to bypass Vitess to send statements directly to MySQL. - -Vitess also requires you to turn on STRICT_TRANS_TABLES or STRICT_ALL_TABLES mode. Otherwise, it cannot accurately predict what will be written to the database. - -It’s safe to apply backward compatible DDLs directly to MySQL. VTTablets can be configured to periodically check the schema for changes. - -There is also work in progress to actively watch the binlog for schema changes. This will likely happen around release 2.1. - -### Autocommit - -MySQL autocommit needs to be turned on. - -VTTablet uses connection pools to MySQL. If autocommit was turned off, MySQL will start an implicit transaction (with a point in time snapshot) for each connection and will work very hard at keeping the current view unchanged, which would be counter-productive. - -### Safe startup - -We recommend to enable `read-only` and `skip-slave-start` at startup. -The first ensures that writes will not be accepted accidentally, -which could cause split brain or alternate futures. -The second ensures that slaves do not connect to the master before -settings like semisync are initialized by vttablet according to -Vitess-specific logic. - - -### Binary logging - -By default, we enable binary logging everywhere (`log-bin`), -including on slaves (`log-slave-updates`). -On *replica* type tablets, this is important to make sure they have the -necessary binlogs in case they are promoted to master. -The slave binlogs are also used to implement Vitess features like -filtered replication (during resharding) and the upcoming update stream -and online schema swap. 
- -### Global Transaction ID (GTID) - -Many features of Vitess require a fully GTID-based MySQL replication -topology, including master management, resharding, update stream, -and online schema swap. - -For MySQL 5.6+, that means you must use `gtid_mode=ON` on all servers. -We also strongly encourage `enforce_gtid_consistency`. - -Similarly, for MariaDB, you should use `gtid_strict_mode` to ensure that -master management operations will fail rather than risk causing data loss -if slaves diverge from the master due to external interference. - -### Monitoring - -In addition to monitoring the Vitess processes, we recommend to monitor MySQL as well. Here is a list of MySQL metrics you should monitor: - -* QPS -* Bytes sent/received -* Replication lag -* Threads running -* Innodb buffer cache hit rate -* CPU, memory and disk usage. For disk, break into bytes read/written, latencies and IOPS. - -### Recap - -* 2-4 cores -* 100-300GB data size -* Statement based replication (required) -* Semi-sync replication - * rpl_semi_sync_master_timeout is huge (essentially never; there's no way to actually specify never) - * rpl_semi_sync_master_wait_no_slave = 1 - * sync_binlog=0 - * innodb_flush_log_at_trx_commit=2 -* STRICT_TRANS_TABLES -* auto-commit ON (required) -* Additional parameters as mentioned in above sections. - -## Vitess servers - -Vitess servers are written in Go. There are a few Vitess-specific knobs that apply to all servers. - -### Go version - -Go, being a young language, tends to add major improvements over each version. -So, the latest Go version is almost always recommended. -Note that the latest Go version may be higher than the minimum version we require for compiling the binaries (see ["Prerequisites" section in the Getting Started guide](https://github.com/vitessio/website/blob/master/content/docs/tutorials/kubernetes.md#prerequisites)). - -### GOMAXPROCS - -You typically don’t have to set this environment variable. 
The default Go runtime will try to use as much CPU as necessary. However, if you want to force a Go server to not exceed a certain CPU limit, setting GOMAXPROCS to that value will work in most situations. - -### GOGC - -The default value for this variable is 100. Which means that garbage is collected every time memory doubles from the baseline (100% growth). You typically don’t have to change this value either. However, if you care about tail latency, increasing this value will help you in that area, but at the cost of increased memory usage. - -### Logging - -Vitess servers write to log files, and they are rotated when they reach a maximum size. It’s recommended that you run at INFO level logging. The information printed in the log files come in handy for troubleshooting. You can limit the disk usage by running cron jobs that periodically purge or archive them. - -### gRPC - -Vitess uses gRPC for communication between client and Vitess, and between Vitess -servers. By default, Vitess does not use SSL. - -Also, even without using SSL, we allow the use of an application-provided -CallerID object. It allows unsecure but easy to use authorization using Table -ACLs. - -See the -[Transport Security Model document]({% link user-guide/transport-security-model.md %}) -for more information on how to setup both of these features, and what command -line parameters exist. - -## Topology Service configuration - -Vttablet, vtgate, vtctld need the right command line parameters to find the -Topology Server. First the *topo\_implementation* flag needs to be set to one of -*zk2*, *etcd2*, or *consul*. Then they're all configured as follows: - -* The *topo_global_server_address* contains the server address / addresses of - the global topology server. -* The *topo_global_root* contains the directory / path to use. - -Note that the local cell for the tablet must exist and be configured properly in -the Topology Service for vttablet to start. 
Local cells are configured inside -the topo server, by using the `vtctl AddCellInfo` command. See -the [Topology Service]({% link user-guide/topology-service.md %}) documentation for more -information. - -## VTTablet - - - -VTTablet has a large number of command line options. Some important ones will be covered here. In terms of provisioning these are the recommended values - -* 2-4 cores (in proportion to MySQL cores) -* 2-4 GB RAM - -### Directory Configuration - -vttablet supports a number of command line options and environment variables -to facilitate its setup. - -The **VTDATAROOT** environment variable specifies the toplevel directory for all -data files. If not set, it defaults to `/vt`. - -By default, a vttablet will use a subdirectory in **VTDATAROOT** named -`vt_NNNNNNNNNN` where `NNNNNNNNNN` is the tablet id. The **tablet_dir** -command-line parameter allows overriding this relative path. This is useful in -containers where the filesystem only contains one vttablet, in order to have a -fixed root directory. - -When starting up and using `mysqlctl` to manage MySQL, the MySQL files will be -in subdirectories of the tablet root. For instance, `bin-logs` for the binary -logs, `data` for the data files, and `relay-logs` for the relay logs. - -It is possible to host different parts of a MySQL server files on different -partitions. For instance, the data file may reside in flash, while the bin logs -and relay logs are on spindle. To achieve this, create a symlink from -`$VTDATAROOT/` to the proper location on disk. When MySQL is -configured by mysqlctl, it will realize this directory exists, and use it for the -files it would otherwise have put in the tablet directory. For instance, to host -the binlogs in `/mnt/bin-logs`: - -* Create a symlink from `$VTDATAROOT/bin-logs` to `/mnt/bin-logs`. - -* When starting up a tablet: - - * `/mnt/bin-logs/vt_NNNNNNNNNN` will be created. 
- - * `$VTDATAROOT/vt_NNNNNNNNNN/bin-logs` will be a symlink to - `/mnt/bin-logs/vt_NNNNNNNNNN` - -### Initialization - -* Init_keyspace, init_shard, init_tablet_type: These parameters should be set at startup with the keyspace / shard / tablet type to start the tablet as. Note ‘master’ is not allowed here, instead use ‘replica’, as the tablet when starting will figure out if it is the master (this way, all replica tablets start with the same command line parameters, independently of which one is the master). - -### Query server parameters - - - -* **queryserver-config-pool-size**: This value should typically be set to the max number of simultaneous queries you want MySQL to run. This should typically be around 2-3x the number of allocated CPUs. Around 4-16. There is not much harm in going higher with this value, but you may see no additional benefits. -* **queryserver-config-stream-pool-size**: This value is relevant only if you plan to run streaming queries against the database. It’s recommended that you use rdonly instances for such streaming queries. This value depends on how many simultaneous streaming queries you plan to run. Typical values are in the low 100s. -* **queryserver-config-transaction-cap**: This value should be set to how many concurrent transactions you wish to allow. This should be a function of transaction QPS and transaction length. Typical values are in the low 100s. -* **queryserver-config-query-timeout**: This value should be set to the upper limit you’re willing to allow a query to run before it’s deemed too expensive or detrimental to the rest of the system. VTTablet will kill any query that exceeds this timeout. This value is usually around 15-30s. -* **queryserver-config-transaction-timeout**: This value is meant to protect the situation where a client has crashed without completing a transaction. Typical value for this timeout is 30s. 
-* **queryserver-config-max-result-size**: This parameter prevents the OLTP application from accidentally requesting too many rows. If the result exceeds the specified number of rows, VTTablet returns an error. The default value is 10,000. - -### DB config parameters - -VTTablet requires multiple user credentials to perform its tasks. Since it's required to run on the same machine as MySQL, it’s most beneficial to use the more efficient unix socket connections. - -**connection** parameters - -* **db\_socket**: The unix socket to connect on. If this is specified, host and port will not be used. -* **db\_host**: The host name for the tcp connection. -* **db\_port**: The tcp port to be used with the db\_host. -* **db\_charset**: Character set. Only utf8 or latin1 based character sets are supported. -* **db\_flags**: Flag values as defined by MySQL. -* **db\ssl\_ca, db\_ssl\_ca\_path, db\_ssl\_cert, db\_ssl\_key**: SSL flags. - - -**app** credentials are for serving app queries: - -* **db\_app\_user**: App username. -* **db\_app\_password**: Password for the app username. If you need a more secure way of managing and supplying passwords, VTTablet does allow you to plug into a "password server" that can securely supply and refresh usernames and passwords. Please contact the Vitess team for help if you’d like to write such a custom plugin. -* **db\_app\_use\_ssl**: Set this flag to false if you don't want to use SSL for this connection. This will allow you to turn off SSL for all users except for `repl`, which may have to be turned on for replication that goes over open networks. - -**appdebug** credentials are for the appdebug user: - -* **db\_appdebug\_user** -* **db\_appdebug\_password** -* **db\_appdebug\_use\_ssl** - -**dba** credentials will be used for housekeeping work like loading the schema or killing runaway queries: - -* **db\_dba\_user** -* **db\_dba\_password** -* **db\_dba\_use\_ssl** - -**repl** credentials are for managing replication. 
- -* **db\_repl\_user** -* **db\_repl\_password** -* **db\_repl\_use\_ssl** - -**filtered** credentials are for performing resharding: - -* **db\_filtered\_user** -* **db\_filtered\_password** -* **db\_filtered\_use\_ssl** - -### Monitoring - -VTTablet exports a wealth of real-time information about itself. This section will explain the essential ones: - -#### /debug/status - -This page has a variety of human-readable information about the current VTTablet. You can look at this page to get a general overview of what’s going on. It also has links to various other diagnostic URLs below. - -#### /debug/vars - -This is the most important source of information for monitoring. There are other URLs below that can be used to further drill down. - -##### Queries (as described in /debug/vars section) - -Vitess has a structured way of exporting certain performance stats. The most common one is the Histogram structure, which is used by Queries: - -``` - "Queries": { - "Histograms": { - "PASS_SELECT": { - "1000000": 1138196, - "10000000": 1138313, - "100000000": 1138342, - "1000000000": 1138342, - "10000000000": 1138342, - "500000": 1133195, - "5000000": 1138277, - "50000000": 1138342, - "500000000": 1138342, - "5000000000": 1138342, - "Count": 1138342, - "Time": 387710449887, - "inf": 1138342 - } - }, - "TotalCount": 1138342, - "TotalTime": 387710449887 - }, -``` - -The histograms are broken out into query categories. In the above case, "PASS_SELECT" is the only category. An entry like `"500000": 1133195` means that `1133195` queries took under `500000` nanoseconds to execute. - -Queries.Histograms.PASS_SELECT.Count is the total count in the PASS_SELECT category. - -Queries.Histograms.PASS_SELECT.Time is the total time in the PASS_SELECT category. - -Queries.TotalCount is the total count across all categories. - -Queries.TotalTime is the total time across all categories. - -There are other Histogram variables described below, and they will always have the same structure. 
- -Use this variable to track: - -* QPS -* Latency -* Per-category QPS. For replicas, the only category will be PASS_SELECT, but there will be more for masters. -* Per-category latency -* Per-category tail latency - -##### Results - -``` - "Results": { - "0": 0, - "1": 0, - "10": 1138326, - "100": 1138326, - "1000": 1138342, - "10000": 1138342, - "5": 1138326, - "50": 1138326, - "500": 1138342, - "5000": 1138342, - "Count": 1138342, - "Total": 1140438, - "inf": 1138342 - } -``` - -Results is a simple histogram with no timing info. It gives you a histogram view of the number of rows returned per query. - -##### Mysql - -Mysql is a histogram variable like Queries, except that it reports MySQL execution times. The categories are "Exec" and “ExecStream”. - -In the past, the exec time difference between VTTablet and MySQL used to be substantial. With the newer versions of Go, the VTTablet exec time has been predominantly equal to the mysql exec time, conn pool wait time and consolidations waits. In other words, this variable has not shown much value recently. However, it’s good to track this variable initially, until it’s determined that there are no other factors causing a big difference between MySQL performance and VTTablet performance. - -##### Transactions - -Transactions is a histogram variable that tracks transactions. The categories are "Completed" and “Aborted”. - -##### Waits - -Waits is a histogram variable that tracks various waits in the system. Right now, the only category is "Consolidations". A consolidation happens when one query waits for the results of an identical query already executing, thereby saving the database from performing duplicate work. - -This variable used to report connection pool waits, but a refactor moved those variables out into the pool related vars. - -##### Errors - -``` - "Errors": { - "Deadlock": 0, - "Fail": 1, - "NotInTx": 0, - "TxPoolFull": 0 - }, -``` - -Errors are reported under different categories. 
It’s beneficial to track each category separately as it will be more helpful for troubleshooting. Right now, there are four categories. The category list may vary as Vitess evolves. - -Plotting errors/query can sometimes be useful for troubleshooting. - -VTTablet also exports an InfoErrors variable that tracks inconsequential errors that don’t signify any kind of problem with the system. For example, a dup key on insert is considered normal because apps tend to use that error to instead update an existing row. So, no monitoring is needed for that variable. - -##### InternalErrors - -``` - "InternalErrors": { - "HungQuery": 0, - "Invalidation": 0, - "MemcacheStats": 0, - "Mismatch": 0, - "Panic": 0, - "Schema": 0, - "StrayTransactions": 0, - "Task": 0 - }, -``` - -An internal error is an unexpected situation in code that may possibly point to a bug. Such errors may not cause outages, but even a single error needs to be escalated for root cause analysis. - -##### Kills - -``` - "Kills": { - "Queries": 2, - "Transactions": 0 - }, -``` - -Kills reports the queries and transactions killed by VTTablet due to timeout. It’s a very important variable to look at during outages. - -##### TransactionPool* - -There are a few variables with the above prefix: - -``` - "TransactionPoolAvailable": 300, - "TransactionPoolCapacity": 300, - "TransactionPoolIdleTimeout": 600000000000, - "TransactionPoolMaxCap": 300, - "TransactionPoolTimeout": 30000000000, - "TransactionPoolWaitCount": 0, - "TransactionPoolWaitTime": 0, -``` - -* WaitCount will give you how often the transaction pool gets full that causes new transactions to wait. -* WaitTime/WaitCount will tell you the average wait time. -* Available is a gauge that tells you the number of available connections in the pool in real-time. Capacity-Available is the number of connections in use. Note that this number could be misleading if the traffic is spiky. 
- -##### Other Pool variables - -Just like TransactionPool, there are variables for other pools: - -* ConnPool: This is the pool used for read traffic. -* StreamConnPool: This is the pool used for streaming queries. - -There are other internal pools used by VTTablet that are not very consequential. - -##### TableACLAllowed, TableACLDenied, TableACLPseudoDenied - -The above three variables report table acl stats broken out by table, plan and user. - -##### QueryPlanCacheSize - -If the application does not make good use of bind variables, this value would reach the QueryCacheCapacity. If so, inspecting the current query cache will give you a clue about where the misuse is happening. - -##### QueryCounts, QueryErrorCounts, QueryRowCounts, QueryTimesNs - -These variables are another multi-dimensional view of Queries. They have a lot more data than Queries because they’re broken out into tables as well as plan. This is a priceless source of information when it comes to troubleshooting. If an outage is related to rogue queries, the graphs plotted from these vars will immediately show the table on which such queries are run. After that, a quick look at the detailed query stats will most likely identify the culprit. - -##### UserTableQueryCount, UserTableQueryTimesNs, UserTransactionCount, UserTransactionTimesNs - -These variables are yet another view of Queries, but broken out by user, table and plan. If you have well-compartmentalized app users, this is another priceless way of identifying a rogue "user app" that could be misbehaving. - -##### DataFree, DataLength, IndexLength, TableRows - -These variables are updated periodically from information_schema.tables. They represent statistical information as reported by MySQL about each table. They can be used for planning purposes, or to track unusual changes in table stats. 
- -* DataFree represents data_free -* DataLength represents data_length -* IndexLength represents index_length -* TableRows represents table_rows - -#### /debug/health - -This URL prints out a simple "ok" or “not ok” string that can be used to check if the server is healthy. The health check makes sure mysqld connections work, and replication is configured (though not necessarily running) if not master. - -#### /queryz, /debug/query_stats, /debug/query_plans, /streamqueryz - -* /debug/query_stats is a JSON view of the per-query stats. This information is pulled in real-time from the query cache. The per-table stats in /debug/vars are a roll-up of this information. -* /queryz is a human-readable version of /debug/query_stats. If a graph shows a table as a possible source of problems, this is the next place to look at to see if a specific query is the root cause. -* /debug/query_plans is a more static view of the query cache. It just shows how VTTablet will process or rewrite the input query. -* /streamqueryz lists the currently running streaming queries. You have the option to kill any of them from this page. - -#### /querylogz, /debug/querylog, /txlogz, /debug/txlog - -* /debug/querylog is a never-ending stream of currently executing queries with verbose information about each query. This URL can generate a lot of data because it streams every query processed by VTTablet. The details are as per this function: [https://github.com/vitessio/vitess/tree/master/go/vt/vttablet/tabletserver/tabletenv/logstats.go#L202](https://github.com/vitessio/vitess/tree/master/go/vt/vttablet/tabletserver/tabletenv/logstats.go#L202) -* /querylogz is a limited human readable version of /debug/querylog. It prints the next 300 queries by default. The limit can be specified with a limit=N parameter on the URL. -* /txlogz is like /querylogz, but for transactions. -* /debug/txlog is the JSON counterpart to /txlogz. - -#### /consolidations - -This URL has an MRU list of consolidations. 
This is a way of identifying if multiple clients are spamming the same query to a server. - -#### /schemaz, /debug/schema - -* /schemaz shows the schema info loaded by VTTablet. -* /debug/schema is the JSON version of /schemaz. - -#### /debug/query_rules - -This URL displays the currently active query blacklist rules. - -### Alerting - -Alerting is built on top of the variables you monitor. Before setting up alerts, you should get some baseline stats and variance, and then you can build meaningful alerting rules. You can use the following list as a guideline to build your own: - -* Query latency among all vttablets -* Per keyspace latency -* Errors/query -* Memory usage -* Unhealthy for too long -* Too many vttablets down -* Health has been flapping -* Transaction pool full error rate -* Any internal error -* Traffic out of balance among replicas -* Qps/core too high - -## VTGate - -A typical VTGate should be provisioned as follows. - -* 2-4 cores -* 2-4 GB RAM - -Since VTGate is stateless, you can scale it linearly by just adding more servers as needed. Beyond the recommended values, it’s better to add more VTGates than giving more resources to existing servers, as recommended in the philosophy section. - -Load-balancer in front of vtgate to scale up (not covered by Vitess). Stateless, can use the health URL for health check. - -### Parameters - -* **cells_to_watch**: which cell vtgate is in and will monitor tablets from. Cross-cell master access needs multiple cells here. -* **keyspaces_to_watch**: Specifies that a vtgate will only be able to perform queries against or view the topology of these keyspaces -* **tablet_types_to_wait**: VTGate waits for at least one serving tablet per tablet type specified here during startup, before listening to the serving port. So VTGate does not serve error. It should match the available tablet types VTGate connects to (master, replica, rdonly). 
- -* **discovery_low_replication_lag**: when replication lags of all VTTablet in a particular shard and tablet type are less than or equal to the flag (in seconds), VTGate does not filter them by replication lag and uses all to balance traffic. -* **degraded_threshold (30s)**: a tablet will publish itself as degraded if replication lag exceeds this threshold. This will cause VTGates to choose more up-to-date servers over this one. If all servers are degraded, VTGate resorts to serving from all of them. -* **unhealthy_threshold (2h)**: a tablet will publish itself as unhealthy if replication lag exceeds this threshold. -* **transaction_mode (multi)**: `single`: disallow multi-db transactions, `multi`: allow multi-db transactions with best effort commit, `twopc`: allow multi-db transactions with 2pc commit. -* **normalize_queries (false)**: Turning this flag on will cause vtgate to rewrite queries with bind vars. This is beneficial if the app doesn't itself send normalized queries. - -### Monitoring - -#### /debug/status - -This is the landing page for a VTGate, which gives you a status on how a particular server is doing. Of particular interest there is the list of tablets this vtgate process is connected to, as this is the list of tablets that can potentially serve queries. - -#### /debug/vars - -##### VTGateApi - -This is the main histogram variable to track for vtgates. It gives you a break up of all queries by command, keyspace, and type. - -##### HealthcheckConnections - -It shows the number of tablet connections for query/healthcheck per keyspace, shard, and tablet type. - -#### /debug/query_plans - -This URL gives you all the query plans for queries going through VTGate. - -#### /debug/vschema - -This URL shows the vschema as loaded by VTGate. 
- -### Alerting - -For VTGate, here’s a list of possible variables to alert on: - -* Error rate -* Error/query rate -* Error/query/tablet-type rate -* VTGate serving graph is stale by x minutes (lock server is down) -* Qps/core -* Latency - - - -## External processes - -Things that need to be configured: - -### Periodic backup configuration - -We recommend to take backups regularly e.g. you should set up a cron job for it. See our recommendations at [{% link user-guide/backup-and-restore.md %}#backup-frequency]({% link user-guide/backup-and-restore.md %}#backup-frequency). - -### Logs archiver/purger - -You will need to run some cron jobs to archive or purge log files periodically. - -### Orchestrator - -[Orchestrator](https://github.com/github/orchestrator) is a tool for -managing MySQL replication topologies, including automated failover. -It can detect master failure and initiate a recovery in a matter of seconds. - -For the most part, Vitess is agnostic to the actions of Orchestrator, -which operates below Vitess at the MySQL level. That means you can -pretty much -[set up Orchestrator](https://github.com/github/orchestrator/wiki/Orchestrator-Manual) -in the normal way, with just a few additions as described below. - -For the [Kubernetes example](https://github.com/vitessio/website/blob/master/content/docs/tutorials/kubernetes.md), we provide a -[sample script](https://github.com/vitessio/vitess/blob/master/examples/kubernetes/orchestrator-up.sh) -to launch Orchestrator for you with these settings applied. - -#### Orchestrator configuration - -Orchestrator needs to know some things from the Vitess side, -like the tablet aliases and whether semisync is enforced -(with async fallback disabled). 
-We pass this information by telling Orchestrator to execute certain -queries that return local metadata from a non-replicated table, -as seen in our sample -[orchestrator.conf.json](https://github.com/vitessio/vitess/blob/master/docker/orchestrator/orchestrator.conf.json): - -```json - "DetectClusterAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='ClusterAlias'", - "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'", - "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'", - "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000", -``` - -There is also one thing that Vitess needs to know from Orchestrator, -which is the identity of the master for each shard, if a failover occurs. - -From our experience at YouTube, we believe that this signal is too critical -for data integrity to rely on bottom-up detection such as asking each MySQL -if it thinks it's the master. Instead, we rely on Orchestrator to be the -source of truth, and expect it to send a top-down signal to Vitess. - -This signal is sent by ensuring the Orchestrator server has access to -`vtctlclient`, which it then uses to send an RPC to vtctld, informing -Vitess of the change in mastership via the -[TabletExternallyReparented]({% link reference/vtctl.md %}#tabletexternallyreparented) -command. - -```json - "PostMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log", - "vtctlclient -server vtctld:15999 TabletExternallyReparented {successorAlias}" - ], -``` - -#### VTTablet configuration - -Normally, you need to seed Orchestrator by giving it the addresses of -MySQL instances in each shard. If you have lots of shards, this could -be tedious or error-prone. 
- -Luckily, Vitess already knows everything about all the MySQL instances -that comprise your cluster. So we provide a mechanism for tablets to -self-register with the Orchestrator API, configured by the following -vttablet parameters: - -* **orc_api_url**: Address of Orchestrator's HTTP API (e.g. http://host:port/api/). Leave empty to disable Orchestrator integration. -* **orc_discover_interval**: How often (e.g. 60s) to ping Orchestrator's HTTP API endpoint to tell it we exist. 0 means never. - -Not only does this relieve you from the initial seeding of addresses into -Orchestrator, it also means new instances will be discovered immediately, -and the topology will automatically repopulate even if Orchestrator's -backing store is wiped out. Note that Orchestrator will forget stale -instances after a configurable timeout. diff --git a/doc/Sharding.md b/doc/Sharding.md deleted file mode 100644 index 1ab77855db7..00000000000 --- a/doc/Sharding.md +++ /dev/null @@ -1,187 +0,0 @@ -Sharding is a method of horizontally partitioning a database to store -data across two or more database servers. This document explains how -sharding works in Vitess and the types of sharding that Vitess supports. - -## Overview - -A keyspace in Vitess can be sharded or unsharded. An unsharded keyspace -maps directly to a MySQL database. If sharded, the rows of the keyspace -are partitioned into different databases of identical schema. - -For example, if an application's "user" keyspace is split into two -shards, each shard contains records for approximately half of the -application's users. Similarly, each user's information is stored -in only one shard. - -Note that sharding is orthogonal to (MySQL) replication. -A Vitess shard typically contains one MySQL master and many MySQL -slaves. The master handles write operations, while slaves handle -read-only traffic, batch processing operations, and other tasks. 
-Each MySQL instance within the shard should have the same data, -excepting some replication lag. - -### Supported Operations - -Vitess supports the following types of sharding operations: - -* **Horizontal sharding:** Splitting or merging shards in a sharded keyspace -* **Vertical sharding:** Moving tables from an unsharded keyspace to - a different keyspace. - -With these features, you can start with a single keyspace that contains -all of your data (in multiple tables). As your database grows, you can -move tables to different keyspaces (vertical split) and shard some or -all of those keyspaces (horizontal split) without any real downtime -for your application. - -## Sharding scheme - -Vitess allows you to choose the type of sharding scheme by the choice of -your Primary Vindex for the tables of a shard. Once you have chosen -the Primary Vindex, you can choose the partitions depending on how the -resulting keyspace IDs are distributed. - -Vitess calculates the sharding key or keys for each query and then -routes that query to the appropriate shards. For example, a query -that updates information about a particular user might be directed to -a single shard in the application's "user" keyspace. On the other hand, -a query that retrieves information about several products might be -directed to one or more shards in the application's "product" keyspace. - -### Key Ranges and Partitions - -Vitess uses key ranges to determine which shards should handle any -particular query. - -* A **key range** is a series of consecutive keyspace ID values. It - has starting and ending values. A key falls inside the range if - it is equal to or greater than the start value and strictly less - than the end value. -* A **partition** represents a set of key ranges that covers the entire - space. - -When building the serving graph for a sharded keyspace, -Vitess ensures that each shard is valid and that the shards -collectively constitute a full partition. 
In each keyspace, one shard -must have a key range with an empty start value and one shard, which -could be the same shard, must have a key range with an empty end value. - -* An empty start value represents the lowest value, and all values are - greater than it. -* An empty end value represents a value larger than the highest possible - value, and all values are strictly lower than it. - -Vitess always converts sharding keys to a left-justified binary string for -computing a shard. This left-justification makes the right-most zeroes -insignificant and optional. Therefore, the value 0x80 is -always the middle value for sharding keys. -So, in a keyspace with two shards, sharding keys that have a binary -value lower than 0x80 are assigned to one shard. Keys with a binary -value equal to or higher than 0x80 are assigned to the other shard. - -Several sample key ranges are shown below: - -``` sh -Start=[], End=[]: Full Key Range -Start=[], End=[0x80]: Lower half of the Key Range. -Start=[0x80], End=[]: Upper half of the Key Range. -Start=[0x40], End=[0x80]: Second quarter of the Key Range. -Start=[0xFF00], End=[0xFF80]: Second to last 1/512th of the Key Range. -``` - -Two key ranges are consecutive if the end value of one range equals the -start value of the other range. - -### Shard Names - -A shard's name identifies the start -and end of the shard's key range, printed in hexadecimal and separated -by a hyphen. For instance, if a shard's key range is the array of bytes -beginning with [ 0x80 ] and ending, noninclusively, with [ 0xc0], then -the shard's name is 80-c0. - -Using this naming convention, the following four shards would be a valid -full partition: - -* -40 -* 40-80 -* 80-c0 -* c0- - -Shards do not need to handle the same size portion of the key space. For example, the following five shards would also be a valid full partition, possibly with a highly uneven distribution of keys. 
- -* -80 -* 80-c0 -* c0-dc00 -* dc00-dc80 -* dc80- - -## Resharding - -Resharding describes the process of updating the sharding -scheme for a keyspace and dynamically reorganizing data to match the -new scheme. During resharding, Vitess copies, verifies, and keeps -data up-to-date on new shards while the existing shards continue to -serve live read and write traffic. When you're ready to switch over, -the migration occurs with only a few seconds of read-only downtime. -During that time, existing data can be read, but new data cannot be -written. - -The table below lists the sharding (or resharding) processes that you -would typically perform for different types of requirements: - -Requirement | Action ------------ | ------ -Uniformly increase read capacity | Add replicas or split shards -Uniformly increase write capacity | Split shards -Reclaim overprovisioned resources | Merge shards and/or keyspaces -Increase geo-diversity | Add new cells and replicas -Cool a hot tablet | For read access, add replicas or split shards. For write access, split shards. - -### Filtered Replication - -The cornerstone of resharding is replicating the right data. Vitess -implements the following functions to support filtered replication, -the process that ensures that the correct source tablet data is -transferred to the proper destination tablets. - -#### Statement-based Replication - -If you've configured the MySQL servers to use Statement-based Replication (SBR), -then Vitess must be able to identify the destination for such statements during -the filtered replication process. This is performed the following way: - -1. The source tablet tags transactions with comments so that MySQL binlogs - contain the filtering data needed during the resharding process. The - comments describe the scope of each transaction (its keyspace ID, - table, etc.). -1. A server process uses the comments to filter the MySQL binlogs and - stream the correct data to the destination tablet. -1. 
A client process on the destination tablet applies the filtered logs, - which are just regular SQL statements at this point. - -#### Row-based Replication - -If MySQL is configured to use Row-based Replication (RBR), the filtered replication -is performed the following way: - -1. The server process uses the primary vindex to compute the keyspace ID for every - row coming through the replication stream, and sends that row to the corresponding - target shard. -1. The target shard converts the row into the corresponding DML (Data Manipulation Language) - and applies the statement. - -If using RBR, it's generally required that you have full image turned on. However, if your -Primary Vindex is also part of the Primary key, it's not required, because every RBR event -will always contain the full primary key of its affected row. - -### Additional Tools and Processes - -Vitess provides the following tools to help manage range-based shards: - -* The [vtctl]({% link reference/vtctl.md %}) command-line tool supports - functions for managing keyspaces, shards, tablets, and more. -* Client APIs account for sharding operations. -* The [MapReduce framework](https://github.com/vitessio/vitess/tree/master/java/hadoop/src/main/java/io/vitess/hadoop) - fully utilizes key ranges to read data as quickly as possible, - concurrently from all shards and all replicas. diff --git a/doc/ShardingKubernetes.md b/doc/ShardingKubernetes.md deleted file mode 100644 index 2181b2e004c..00000000000 --- a/doc/ShardingKubernetes.md +++ /dev/null @@ -1,286 +0,0 @@ -This guide walks you through the process of sharding an existing unsharded -Vitess [keyspace]({% link overview/concepts.md %}#keyspace) in -[Kubernetes](https://kubernetes.io/). - -## Prerequisites - -We begin by assuming you've completed the -[Getting Started on Kubernetes]({% link getting-started/index.md %}) guide, and -have left the cluster running. 
- -## Overview - -We will follow a process similar to the one in the general -[Horizontal Sharding]({% link user-guide/horizontal-sharding.md %}) -guide, except that here we'll give the commands you'll need to do it for -the example Vitess cluster in Kubernetes. - -Since Vitess makes [sharding]({% link user-guide/sharding.md %}) -transparent to the app layer, the -[Guestbook](https://github.com/vitessio/vitess/tree/master/examples/kubernetes/guestbook) -sample app will stay live throughout the -[resharding]({% link user-guide/sharding.md %}#resharding) process, -confirming that the Vitess cluster continues to serve without downtime. - -## Configure sharding information - -The first step is to tell Vitess how we want to partition the data. -We do this by providing a VSchema definition as follows: - -``` json -{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "messages": { - "column_vindexes": [ - { - "column": "page", - "name": "hash" - } - ] - } - } -} -``` - -This says that we want to shard the data by a hash of the `page` column. -In other words, keep each page's messages together, but spread pages around -the shards randomly. - -We can load this VSchema into Vitess like this: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh ApplyVSchema -vschema "$(cat vschema.json)" test_keyspace -``` - -## Bring up tablets for new shards - -In the unsharded example, you started tablets for a shard -named *0* in *test_keyspace*, written as *test_keyspace/0*. -Now you'll start tablets for two additional shards, -named *test_keyspace/-80* and *test_keyspace/80-*: - -``` sh -vitess/examples/kubernetes$ ./sharded-vttablet-up.sh -### example output: -# Creating test_keyspace.shard--80 pods in cell test... -# ... -# Creating test_keyspace.shard-80- pods in cell test... -# ... 
-``` - -Since the sharding key in the Guestbook app is the page number, -this will result in half the pages going to each shard, -since *0x80* is the midpoint of the -[sharding key range]({% link user-guide/sharding.md %}#key-ranges-and-partitions). - -These new shards will run in parallel with the original shard during the -transition, but actual traffic will be served only by the original shard -until we tell it to switch over. - -Check the `vtctld` web UI, or the output of `kvtctl.sh ListAllTablets test`, -to see when the tablets are ready. There should be 5 tablets in each shard. - -Once the tablets are ready, initialize replication by electing the first master -for each of the new shards: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh InitShardMaster -force test_keyspace/-80 test-0000000200 -vitess/examples/kubernetes$ ./kvtctl.sh InitShardMaster -force test_keyspace/80- test-0000000300 -``` - -Now there should be a total of 15 tablets, with one master for each shard: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh ListAllTablets test -### example output: -# test-0000000100 test_keyspace 0 master 10.64.3.4:15002 10.64.3.4:3306 [] -# ... -# test-0000000200 test_keyspace -80 master 10.64.0.7:15002 10.64.0.7:3306 [] -# ... -# test-0000000300 test_keyspace 80- master 10.64.0.9:15002 10.64.0.9:3306 [] -# ... -``` - -## Copy data from original shard - -The new tablets start out empty, so we need to copy everything from the -original shard to the two new ones, starting with the schema: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/-80 -vitess/examples/kubernetes$ ./kvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/80- -``` - -Next we copy the data. 
Since the amount of data to copy can be very large, -we use a special batch process called *vtworker* to stream the data from a -single source to multiple destinations, routing each row based on its -*keyspace_id*: - -``` sh -vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitClone test_keyspace/0 -### example output: -# Creating vtworker pod in cell test... -# pods/vtworker -# Following vtworker logs until termination... -# I0416 02:08:59.952805 9 instance.go:115] Starting worker... -# ... -# State: done -# Success: -# messages: copy done, copied 11 rows -# Deleting vtworker pod... -# pods/vtworker -``` - -Notice that we've only specified the source shard, *test_keyspace/0*. -The *SplitClone* process will automatically figure out which shards to use -as the destinations based on the key range that needs to be covered. -In this case, shard *0* covers the entire range, so it identifies -*-80* and *80-* as the destination shards, since they combine to cover the -same range. - -Next, it will pause replication on one *rdonly* (offline processing) tablet -to serve as a consistent snapshot of the data. The app can continue without -downtime, since live traffic is served by *replica* and *master* tablets, -which are unaffected. Other batch jobs will also be unaffected, since they -will be served only by the remaining, un-paused *rdonly* tablets. - -## Check filtered replication - -Once the copy from the paused snapshot finishes, *vtworker* turns on -[filtered replication]({% link user-guide/sharding.md %}#filtered-replication) -from the source shard to each destination shard. This allows the destination -shards to catch up on updates that have continued to flow in from the app since -the time of the snapshot. - -When the destination shards are caught up, they will continue to replicate -new updates. You can see this by looking at the contents of each shard as -you add new messages to various pages in the Guestbook app. 
Shard *0* will -see all the messages, while the new shards will only see messages for pages -that live on that shard. - -``` sh -# See what's on shard test_keyspace/0: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages" -``` - -Add some messages on various pages of the Guestbook to see how they get routed. - -## Check copied data integrity - -The *vtworker* batch process has another mode that will compare the source -and destination to ensure all the data is present and correct. -The following commands will run a diff for each destination shard: - -``` sh -vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitDiff test_keyspace/-80 -vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitDiff test_keyspace/80- -``` - -If any discrepancies are found, they will be printed. -If everything is good, you should see something like this: - -``` -I0416 02:10:56.927313 10 split_diff.go:496] Table messages checks out (4 rows processed, 1072961 qps) -``` - -## Switch over to new shards - -Now we're ready to switch over to serving from the new shards. -The [MigrateServedTypes]({% link reference/vtctl.md %}#migrateservedtypes) -command lets you do this one -[tablet type]({% link overview/concepts.md %}#tablet) at a time, -and even one [cell]({% link overview/concepts.md %}#cell-data-center) -at a time. The process can be rolled back at any point *until* the master is -switched over. 
- -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh MigrateServedTypes test_keyspace/0 rdonly -vitess/examples/kubernetes$ ./kvtctl.sh MigrateServedTypes test_keyspace/0 replica -vitess/examples/kubernetes$ ./kvtctl.sh MigrateServedTypes test_keyspace/0 master -``` - -During the *master* migration, the original shard master will first stop -accepting updates. Then the process will wait for the new shard masters to -fully catch up on filtered replication before allowing them to begin serving. -Since filtered replication has been following along with live updates, there -should only be a few seconds of master unavailability. - -When the master traffic is migrated, the filtered replication will be stopped. -Data updates will be visible on the new shards, but not on the original shard. -See it for yourself: Add a message to the guestbook page and then inspect -the database content: - -``` sh -# See what's on shard test_keyspace/0 -# (no updates visible since we migrated away from it): -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages" -``` - -## Remove original shard - -Now that all traffic is being served from the new shards, we can remove the -original one. To do that, we use the `vttablet-down.sh` script from the -unsharded example: - -``` sh -vitess/examples/kubernetes$ ./vttablet-down.sh -### example output: -# Deleting pod for tablet test-0000000100... -# pods/vttablet-100 -# ... 
-``` - -Then we can delete the now-empty shard: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh DeleteShard -recursive test_keyspace/0 -``` - -You should then see in the vtctld **Topology** page, or in the output of -`kvtctl.sh ListAllTablets test` that the tablets for shard *0* are gone. - -## Tear down and clean up - -Before stopping the Container Engine cluster, you should tear down the Vitess -services. Kubernetes will then take care of cleaning up any entities it created -for those services, like external load balancers. - -Since you already cleaned up the tablets from the original unsharded example by -running `./vttablet-down.sh`, that step has been replaced with -`./sharded-vttablet-down.sh` to clean up the new sharded tablets. - -``` sh -vitess/examples/kubernetes$ ./guestbook-down.sh -vitess/examples/kubernetes$ ./vtgate-down.sh -vitess/examples/kubernetes$ ./sharded-vttablet-down.sh -vitess/examples/kubernetes$ ./vtctld-down.sh -vitess/examples/kubernetes$ ./etcd-down.sh -``` - -Then tear down the Container Engine cluster itself, which will stop the virtual -machines running on Compute Engine: - -``` sh -$ gcloud container clusters delete example -``` - -It's also a good idea to remove the firewall rules you created, unless you plan -to use them again soon: - -``` sh -$ gcloud compute firewall-rules delete vtctld guestbook -``` diff --git a/doc/ShardingKubernetesWorkflow.md b/doc/ShardingKubernetesWorkflow.md deleted file mode 100644 index a428861c9ef..00000000000 --- a/doc/ShardingKubernetesWorkflow.md +++ /dev/null @@ -1,222 +0,0 @@ -This guide shows you an example about how to apply range-based sharding -process in an existing unsharded Vitess [keyspace]({% link overview/concepts.md %}#keyspace) -in [Kubernetes](https://kubernetes.io/) using the horizontal resharding workflow. -In this example, we will reshard from 1 shard "0" into 2 shards "-80" and "80-". 
-We will follow a process similar to the general -[Horizontal Sharding guide]({% link user-guide/horizontal-sharding-workflow.md %}) -except that here we'll give you the commands you'll need in the kubernetes -environment. - -## Overview - -The horizontal resharding process overview can be found -[here]({% link user-guide/horizontal-sharding-workflow.md %}#overview) - -## Prerequisites - -You should complete the [Getting Started on Kubernetes]({% link getting-started/index.md %}) -guide (please finish all the steps before Try Vitess resharding) and have left -the cluster running. Then, please follow these steps before running the -resharding process: - -1. Configure sharding information. By running the command below, we tell - Vitess to shard the data using the page column through the provided VSchema. - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh ApplyVSchema -vschema "$(cat vschema.json)" test_keyspace - ``` - -1. Bring up tablets for 2 additional shards: *test_keyspace/-80* and - *test_keyspace/80-* (you can learn more about sharding key range - [here]({% link user-guide/sharding.md %}#key-ranges-and-partitions)): - - ``` sh - vitess/examples/kubernetes$ ./sharded-vttablet-up.sh - ``` - - Initialize replication by electing the first master for each of the new shards: - - ``` sh - vitess/examples/kubernetes$ ./kvtctl.sh InitShardMaster -force test_keyspace/-80 test-200 - vitess/examples/kubernetes$ ./kvtctl.sh InitShardMaster -force test_keyspace/80- test-300 - ``` - - After this set up, you should see the shards on Dashboard page of vtctld UI - (http://localhost:8001/api/v1/proxy/namespaces/default/services/vtctld:web). - There should be 1 serving shard named "0" and 2 non-serving shards named - "-80" and "80-". Click the shard node, you can inspect all its tablets - information. - -1. Bring up a *vtworker* process (a pod in kubernetes) and a *vtworker* service - which is used by the workflow to connect with the *vtworker* pod. 
(The - number of *vtworker* processes should be the same as the number of original - shards; we start one vtworker process here since we have only one original - shard in this example.) - - ``` sh - vitess/examples/kubernetes$ ./vtworker-up.sh - ``` - - You can check out the external IP for the vtworker service (please take note - of this external IP, it will be used for the vtworker address in creating - the resharding workflow): - - ``` sh - $ kubectl get service vtworker - ``` - - You can verify this *vtworker* process set up through http://<EXTERNAL-IP>:15032/Debugging. - It should be pinged successfully. After you ping the vtworker, please click - "Reset Job". Otherwise, the vtworker is not ready for executing other tasks. - -## Horizontal Resharding Workflow - -### Create the Workflow - -Using the web vtctld UI to create the workflow is the same as the [steps in local -environment]({% link user-guide/horizontal-sharding-workflow.md %}#create-the-workflow) -except for filling the "vtworker Addresses" slot. You need to get the external -IP for the vtworker service (mentioned in -[Prerequisites](#prerequisites)) and use -<EXTERNAL-IP>:15033 as the vtworker addresses.
- -Another way to start the workflow is through the vtctlclient command: - -``` sh -vitess/examples/kubernetes$ ./kvtctl.sh WorkflowCreate -skip_start=false horizontal_resharding -keyspace=test_keyspace -vtworkers=:15033 -enable_approvals=true -``` - -### Approvals of Tasks Execution (Canary feature) - -Please check the content in general -[Horizontal Sharding guide]({% link user-guide/horizontal-sharding-workflow.md %}#approvals-of-tasks-execution-canary-feature) - -### Retry - -Please check the content in general -[Horizontal Sharding guide]({% link user-guide/horizontal-sharding-workflow.md %}#retry) - -### Checkpoint and Recovery - -Please check the content in general -[Horizontal Sharding guide]({% link user-guide/horizontal-sharding-workflow.md %}#checkpoint-and-recovery) - -## Verify Results and Clean up - -After the resharding process, data in the original shard is identically copied -to new shards. Data updates will be visible on the new shards, but not on the -original shard. You should then see in the vtctld UI *Dashboard* page that shard -*0* becomes non-serving and shard *-80* and shard *80-* are serving shards. -Verify this for yourself: inspect the database content, -then add messages to the guestbook page and inspect again. You can use -http://\ (EXTERNAL-IP refers to the external IP of the guest book -service) to visit the guestbook webpage in your browser and choose any random -page for inserting information. 
Details can be found -[here]({% link getting-started/index.md %}#test-your-cluster-with-a-client-app)) -You can inspect the database content using the following commands: - -``` sh -# See what's on shard test_keyspace/0 -# (no updates visible since we migrated away from it): -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-100 "SELECT * FROM messages" -# See what's on shard test_keyspace/-80: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-200 "SELECT * FROM messages" -# See what's on shard test_keyspace/80-: -vitess/examples/kubernetes$ ./kvtctl.sh ExecuteFetchAsDba test-300 "SELECT * FROM messages" -``` - -You can also checkout the *Topology* browser on vtctl UI. It shows you the -information of the keyrange of shard and their serving status. Each shard -should look like this - -[shard 0](https://cloud.githubusercontent.com/assets/23492389/24313876/072f61e6-109c-11e7-938a-23b8398958aa.png) - -[shard -80](https://cloud.githubusercontent.com/assets/23492389/24313813/bd11c824-109b-11e7-83d4-cca3f6093360.png) - -[shard 80-](https://cloud.githubusercontent.com/assets/23492389/24313743/7f9ae1c4-109b-11e7-997a-774f4f16e473.png) - -After you verify the result, we can remove the -original shard since all traffic is being served from the new shards: - -``` sh -vitess/examples/kubernetes$ ./vttablet-down.sh -``` - -Then we can delete the now-empty shard: - -``` sh -vitess/examples/local$ ./kvtctl.sh DeleteShard -recursive test_keyspace/0 -``` - -You should then see in the vtctld UI *Dashboard* page that shard *0* is gone. - -## Tear down and Clean up - -Since you already cleaned up the tablets from the original unsharded example by -running `./vttablet-down.sh`, that step has been replaced with -`./sharded-vttablet-down.sh` to clean up the new sharded tablets. 
- -``` sh -vitess/examples/kubernetes$ ./guestbook-down.sh -vitess/examples/kubernetes$ ./vtworker-down.sh -vitess/examples/kubernetes$ ./vtgate-down.sh -vitess/examples/kubernetes$ ./sharded-vttablet-down.sh -vitess/examples/kubernetes$ ./vtctld-down.sh -vitess/examples/kubernetes$ ./etcd-down.sh -``` - -Then tear down the Container Engine cluster itself, which will stop the virtual machines running on Compute Engine: - -``` sh -$ gcloud container clusters delete example -``` - -It's also a good idea to remove the firewall rules you created, unless you plan to use them again soon: - -``` sh -$ gcloud compute firewall-rules delete vtctld guestbook -``` - -## Reference - -You can checkout the old version tutorial [here]({% link user-guide/sharding-kubernetes.md %}). -It walks you through the resharding process by manually executing commands. - -For the kubectl command line interface, which helps you interact with the -kubernetes cluster, you can check out more information -[here](https://kubernetes.io/docs/user-guide/kubectl-overview). - -## Troubleshooting - -### Checking status of your setup. - -To get status of pods and services you've setup, you can use the commands -(all pods should be in Running status, guestbook and vtworker services -should have assign external IP): - -``` sh -$ kubectl get pods -$ kubectl get services -``` - -### Debugging pods. - -If you find out a component (e.g. vttablet, vtgate) doesn't respond as -expected, you can surface the log using this command (the pod name can be -found out using the command mentioned above): - -``` sh -$ kubectl logs [-c ] -### example -# $ kubectl logs vtworker -# $ kubectl logs vttablet-XXXX -c vttablet -``` - -### Debugging pending external IP issue. - -If you found that your service has a pending external IP for long time, it -maybe because you've reached the limitation of networking resource. 
Please -go to your project console on gcloud (cloud.google.com), then go to *Load -balancing* page (you can search "Load balancing" in the search bar to get -to the page) under Networking section. Then, click "advanced menu" for -editing load balancing resources. Check the forwarding rules you have and -delete the unused ones if there are too many. diff --git a/doc/TestingOnARamDisk.md b/doc/TestingOnARamDisk.md deleted file mode 100644 index 1adb82cf273..00000000000 --- a/doc/TestingOnARamDisk.md +++ /dev/null @@ -1,28 +0,0 @@ -# Testing On A Ramdisk - -The `integration_test` testsuite contains tests that may time-out if run against a slow disk. If your workspace lives on hard disk (as opposed to [SSD](https://en.wikipedia.org/wiki/Solid-state_drive)), it is recommended that you run tests using a [ramdisk](https://en.wikipedia.org/wiki/RAM_drive). - -# Setup - -First, set up a normal vitess development environment by running `bootstrap.sh` and sourcing `dev.env` (see [GettingStarted](GettingStarted.md)). Then overwrite the testing temporary directories and make a 4GiB (smaller sizes may work, if you're constrained on RAM) ramdisk at the location of your choice (this example uses `/tmp/vt`): - -```sh -export VT_TEST_TMPDIR=/tmp/vt - -mkdir ${VT_TEST_TMPDIR} -sudo mount -t tmpfs -o size=4g tmpfs ${VT_TEST_TMPDIR} - -export VTDATAROOT=${VT_TEST_TMPDIR} -export TEST_UNDECLARED_OUTPUTS_DIR=${VT_TEST_TMPDIR} -``` - -You can now run tests (either individually or as part of `make test`) normally. - -# Teardown - -When you are done testing, you can remove the ramdisk by unmounting it and then removing the directory: - -```sh -sudo umount ${VT_TEST_TMPDIR} -rmdir ${VT_TEST_TMPDIR} -``` diff --git a/doc/TopologyService.md b/doc/TopologyService.md deleted file mode 100644 index d4c63348d6a..00000000000 --- a/doc/TopologyService.md +++ /dev/null @@ -1,586 +0,0 @@ -# Topology Service - -This document describes the Topology Service, a key part of the Vitess -architecture. 
This service is exposed to all Vitess processes, and is used to -store small pieces of configuration data about the Vitess cluster, and provide -cluster-wide locks. It also supports watches, and master election. - -Concretely, the Topology Service features are implemented by -a [Lock Server](https://en.wikipedia.org/wiki/Distributed_lock_manager), referred -to as Topology Server in the rest of this document. We use a plug-in -implementation and we support multiple Lock Servers (Zookeeper, etcd, Consul, …) -as backends for the service. - -## Requirements and usage - -The Topology Service is used to store information about the Keyspaces, the -Shards, the Tablets, the Replication Graph, and the Serving Graph. We store -small data structures (a few hundred bytes) per object. - -The main contract for the Topology Server is to be very highly available and -consistent. It is understood it will come at a higher latency cost and very low -throughput. - -We never use the Topology Server as an RPC mechanism, nor as a storage system -for logs. We never depend on the Topology Server being responsive and fast to -serve every query. - -The Topology Server must also support a Watch interface, to signal when certain -conditions occur on a node. This is used for instance to know when keyspaces -topology changes (for resharding for instance). - -### Global vs local - -We differentiate two instances of the Topology Server: the Global instance, and -the per-cell Local instance: - -* The Global instance is used to store global data about the topology that - doesn’t change very often, for instance information about Keyspaces and - Shards. The data is independent of individual instances and cells, and needs - to survive a cell going down entirely. -* There is one Local instance per cell, that contains cell-specific information, - and also rolled-up data from the global + local cell to make it easier for - clients to find the data. 
The Vitess local processes should not use the Global - topology instance, but instead the rolled-up data in the Local topology - server as much as possible. - -The Global instance can go down for a while and not impact the local cells (an -exception to that is if a reparent needs to be processed, it might not work). If -a Local instance goes down, it only affects the local tablets in that instance -(and then the cell is usually in bad shape, and should not be used). - -Furthermore, the Vitess processes will not use the Global nor the Local Topology -Server to serve individual queries. They only use the Topology Server to get the -topology information at startup and in the background, but never to directly -serve queries. - -### Recovery - -If a local Topology Server dies and is not recoverable, it can be wiped out. All -the tablets in that cell then need to be restarted so they re-initialize their -topology records (but they won’t lose any MySQL data). - -If the global Topology Server dies and is not recoverable, this is more of a -problem. All the Keyspace / Shard objects have to be re-created. Then the cells -should recover. - -## Global data - -This section describes the data structures stored in the global instance of the -topology server. - -### Keyspace - -The Keyspace object contains various information, mostly about sharding: how is -this Keyspace sharded, what is the name of the sharding key column, is this -Keyspace serving data yet, how to split incoming queries, … - -An entire Keyspace can be locked. We use this during resharding for instance, -when we change which Shard is serving what inside a Keyspace. That way we -guarantee only one operation changes the keyspace data concurrently. - -### Shard - -A Shard contains a subset of the data for a Keyspace. The Shard record in the -global topology contains: - -* the Master tablet alias for this shard (that has the MySQL master). -* the sharding key range covered by this Shard inside the Keyspace. 
-* the tablet types this Shard is serving (master, replica, batch, …), per cell - if necessary. -* if during filtered replication, the source shards this shard is replicating - from. -* the list of cells that have tablets in this shard. -* shard-global tablet controls, like blacklisted tables no tablet should serve - in this shard. - -A Shard can be locked. We use this during operations that affect either the -Shard record, or multiple tablets within a Shard (like reparenting), so multiple -jobs don’t concurrently alter the data. - -### VSchema data - -The VSchema data contains sharding and routing information for -the [VTGate V3](https://github.com/vitessio/vitess/blob/master/doc/VTGateV3Features.md) API. - -## Local data - -This section describes the data structures stored in the local instance (per -cell) of the topology server. - -### Tablets - -The Tablet record has a lot of information about a single vttablet process -running inside a tablet (along with the MySQL process): - -* the Tablet Alias (cell+unique id) that uniquely identifies the Tablet. -* the Hostname, IP address and port map of the Tablet. -* the current Tablet type (master, replica, batch, spare, …). -* which Keyspace / Shard the tablet is part of. -* the sharding Key Range served by this Tablet. -* user-specified tag map (to store per installation data for instance). - -A Tablet record is created before a tablet can be running (either by `vtctl -InitTablet` or by passing the `init_*` parameters to the vttablet process). -The only way a Tablet record will be updated is one of: - -* The vttablet process itself owns the record while it is running, and can - change it. -* At init time, before the tablet starts. -* After shutdown, when the tablet gets deleted. -* If a tablet becomes unresponsive, it may be forced to spare to make it - unhealthy when it restarts. - -### Replication graph - -The Replication Graph allows us to find Tablets in a given Cell / Keyspace / -Shard. 
It used to contain information about which Tablet is replicating from -which other Tablet, but that was too complicated to maintain. Now it is just a -list of Tablets. - -### Serving graph - -The Serving Graph is what the clients use to find the per-cell topology of a -Keyspace. It is a roll-up of global data (Keyspace + Shard). vtgates only open a -small number of these objects and get all they need quickly. - -#### SrvKeyspace - -It is the local representation of a Keyspace. It contains information on what -shard to use for getting to the data (but not information about each individual -shard): - -* the partitions map is keyed by the tablet type (master, replica, batch, …) and - the values are list of shards to use for serving. -* it also contains the global Keyspace fields, copied for fast access. - -It can be rebuilt by running `vtctl RebuildKeyspaceGraph`. It is -automatically rebuilt when a tablet starts up in a cell and the SrvKeyspace -for that cell / keyspace doesn't exist yet. It will also be changed -during horizontal and vertical splits. - -#### SrvVSchema - -It is the local roll-up for the VSchema. It contains the VSchema for all -keyspaces in a single object. - -It can be rebuilt by running `vtctl RebuildVSchemaGraph`. It is automatically -rebuilt when using `vtctl ApplyVSchema` (unless prevented by flags). - -## Workflows involving the Topology Server - -The Topology Server is involved in many Vitess workflows. - -When a Tablet is initialized, we create the Tablet record, and add the Tablet to -the Replication Graph. If it is the master for a Shard, we update the global -Shard record as well. - -Administration tools need to find the tablets for a given Keyspace / Shard: -first we get the list of Cells that have Tablets for the Shard (global topology -Shard record has these) then we use the Replication Graph for that Cell / -Keyspace / Shard to find all the tablets then we can read each tablet record. 
- -When a Shard is reparented, we need to update the global Shard record with the -new master alias. - -Finding a tablet to serve the data is done in two stages: vtgate maintains a -health check connection to all possible tablets, and they report which keyspace -/ shard / tablet type they serve. vtgate also reads the SrvKeyspace object, to -find out the shard map. With these two pieces of information, vtgate can route -the query to the right vttablet. - -During resharding events, we also change the topology a lot. An horizontal split -will change the global Shard records, and the local SrvKeyspace records. A -vertical split will change the global Keyspace records, and the local -SrvKeyspace records. - -## Exploring the data in a Topology Server - -We store the proto3 binary data for each object. - -We use the following paths for the data, in all implementations: - -*Global Cell*: - -* CellInfo path: `cells//CellInfo` -* Keyspace: `keyspaces//Keyspace` -* Shard: `keyspaces//shards//Shard` -* VSchema: `keyspaces//VSchema` - -*Local Cell*: - -* Tablet: `tablets/-/Tablet` -* Replication Graph: `keyspaces//shards//ShardReplication` -* SrvKeyspace: `keyspaces//SrvKeyspace` -* SrvVSchema: `SvrVSchema` - -The `vtctl TopoCat` utility can decode these files when using the -`-decode_proto` option: - -``` sh -TOPOLOGY="-topo_implementation zk2 -topo_global_server_address global_server1,global_server2 -topo_global_root /vitess/global" - -$ vtctl $TOPOLOGY TopoCat -decode_proto -long /keyspaces/*/Keyspace -path=/keyspaces/ks1/Keyspace version=53 -sharding_column_name: "col1" -path=/keyspaces/ks2/Keyspace version=55 -sharding_column_name: "col2" -``` - -The `vtctld` web tool also contains a topology browser (use the `Topology` -tab on the left side). It will display the various proto files, decoded. 
- -## Implementations - -The Topology Server interfaces are defined in our code in `go/vt/topo/`, -specific implementations are in `go/vt/topo/<flavor>/`, and we also have -a set of unit tests for it in `go/vt/topo/test`. - -This part describes the implementations we have, and their specific -behavior. - -If starting from scratch, please use the `zk2`, `etcd2` or `consul` -implementations. We deprecated the old `zookeeper` and `etcd` -implementations. See the migration section below if you want to migrate. - -### Zookeeper `zk2` implementation - -This is the current implementation when using Zookeeper. (The old `zookeeper` -implementation is deprecated). - -The global cell typically has around 5 servers, distributed one in each -cell. The local cells typically have 3 or 5 servers, in different server racks / -sub-networks for higher resilience. For our integration tests, we use a single -ZK server that serves both global and local cells. - -We provide the `zk` utility for easy access to the topology data in -Zookeeper. It can list, read and write files inside any Zookeeper server. Just -specify the `-server` parameter to point to the Zookeeper servers. Note the -vtctld UI can also be used to see the contents of the topology data. - -To configure a Zookeeper installation, let's start with the global cell -service. It is described by the addresses of the servers (comma separated list), -and by the root directory to put the Vitess data in.
For instance, assuming we -want to use servers `global_server1,global_server2` in path `/vitess/global`: - -``` sh -# The root directory in the global server will be created -# automatically, same as when running this command: -# zk -server global_server1,global_server2 touch -p /vitess/global - -# Set the following flags to let Vitess use this global server: -# -topo_implementation zk2 -# -topo_global_server_address global_server1,global_server2 -# -topo_global_root /vitess/global -``` - -Then to add a cell whose local topology servers `cell1_server1,cell1_server2` -will store their data under the directory `/vitess/cell1`: - -``` sh -TOPOLOGY="-topo_implementation zk2 -topo_global_server_address global_server1,global_server2 -topo_global_root /vitess/global" - -# Reference cell1 in the global topology service: -vtctl $TOPOLOGY AddCellInfo \ - -server_address cell1_server1,cell1_server2 \ - -root /vitess/cell1 \ - cell1 -``` - -If only one cell is used, the same Zookeeper instance can be used for both -global and local data. A local cell record still needs to be created, just use -the same server address, and very importantly a *different* root directory. - -[Zookeeper -Observers](https://zookeeper.apache.org/doc/trunk/zookeeperObservers.html) can -also be used to limit the load on the global Zookeeper. They are configured by -specifying the addresses of the observers in the server address, after a `|`, -for instance: -`global_server1:p1,global_server2:p2|observer1:po1,observer2:po2`. - -#### Implementation details - -We use the following paths for Zookeeper specific data, in addition to the -regular files: - -* Locks sub-directory: `locks/` (for instance: - `keyspaces//Keyspace/locks/` for a keyspace) -* Master election path: `elections/` - -Both locks and master election are implemented using ephemeral, sequential files -which are stored in their respective directory. 
- -### etcd `etcd2` implementation (new version of `etcd`) - -This topology service plugin is meant to use etcd clusters as storage backend -for the topology data. This topology service supports version 3 and up of the -etcd server. - -This implementation is named `etcd2` because it supersedes our previous -implementation `etcd`. Note that the storage format has been changed with the -`etcd2` implementation, i.e. existing data created by the previous `etcd` -implementation must be migrated manually (See migration section below). - -To configure an `etcd2` installation, let's start with the global cell -service. It is described by the addresses of the servers (comma separated list), -and by the root directory to put the Vitess data in. For instance, assuming we -want to use servers `http://global_server1,http://global_server2` in path -`/vitess/global`: - -``` sh -# Set the following flags to let Vitess use this global server, -# and simplify the example below: -# -topo_implementation etcd2 -# -topo_global_server_address http://global_server1,http://global_server2 -# -topo_global_root /vitess/global -TOPOLOGY="-topo_implementation etcd2 -topo_global_server_address http://global_server1,http://global_server2 -topo_global_root /vitess/global" -``` - -Then to add a cell whose local topology servers -`http://cell1_server1,http://cell1_server2` will store their data under the -directory `/vitess/cell1`: - -``` sh -# Reference cell1 in the global topology service: -# (the TOPOLOGY variable is defined in the previous section) -vtctl $TOPOLOGY AddCellInfo \ - -server_address http://cell1_server1,http://cell1_server2 \ - -root /vitess/cell1 \ - cell1 -``` - -If only one cell is used, the same etcd instances can be used for both -global and local data. A local cell record still needs to be created, just use -the same server address and, very importantly, a *different* root directory.
- -#### Implementation details - -For locks, we use a subdirectory named `locks` in the directory to lock, and an -ephemeral file in that subdirectory (it is associated with a lease, whose TTL -can be set with the `-topo_etcd_lease_duration` flag, defaults to 30 -seconds). The ephemeral file with the lowest ModRevision has the lock, the -others wait for files with older ModRevisions to disappear. - -Master elections also use a subdirectory, named after the election Name, and use -a similar method as the locks, with ephemeral files. - -We store the proto3 binary data for each object (as the v3 API allows us to store binary data). - -### Consul `consul` implementation - -This topology service plugin is meant to use Consul clusters as storage backend -for the topology data. - -To configure a `consul` installation, let's start with the global cell -service. It is described by the address of a server, -and by the root node path to put the Vitess data in (it cannot start with `/`). For instance, assuming we -want to use servers `global_server:global_port` with node path -`vitess/global`: - -``` sh -# Set the following flags to let Vitess use this global server, -# and simplify the example below: -# -topo_implementation consul -# -topo_global_server_address global_server:global_port -# -topo_global_root vitess/global -TOPOLOGY="-topo_implementation consul -topo_global_server_address global_server:global_port -topo_global_root vitess/global" -``` - -Then to add a cell whose local topology server -`cell1_server1:cell1_port` will store its data under the -directory `vitess/cell1`: - -``` sh -# Reference cell1 in the global topology service: -# (the TOPOLOGY variable is defined in the previous section) -vtctl $TOPOLOGY AddCellInfo \ - -server_address cell1_server1:cell1_port \ - -root vitess/cell1 \ - cell1 -``` - -If only one cell is used, the same consul instances can be used for both -global and local data.
A local cell record still needs to be created, just use -the same server address and, very importantly, a *different* root node path. - -#### Implementation details - -For locks, we use a file named `Lock` in the directory to lock, and the regular -Consul Lock API. - -Master elections use a single lock file (the Election path) and the regular -Consul Lock API. The contents of the lock file is the ID of the current master. - -Watches use the Consul long polling Get call. They cannot be interrupted, so we -use a long poll whose duration is set by the -`-topo_consul_watch_poll_duration` flag. Canceling a watch may have to -wait until the end of a polling cycle with that duration before returning. - -## Running in only one cell - -The topology service is meant to be distributed across multiple cells, and -survive single cell outages. However, one common usage is to run a Vitess -cluster in only one cell / region. This part explains how to do this, and later -on upgrade to multiple cells / regions. - -If running in a single cell, the same topology service can be used for both -global and local data. A local cell record still needs to be created, just use -the same server address and, very importantly, a *different* root node path. - -In that case, just running 3 servers for topology service quorum is probably -sufficient. For instance, 3 etcd servers. And use their address for the local -cell as well. Let's use a short cell name, like `local`, as the local data in -that topology server will later on be moved to a different topology service, -which will have the real cell name. - -### Extending to more cells - -To then run in multiple cells, the current topology service needs to be split -into a global instance and one local instance per cell. Whereas, the initial -setup had 3 topology servers (used for global and local data), we recommend to -run 5 global servers across all cells (for global topology data) and 3 local -servers per cell (for per-cell topology data). 
- -To migrate to such a setup, start by adding the 3 local servers in the second -cell and run `vtctl AddCellInfo` as was done for the first cell. Tablets and -vtgates can now be started in the second cell, and used normally. - -vtgate can then be configured with a list of cells to watch for tablets using -the `-cells_to_watch` command line parameter. It can then use all tablets in -all cells to route traffic. Note this is necessary to access the master in -another cell. - -After the extension to two cells, the original topo service contains both the -global topology data, and the first cell topology data. The more symmetrical -configuration we're after would be to split that original service into two: a -global one that only contains the global data (spread across both cells), and a -local one to the original cells. To achieve that split: - -* Start up a new local topology service in that original cell (3 more local - servers in that cell). -* Pick a name for that cell, different from `local`. -* Use `vtctl AddCellInfo` to configure it. -* Make sure all vtgates can see that new local cell (again, using - `-cells_to_watch`). -* Restart all vttablets to be in that new cell, instead of the `local` cell name - used before. -* Use `vtctl RemoveKeyspaceCell` to remove all mentions of the `local` cell in - all keyspaces. -* Use `vtctl RemoveCellInfo` to remove the global configurations for that - `local` cell. -* Remove all remaining data in the global topology service that are in the old - local server root. - -After this split, the configuration is completely symmetrical: - -* a global topology service, with servers in all cells. Only contains global - topology data about Keyspaces, Shards and VSchema. Typically it has 5 servers - across all cells. -* a local topology service to each cell, with servers only in that cell. Only - contains local topology data about Tablets, and roll-ups of global data for - efficient access. Typically, it has 3 servers in each cell. 
- -## Migration between implementations - -We provide the `topo2topo` binary file to migrate between one implementation -and another of the topology service. - -The process to follow in that case is: - -* Start from a stable topology, where no resharding or reparenting is on-going. -* Configure the new topology service so it has at least all the cells of the - source topology service. Make sure it is running. -* Run the `topo2topo` program with the right flags. `-from_implementation`, - `-from_root`, `-from_server` describe the source (old) topology - service. `-to_implementation`, `-to_root`, `-to_server` describe the - destination (new) topology service. -* Run `vtctl RebuildKeyspaceGraph` for each keyspace using the new topology - service flags. -* Run `vtctl RebuildVSchemaGraph` using the new topology service flags. -* Restart all `vtgate` using the new topology service flags. They will see the - same keyspaces / shards / tablets / vschema as before, as the topology was - copied over. -* Restart all `vttablet` using the new topology service flags. They may use the - same ports or not, but they will update the new topology when they start up, - and be visible from vtgate. -* Restart all `vtctld` processes using the new topology service flags. So that - the UI also shows the new data. - -Sample commands to migrate from deprecated `zookeeper` to `zk2` -topology would be: - -``` sh -# Let's assume the zookeeper client config file is already -# exported in $ZK_CLIENT_CONFIG, and it contains a global record -# pointing to: global_server1,global_server2 -# and a local cell cell1 pointing to cell1_server1,cell1_server2 -# -# The existing directories created by Vitess are: -# /zk/global/vt/... -# /zk/cell1/vt/... -# -# The new zk2 implementation can use any root, so we will use: -# /vitess/global in the global topology service, and: -# /vitess/cell1 in the local topology service. - -# Create the new topology service roots in global and local cell. 
-zk -server global_server1,global_server2 touch -p /vitess/global -zk -server cell1_server1,cell1_server2 touch -p /vitess/cell1 - -# Store the flags in a shell variable to simplify the example below. -TOPOLOGY="-topo_implementation zk2 -topo_global_server_address global_server1,global_server2 -topo_global_root /vitess/global" - -# Reference cell1 in the global topology service: -vtctl $TOPOLOGY AddCellInfo \ - -server_address cell1_server1,cell1_server2 \ - -root /vitess/cell1 \ - cell1 - -# Now copy the topology. Note the old zookeeper implementation doesn't need -# any server or root parameter, as it reads ZK_CLIENT_CONFIG. -topo2topo \ - -from_implementation zookeeper \ - -to_implementation zk2 \ - -to_server global_server1,global_server2 \ - -to_root /vitess/global - -# Rebuild SrvKeyspace objects in new service, for each keyspace. -vtctl $TOPOLOGY RebuildKeyspaceGraph keyspace1 -vtctl $TOPOLOGY RebuildKeyspaceGraph keyspace2 - -# Rebuild SrvVSchema objects in new service. -vtctl $TOPOLOGY RebuildVSchemaGraph - -# Now restart all vtgate, vttablet, vtctld processes replacing: -# -topo_implementation zookeeper -# With: -# -topo_implementation zk2 -# -topo_global_server_address global_server1,global_server2 -# -topo_global_root /vitess/global -# -# After this, the ZK_CLIENT_CONFIG file and environment variables are not needed -# any more. -``` - -### Migration using the `Tee` implementation - -If your migration is more complex, or has special requirements, we also support -a 'tee' implementation of the topo service interface. It is defined in -`go/vt/topo/helpers/tee.go`. It allows communicating to two topo services, -and the migration uses multiple phases: - -* Start with the old topo service implementation we want to replace. -* Bring up the new topo service, with the same cells. -* Use `topo2topo` to copy the current data from the old to the new topo. -* Configure a Tee topo implementation to maintain both services. 
- * Note we don't expose a plugin for this, so a small code change is necessary. - * all updates will go to both services. - * the `primary` topo service is the one we will get errors from, if any. - * the `secondary` topo service is just kept in sync. - * at first, use the old topo service as `primary`, and the new one as - `secondary`. - * then, change the configuration to use the new one as `primary`, and the - old one as `secondary`. Reverse the lock order here. - * then rollout a configuration to just use the new service. - - diff --git a/doc/Troubleshooting.md b/doc/Troubleshooting.md deleted file mode 100644 index a7b4b0541c2..00000000000 --- a/doc/Troubleshooting.md +++ /dev/null @@ -1,47 +0,0 @@ -If there is a problem in the system, one or many alerts would typically fire. If a problem was found through means other than an alert, then the alert system needs to be iterated upon. - -When an alert fires, you have the following sources of information to perform your investigation: - -* Alert values -* Graphs -* Diagnostic URLs -* Log files - -Below are a few possible scenarios. - -### Elevated query latency on master - -Diagnosis 1: Inspect the graphs to see if QPS has gone up. If yes, drill down on the more detailed QPS graphs to see which table, or user caused the increase. If a table is identified, look at /debug/queryz for queries on that table. - -Action: Inform engineer about toxic query. If it’s a specific user, you can stop their job or throttle them to keep the load manageable. As a last resort, blacklist query to allow the rest of the system to stay healthy. - -Diagnosis 2: QPS did not go up, only latency did. Inspect the per-table latency graphs. If it’s a specific table, then it’s most likely a long-running low QPS query that’s skewing the numbers. Identify the culprit query and take necessary steps to get it optimized. Such queries usually do not cause outage. So, there may not be a need to take extreme measures. 
- -Diagnosis 3: Latency seems to be up across the board. Inspect transaction latency. If this has gone up, then something is causing MySQL to run too many concurrent transactions which causes slow-down. See if there are any tx pool full errors. If there is an increase, the INFO logs will dump info about all transactions. From there, you should be able to tell if a specific sequence of statements is causing the problem. Once that is identified, find out the root cause. It could be network issues, or it could be a recent change in app behavior. - -Diagnosis 4: No particular transaction seems to be the culprit. Nothing seems to have changed in any of the requests. Look at system variables to see if there are hardware faults. Is the disk latency too high? Are there memory parity errors? If so, you may have to failover to a new machine. - -### Master starts up read-only - -To prevent accidentally accepting writes, our default `my.cnf` settings -tell MySQL to always start up read-only. If the master MySQL gets restarted, -it will thus come back read-only until you intervene to confirm that it should -accept writes. You can use the [SetReadWrite]({% link reference/vtctl.md %}#setreadwrite) -command to do that. - -However, usually if something unexpected happens to the master, it's better to -reparent to a different replica with [EmergencyReparentShard]({% link reference/vtctl.md %}#emergencyreparentshard). If you need to do planned maintenance on the master, -it's best to first reparent to another replica with [PlannedReparentShard]({% link reference/vtctl.md %}#plannedreparentshard). - -### Vitess sees the wrong tablet as master - -If you do a failover manually (not through Vitess), you'll need to tell -Vitess which tablet corresponds to the new master MySQL. Until then, -writes will fail since they'll be routed to a read-only replica -(the old master). 
Use the [TabletExternallyReparented]({% link reference/vtctl.md %}#tabletexternallyreparented) -command to tell Vitess the new master tablet for a shard. - -Tools like [Orchestrator](https://github.com/github/orchestrator) -can be configured to call this automatically when a failover occurs. -See our sample [orchestrator.conf.json](https://github.com/vitessio/vitess/blob/1129d69282bb738c94b8af661b984b6377a759f7/docker/orchestrator/orchestrator.conf.json#L131) -for an example of this. diff --git a/doc/TwoPhaseCommitGuide.md b/doc/TwoPhaseCommitGuide.md deleted file mode 100644 index dccdb196eac..00000000000 --- a/doc/TwoPhaseCommitGuide.md +++ /dev/null @@ -1,86 +0,0 @@ -# 2PC User guide - -# Overview - -Vitess 2PC allows you to perform atomic distributed commits. The feature is implemented using traditional MySQL transactions, and hence inherits the same guarantees. With this addition, Vitess can be configured to support the following three levels of atomicity: - -1. **Single database**: At this level, only single database transactions are allowed. Any transaction that tries to go beyond a single database will be failed. -2. **Multi database**: A transaction can span multiple databases, but the commit will be best effort. Partial commits are possible. -3. **2PC**: This is the same as Multi-database, but the commit will be atomic. - -2PC commits are more expensive than multi-database because the system has to save away the statements before starting the commit process, and also clean them up after a successful commit. This is the reason why it's a separate option instead of being always on. - -## Isolation - -2PC transactions guarantee atomicity: either the whole transaction commits, or it's rolled back entirely. It does not guarantee Isolation (in the ACID sense). This means that a third party that performs cross-database reads can observe partial commits while a 2PC transaction is in progress. - -Guaranteeing ACID Isolation is very contentious and has high costs. 
Providing it by default would have made vitess impractical for the most common use cases. - - -# Configuring VTGate - -The atomicity policy is controlled by the `transaction_mode` flag. The default value is `multi`, and will set it in multi-database mode. This is the same as the previous legacy behavior. - -To enforce single-database transactions, the VTGates can be started by specifying `transaction_mode=single`. - -To enable 2PC, the VTGates need to be started with `transaction_mode=twopc`. The VTTablets will require a few more flags, which will be explained below. - -The VTGate `transaction_mode` flag decides what to allow. The application can independently request a specific atomicity for each transaction. The request will be honored by VTGate only if it does not exceed what is allowed by the `transaction_mode`. For example, `transaction_mode=single` will only allow single-db transactions. On the other hand, `transaction_mode=twopc` will allow all three levels of atomicity. - -# Driver APIs - -The way to request atomicity from the application is driver-specific. - -## Go driver - -For the Go driver, you request the atomicity by adding it to the context using the `WithAtomicity` function. For more details, please refer to the respective GoDocs. - -## Python driver - -For Python, the `begin` function of the cursor has an optional `single_db` flag. If the flag is `True`, then the request is for a single-db transaction. If `False` (or unspecified), then the following `commit` call's `twopc` flag decides if the commit is 2PC or Best Effort (`multi`). - -## Java & PHP (TODO) - -## Adding support in a new driver - -The VTGate RPC API extends the `Begin` and `Commit` functions to specify atomicity. The API mimics the Python driver: The `BeginRequest` message provides a `single_db` flag and the `CommitRequest` message provides an `atomic` flag which is synonymous to `twopc`. 
- -# Configuring VTTablet - -The following flags need to be set to enable 2PC support in VTTablet: - -* **twopc_enable**: This flag needs to be turned on. -* **twopc_coordinator_address**: This should specify the address (or VIP) of the VTGate that VTTablet will use to resolve abandoned transactions. -* **twopc_abandon_age**: This is the time in seconds that specifies how long to wait before asking a VTGate to resolve an abandoned transaction. - -With the above flags specified, every master VTTablet also turns into a watchdog. If any 2PC transaction is left lingering for longer than `twopc_abandon_age` seconds, then VTTablet invokes VTGate and requests it to resolve it. Typically, the `abandon_age` needs to be substantially longer than the time it takes for a typical 2PC commit to complete (10s of seconds). - -# Configuring MySQL - -The usual default values of MySQL are sufficient. However, it's important to verify that `wait_timeout` (28800) has not been changed. If this value was changed to be too short, then MySQL could prematurely kill a prepared transaction causing data loss. - -# Monitoring - -A few additional variables have been added to `/debug/vars`. Failures described below should be rare. But these variables are present so you can build an alert mechanism if anything were to go wrong. - -## Critical failures - -The following errors are not expected to happen. If they do, it means that 2PC transactions have failed to commit atomically: - -* **InternalErrors.TwopcCommit**: This is a counter that shows the number of times a prepared transaction failed to fulfil a commit request. -* **InternalErrors.TwopcResurrection**: This counter is incremented if a new master failed to resurrect a previously prepared (and unresolved) transaction. - -## Alertable failures - -The following failures are not urgent, but require someone to investigate: - -* **InternalErrors.WatchdogFail**: This counter is incremented if there are failures in the watchdog thread of VTTablet. 
This means that the watch dog is not able to alert VTGate of abandoned transactions. -* **Unresolved.Prepares**: This is a gauge that is set based on the number of lingering Prepared transactions that have been alive for longer than 5x the abandon age. This usually means that a distributed transaction has repeatedly failed to resolve. A more serious condition is when the metadata for a distributed transaction has been lost and this Prepare is now permanently orphaned. - -# Repairs - -If any of the alerts fire, it's time to investigate. Once you identify the `dtid` or the VTTablet that originated the alert, you can navigate to the `/twopcz` URL. This will display three lists: - -1. **Failed Transactions**: A transaction reaches this state if it failed to commit. The only action allowed for such transactions is that you can discard it. However, you can record the DMLs that were involved and have someone come up with a plan to repair the partial commit. -2. **Prepared Transactions**: Prepared transactions can be rolled back or committed. Prepared transactions must be remedied only if their root Distributed Transaction has been lost or resolved. -3. **Distributed Transactions**: Distributed transactions can only be Concluded (marked as resolved). diff --git a/doc/UpdateStream.md b/doc/UpdateStream.md deleted file mode 100644 index 8891fb11cd9..00000000000 --- a/doc/UpdateStream.md +++ /dev/null @@ -1,498 +0,0 @@ -# Update Stream - -Update Stream is a Vitess service that provides a change stream for any keyspace. -The use cases for this service include: - -* Providing an *invalidation stream*, that an application can use to maintain a cache. - -* Maintain an external copy of the data in another system, that is only updated - when the data changes. - -* Maintain a change record of all the transactions that have been applied to the data. 
- -A good understanding -of [Vitess Replication]({% link user-guide/vitess-replication.md %}) is required to -understand this document better. We will go through the use cases in a bit more -details, then introduce the EventToken notion, and finally explain the service. - -## Use Cases - -### Maintaining Cache Consistency - -The first use case we’re trying to address is to maintain a consistent cache of -the data. The problem here has two parts: - -* When data changes, we need to invalidate the cache. - -* When we want to re-populate the cache after an invalidation, we need to make - sure we get data that is more recent than the data change. For instance, we - can’t just re-query any replica, as it might be behind on replication. - -This process can be somewhat resilient to some stream anomalies. For instance, -invalidating the same record twice in some corner cases is fine, as long as we -don’t poison the cache with an old value. - -Note the location / ownership of the cache is not set in stone: - -* Application-layer cache: the app servers maintain the cache. It’s very early - in the serving chain, so in case of a cache hit, it’s lower latency. However, - an invalidation process needs to run and is probably also owned by the - application layer, which is somewhat annoying. - -* vtgate-layer cache: it would be a row cache accessed by vtgate, transparent to - the app. It requires vtgate to do a lot of extra heavy-lifting, depending on - what we want to support. Cache invalidation is still required, at a row level. - -* vttablet-layer cache: this is the old rowcache. Since the cache is not shared - across instances, and the app still needs a cache, we abandoned this one. - -Since the vtgate-layer cache is much harder to work on (because of the query -implications), and will probably require similar components as the app-layer -cache, we decided to work on the app-layer cache for now, with possibly an -invalidation process that is somewhat tied to the app. 
- -The *composite object cache* is an interesting use case: if the application is -in charge of the cache, it would seem possible to put in the cache higher level -composite objects, that are built from multiple table records. They would be -invalidated any time one of the composing table record is changed. They need to -be addressed by a part of the primary key, so they’re easy to find. - -### Change Log - -A Change Log provides a stream of all data changes, so an external application -can either record these changes, or keep an external database up to date with -the latest data. - -Unlike the Cache Invalidation use case, this is not as forgiving. If we have -duplicate updates, they will need to be handled. - -## Design Considerations - -### Single Shard Update Stream - -This has been supported in Vitess for a while, but not exposed. It works as follows: - -* vttablet adds an SQL comment to every DML, that contains the Primary Key of - the modified row. - -* vttablet provides a streaming service that can connect to the local MySQL - replication stream, extract the comment, and stream events to the client. - -* Use the GTID as the start / restart position. It is sent back with each - update, and an Update Stream can be started from it. - -Note Vitess supports both the MariaDB GTIDs (domain:server:sequence) and the -MySQL 5.6 GTID Sets (encoded in SID blocks). - -### Surviving Resharding: Problem - -The Vitess tools are supposed to provide transparent sharding for the user’s -data. Most of the trouble we run into is surviving resharding events, when we -hop over from one set of shards to another set of shards. - -Two strategies then come to mind: - -* Provide a per-shard update stream. Let the user handle the hop when resharding - happens. If we were to do this for the Cache use case, we would also need to - provide some way of preventing bad corner cases, like a full cache flush, or - no cache update for a while, or lost cache invalidations. 
Simple for us, but - the app can be a lot more complicated. And the Change Log use case is also - hard to do. - -* Provide a per-keyrange update stream. Vtgate would connect to the right - shards, and resolve all conflicts. We can add the restriction that the client - only asks for keyranges that are exactly matching to one or more shards. For - instance, if a keyspace is sharded four ways, -40, 40-80, 80-c0, c0-, we can - support clients asking for -40, -80, -, but not for 60-a0 for instance. - -As a reminder, the resharding process is somewhat simple: - -* Let’s say we want to split a shard 20-40 into two shards, 20-30 and 30-40. At - first, only 20-40 exists and has a GTID stream. - -* We create 20-30 and 30-40, each has its own GTID stream. We copy the schema, - and the data. - -* Filtered replication is enabled. A transaction in 20-40 is replayed on both - 20-30 and 30-40, with an extra blp_checkpoint statement, that saves the 20-40 - GTID. - -* At some point, we migrate the read-only traffic from 20-40 replicas to 20-30 - and 30-40 replicas. (Note: this is probably when we want to migrate any - invalidation process as well). - -* Then as a final step, the writes are migrated from 20-40 to 20-30 and 30-40. - -So we have a window of time when both streams are available simultaneously. For -the resharding process to be operationally better, that window should be as -small as possible (so we don't run with two copies of the data for too long). So -we will make sure an Update Stream can hop from the source shards to the -destination shards quickly. - -### Surviving Resharding: First Try - -To solve the shard hop problem during resharding, we tried to explore adding -good timing information to the replication stream. However: - -* Since the time is added by vttablet, not MySQL, it is not accurate, not - monotonic, and provides no guarantees. - -* With such loose guarantees, it is no better than the second-accurate - timestamp added by MySQL to each transaction. 
- -So this idea was abandoned. - -The GTID stream maintained by MySQL is the only true source of IDs for -changes. It’s the only one we can trivially seek on, and get binlogs. The main -issue with it is that it’s not maintained across shards when resharding. - -However, it is worth noting that a transaction replicated by the binlog streamer -using Filtered Replication also saves the original GTID and the source -transaction timestamp in the blp_checkpoint table. So we could extract the -original GTID and timestamp from at least that statement (and if not, from an -added comment). - -### Change Log and SBR - -If all we have is Statement Based Replication (SBR), we cannot get an accurate -Change Log. SBR only provides the SQL statements, there is no easy way for us to -parse them to get the final values of the columns (OK, there is, it’s just too -complicated). And we cannot just query MySQL, as it may have already applied -more transactions related to that record. So for Change Log, we need Row Based -Replication (or a more advanced replication system). - -Note we can use the following setup: - -* Master and replicas use SBR. - -* Rdonly use SBR to connect to master, but log RBR logs locally. - -* We get the replication stream from rdonly servers. - -This is a bit awkward, and the main question is: what happens if a rdonly server -is the only server that has replicated and semi-sync-acked a transaction, while -the master is dying? Then to get that change, the other servers would get the -RBR version of the change. - -Vitess support for RBR is coming. We will then explore these use cases further. - -## Detailed Design - -In the rest of this document, we’ll explore using the GTID when tracking a -single shard, and revert to the timestamp when we hop across shards. - -As we do not want the application layer to understand / parse / compare the -GTIDs, we’ll use an opaque token, and just pass it around the various -layers. Vtgate / vttablet will understand it. 
The invalidation process should -not have to, but will as there is no better solution. - -This approach can be made to work for the cache invalidation use case, but it -might be difficult to provide an exact point in time for recovery / switching -over to a different set of shards during resharding. - -For the Change Log, we’ll see what we can provide. - -### Event Token - -We define an Event Token structure that contains: - -* a MySQL replication timestamp (int64, seconds since Epoch). - -* a shard name - -* A GTIDSet position. - -It basically describes a position in a replication stream. - -An Event Token is always constructed from reading a transaction from the -binlogs. If filtered replication is running, we use the source shard timestamp. - -Event Token comparison: - -* First, if the timestamps are different, just use that. - -* Then, if both use the same shard name, compare the GTIDs. - -* Otherwise we do not know for sure. It will depend on the usage to figure out - what we do. - -*Possible Extension*: when filtered replication is running, we also update -blp_checkpoint with the source GTID. We could add that information to the Event -Token. Let’s try to go without in the first version, to remain simple. More on -this later in the ‘Data Dump, Keeping it up to Date’ section. - -### Vttablet Changes - -#### Watching the Replication Stream - -Replicas are changed to add a background routine that reads the binlogs -(controlled by the `watch_replication_stream` flag). When a tablet’s type is set -to `replica`, the routine starts. It stops when the tablet is not `replica` any -more (goes to `master`, `worker`, …). - -The routine starts reading the binlog from the current position. It then remembers: - -* The Event Token of the last seen transaction. - -* *Possible Optimization*: A map of the first time a timestamp is seen to the - corresponding GTID position and filename / position. This would be a value per - second. 
Let’s age these out: we keep the values for the last N seconds, then - we keep a value for every minute for the last M hours. We forget values older - than 3 days (or whatever the binlog retention time is). - -#### `include_event_token` Option - -We added an option to the Query Service API for Execute calls, called -`include_event_token`. If set, vttablet will get the last seen Event Token right -before issuing the query to MySQL, and include it in the response. This -essentially represents the last known replication position that we’re sure the -data we’re returning is fresher than. - -#### `compare_event_token` Option - -We added an option to the Query Service API for Execute calls, called -`compare_event_token`. The provided event token is sent along with the call, and -vttablet compares that token with the one its current replication stream watcher -has. It returns the result of the comparison in ResultExtras. - -#### Update Stream API Change - -The Update Stream API in vttablet right now can only start from a GTID. We added a -new API that can start from a timestamp as well. It will look for the right -binlog file to start with, and start streaming events, discarding events until -it finds the provided timestamp. *Optimization*: It can also look in the map to -find the closest value it can start with, and then read from the binlogs until -it finds the first timestamp. If it doesn’t have old enough values in its map, -it errors out (the goal is to have vtgate then try another tablet to start -from). For each event, we will also return the corresponding Event Token. - -*Optimization*: if an Update Stream client is caught up to the current binlog -reading thread, we can just tee the binlog stream to that client. We won’t do -that in the first version, as we don’t expect that many clients. 
- -Note that when filtered replication is running, we need to have the timestamp of -the source transaction on the source shard, not the local timestamp of the -applied transaction. Which also means that timestamps will not be always -linearly increasing in the stream, in the case of a shard merge (although they -will be linearly increasing for a given keyspace_id). - -### Vtgate Changes - -We added a new Update Stream service to vtgate. It takes as input a keyspace and -an optional KeyRange (for sharded keyspaces). As a starting point, it takes a -timestamp. - -*Caveat*: As previously mentioned, at first, we can add the restriction that the -client only asks for KeyRanges that are exactly matching to one or more -shards. For instance, if a keyspace is sharded four ways, -40, 40-80, 80-c0, -c0-, we can support clients asking for -40, -80, -, but not for 60-a0 for -instance. Lifting that restriction is somewhat easy, we’d just have to filter -the returned keyspace_ids by KeyRange, but that’s extra work for not much gain -in the short term (and we don’t parse keyspace_id in Binlog Streamer right now, -just the PK). - -After using the partition map in SrvKeyspace, vtgate will have a list of shards -to query. It will need to create a connection for every shard that overlaps with -the input KeyRange. For every shard, it will pick an up-to-date replica and use -the Update Stream API mentioned above. If the vttablet cannot provide the -stream, it will failover to another one. It will then start an Update Stream on -all sources, and just merge and stream the results back to the source. For each -Event Token that is read from a source, vtgate will also send the smallest -timestamp of all Events it’s seen in all sources. That way the client has a -value to start back from in case it needs to restart. - -In case of resharding event, the list of shards to connect to may change. 
Vtgate -will build a map of overlapping shards, to know which source shards are mapped -to which destination shards. It will then stop reading from all the source -shards, find the minimum timestamp of the last event it got from each source, -and use that to restart the stream on the destination shards. - -*Alternate Simpler Solution*: when vtgate notices a SrvKeyspace change in the -serving shards, it just aborts the invalidation stream. The client is -responsible for reconnecting with the last timestamp it’s seen. The client will -need to handle this error case anyway (when vtgates get upgraded at least). - -*Caveat*: this will produce duplicate Event Tokens, with the same timestamp but -with GTID positions from two different streams. More on this later, but for a -Cache Invalidation scenario, no issue, and for a Change Log application, we’ll -see how we can deal with it. - -We also add the same `include_event_token` flag to vtgate query service. It just -passes it along to the underlying vttablet. It’s only supported for -single-keyspace_id queries. The resulting EventToken is just returned back as -is. - -## Use Cases How To - -Let's revisit our use cases and see how this addresses them. - -### Cache Invalidation - -The idea is to use the Event Token coming from both the Execute results and the -Update Stream to maintain cache consistency. - -The cache contains entries with both: - -* An Event Token. It describes either the invalidation, or the last population. - -* An optional value. - -The invalidation process works as follows: - -* It asks vtgate for an Update Stream for a provided keyspace / KeyRange, - starting at the current timestamp (or from a few seconds/minutes/hours in the - past, or from the last checkpointed timestamp it had saved). - -* Vtgate resolves the keyrange into shards. It starts an invalidation stream - with a healthy replica in each shard from the provided timestamp. 
- -* Vtgate sends back all Event Tokens it collects, with all of timestamp, shard - name and GTID. - -* For each change it gets, the invalidation process reads the cache record. Two cases: - - * No entry in the cache: it stores the Event Token (to indicate the cache - should not be populated unless the value is greater) with no value. - - * An entry in the cache exists, with an Event Token: - - * If the cached Event Token is strictly older, update it with the new Event - Token, clear the value. - - * If the cached Event Token is strictly more recent, discard the new Event. - - * If we don’t know which Event Token is the most recent (meaning they have - the same timestamp, and are read from different invalidation stream), we - need to do the safest thing: invalidate the cache with the current Event - Token. This is the safest because we’re guaranteed to get duplicate - events, and not miss events. - - * In any case the invalidation process only updates the cache if it still - contains the value it read (CAS). Otherwise it rereads and tries again - (means an appserver or another invalidator somehow also updated the cache). - -A regular appserver will query the cache for the value it wants. It will get either: - -* No entry: asks vtgate for the Event Token when querying the database, use a - CAS operation to set the value the returned Event Token + Value. - -* An entry with both an Event Token and a Value: Just use the value. - -* An entry with just an Event Token and no Value: - - * Send the Event Token along with the query to vtgate as - `compare_event_token`, and also asking for Event Token using `include_event_token`. - - * Vtgate will query vttablet as usual, but also passing both flags. - - * Vttablet will then compare the provided Event Token with the one that was - included. It will include in the response the knowledge of the Event Token - comparison as a boolean, only set if the data read is `fresher`. 
- - * Depending on the `fresher` boolean flag, the app will: - - * Data read is more recent: Update the cache with new Event Token / Value. - - * Data read is not more recent (or we don't know for sure): don’t update the cache. - -*Constraints*: - -* When restarting the invalidation process, we start from a point back in time, - let’s say N seconds behind now. Since we can ask destination shards at this - point for events that are N seconds old, filtered replication has to have been - running for at least N seconds. (Alternatively, the invalidators can - checkpoint their current position from time to time, and restart from that - when starting up, and revert back to N seconds behind now). - -* As mentioned before, the shard range queried by the invalidation process - should cover a round number of actual shards. - -* The invalidation process needs to know how to compare tokens. This is a - bummer, I don’t see any way around it. We could simplify and only do the - timestamp comparison part, but that would mean the cache is unused for up to - an entire second upon changes. The appserver doesn’t need to compare, it gives - the value to vtgate and let it do the work. - -To see a sample use of the Update Stream feature, look at -the -[cache_invalidation.py](https://github.com/vitessio/vitess/blob/master/test/cache_invalidation.py) integration -test. It shows how to do the invalidation in python, and the application -component. - -### Extension: Removing Duplicate Events - -In the previous section, we use timestamps to easily seek on replication -streams. If we added the ability to seek on any source GTID that appears in the -destination stream, we should be able to precisely seek at the right spot. That -would make exact transitions from one stream to the next possible. Again, as -long as the destination shard in a resharding event has been running filtered -replication for as long as we want to go back. 
- -However, describing a position on a replication stream becomes tricky: it needs -one Event Token per replication stream. When resharding the Event Tokens would -jump around. When restarting a stream from an Event Token list, we may need to -restart earlier in some cases and skip some items. - -*Bottom Line*: - -* This would require a bunch of non-trivial code. - -* This requires that filtered replication would be running for at least as long - as we want to go back in time for the starting point. - -If there is no use case for it, let’s not do it. - -### Extension: Adding Update Data to the Stream, Towards Change Log - -Let’s add a flag to the streaming query, that, if specified, asks for the -changed columns as well as the PK. - -* If using SBR, and the flag is present, vttablet can just query the row at the - time we get the event, and send it along. As already mentioned, the data may - not be exactly up to date. It is however guaranteed to be newer than the Event - Token, which might be good enough to put in a cache for instance. - -* If using RBR, we just get the data for free, just send it along. - -*Bottom Line*: Let’s try to go without this extension and see how it goes. We -can implement the additional data when we fully support RBR. - -### Extension: Data Dump, Keeping It Up To Date - -*Use Case*: keep a secondary database (like a HBase database) up to date. -*Requirements*: RBR replication, plus Data included in the Stream (previous extension). - -It’s simple: - -* The external database has the same schema as MySQL. Each row is indexed by - PK. It also has an extra field, for the last Event Token. - -* Remember start time of the process, let’s call it StartTime - -* Dump the data to other database. Using Map/Reduce, whatever. Do not populate - the Event Tokens. - -* Start an invalidation process, asking for changes from StartTime. 
When getting - updates, read the current external database row and its Event Token: - - * If there is no existing row / no Event token, save the new value. - - * If there is an existing row with a strictly more recent Event Token, ignore - the event. - - * Otherwise (when the existing Event Token is older or we don’t know), store - the new Value / Event Token. - -Note this again means the dumping process needs to be able to compare Event -Tokens, as the invalidator does. - -*Caveat*: As described, the values in the secondary storage will converge, but -they may go back in time for a bit, as we will process duplicate events during -resharding, and we may not know how to compare them. - -*Extension*: if we also add the source GTID in Event Tokens read from a -destination shard during filtered replication, we can break the tie easily on -duplicate events, and guarantee we only move forward. This seems like the -easiest solution, and we can then use only timestamps as starting times for -restarting the sync process. - diff --git a/doc/Upgrading.md b/doc/Upgrading.md deleted file mode 100644 index 04723da9ded..00000000000 --- a/doc/Upgrading.md +++ /dev/null @@ -1,49 +0,0 @@ -# Upgrading a Vitess Installation - -This document highlights things to look after when upgrading a Vitess production installation to a newer Vitess release. - -Generally speaking, upgrading Vitess is a safe and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch). - -## Compatibility - -Our versioning strategy is based on [Semantic Versioning](https://semver.org/). - -Vitess version numbers follow the format `MAJOR.MINOR.PATCH`. -We guarantee compatibility when upgrading to a newer **patch** or **minor** version. -Upgrades to a higher **major** version may require manual configuration changes. 
- -In general, always **read the 'Upgrading' section of the release notes**. -It will mention any incompatible changes and necessary manual steps. - -## Upgrade Order - -We recommend to upgrade components in a bottom-to-top order such that "old" clients will talk to "new" servers during the transition. - -Please use this upgrade order (unless otherwise noted in the release notes): - -- vtctld -- vttablet -- vtgate -- application code which links client libraries - -*vtctld* is listed first to make sure that you can still administrate Vitess - or if not find out as soon as possible. - -## Canary Testing - -Within the vtgate and vttablet components, we recommend to [canary](https://martinfowler.com/bliki/CanaryRelease.html) single instances, keyspaces and cells. Upgraded canary instances can "bake" for several hours or days to verify that the upgrade did not introduce a regression. Eventually, you can upgrade the remaining instances. - -## Rolling Upgrades - -We recommend to automate the upgrade process with a configuration management software. It will reduce the possibility of human errors and simplify the process of managing all instances. - -As of June 2016 we do not have templates for any major open-source configuration management software because our internal upgrade process is based on a proprietary software. Therefore, we invite open-source users to contribute such templates. - -Any upgrade should be a rolling release i.e. usually one tablet at a time within a shard. This ensures that the remaining tablets continue serving live traffic and there is no interruption. 
- -## Upgrading the Master Tablet - -The *master* tablet of each shard should always be updated last in the following manner: - -- verify that all *replica* tablets in the shard have been upgraded -- reparent away from the current *master* to a *replica* tablet -- upgrade old *master* tablet diff --git a/doc/UserGuideIntroduction.md b/doc/UserGuideIntroduction.md deleted file mode 100644 index 261abfa46ef..00000000000 --- a/doc/UserGuideIntroduction.md +++ /dev/null @@ -1,83 +0,0 @@ -## Platform support - -We continuously test against Ubuntu 14.04 (Trusty) and Debian 8 (Jessie). -Other Linux distributions should work as well. - -## Database support - -Vitess supports [MySQL 5.6](https://dev.mysql.com/doc/refman/5.6/en/), -[MariaDB 10.0](https://downloads.mariadb.org/mariadb/10.0.21/), and any -newer versions like MySQL 5.7, etc. Vitess also supports Percona's -variations of these versions. - -### Relational capabilities - -Vitess attempts to leverage the capabilities of the underlying MySQL -instances to the fullest extent. In this respect, any query that can -be passed through to a single keyspace, shard or set of shards will -be sent to the MySQL servers as is. - -This approach allows you to exploit the full capabilities of MySQL -as long as the relationships and constraints are within one shard (or -unsharded keyspace). - -For relationships that go beyond shards, Vitess provides -support through the [VSchema]({% link user-guide/vschema.md %}). - -### Schema management - -Vitess supports several functions for looking at your schema and -validating its consistency across tablets in a shard or across all -shards in a keyspace. - -In addition, Vitess supports -[data definition statements](https://dev.mysql.com/doc/refman/5.6/en/sql-syntax-data-definition.html) -that create, modify, or delete database tables. Vitess executes -schema changes on the master tablet within each shard, and those -changes then propagate to slave tablets via replication. 
Vitess does -not support other types of DDL statements, such as those that affect -stored procedures or grants. - -Before executing a schema change, Vitess validates the SQL syntax -and determines the impact of the change. It also does a pre-flight -check to ensure that the update can be applied to your schema. In -addition, to avoid reducing the availability of your entire system, -Vitess rejects changes that exceed a certain scope. - -See the [Schema Management]({% link user-guide/schema-management.md %}) -section of this guide for more information. - -## Supported clients - -The VTGate server is the main entry point that applications use -to connect to Vitess. - -VTGate understands the MySQL binary protocol. So, any client that -can directly talk to MySQL can also use Vitess. - -Additionally, VTGate exposes its functionality through a -[gRPC](https://www.grpc.io/) API which has support for multiple languages. - -Accessing Vitess through gRPC has some minor advantages over the MySQL -protocol: - -* You can send requests with bind variables, which is slightly more - efficient and secure than building the full text query. -* You can exploit the statelessness of connections. For example, you - can start a transaction using one VTGate server, and complete it - using another. - -Vitess currently provides gRPC based connectors for Java (JDBC) and Go -(database/sql). All others can use the native MySQL drivers instead. The -native MySQL drivers for Java and Go should also work. - -## Backups - -Vitess supports data backups to either a network mount (e.g. NFS) or to a blob store. -Backup storage is implemented through a pluggable interface, -and we currently have plugins available for Google Cloud Storage, Amazon S3, -and Ceph. - -See the [Backing Up Data]({% link user-guide/backup-and-restore.md %}) section -of this guide for more information about creating and restoring data -backups with Vitess. 
diff --git a/doc/VSchema.md b/doc/VSchema.md deleted file mode 100644 index 83c207badd7..00000000000 --- a/doc/VSchema.md +++ /dev/null @@ -1,340 +0,0 @@ -# VSchema User Guide - -VSchema stands for Vitess Schema. In contrast to a traditional database schema that contains metadata about tables, a VSchema contains metadata about how tables are organized across keyspaces and shards. Simply put, it contains the information needed to make Vitess look like a single database server. - -For example, the VSchema will contain the information about the sharding key for a sharded table. When the application issues a query with a WHERE clause that references the key, the VSchema information will be used to route the query to the appropriate shard. - -## Sharding Model - -In Vitess, a `keyspace` is sharded by `keyspace ID` ranges. Each row is assigned a keyspace ID, which acts like a street address, and it determines the shard where the row lives. In some respect, one could say that the `keyspace ID` is the equivalent of a NoSQL sharding key. However, there are some differences: - -1. The `keyspace ID` is a concept that is internal to Vitess. The application does not need to know anything about it. -2. There is no physical column that stores the actual `keyspace ID`. This value is computed as needed. - -This difference is significant enough that we do not refer to the keyspace ID as the sharding key. we will later introduce the concept of a Primary Vindex which more closely resembles the NoSQL sharding key. - -Mapping to a `keyspace ID`, and then to a shard, gives us the flexibility to reshard the data with minimal disruption because the `keyspace ID` of each row remains unchanged through the process. - -## Vindex - -The Sharding Key is a concept that was introduced by NoSQL datastores. It is based on the fact that there is only one access path to the data, which is the Key. However, relational databases are more versatile about the data and their relationships. 
So, sharding a database by only designating a sharding key is often insufficient. - -If one were to draw an analogy, the indexes in a database would be the equivalent of the key in a NoSQL datastore, except that databases allow you to define multiple indexes per table, and there are many types of indexes. Extending this analogy to a sharded database results in different types of cross-shard indexes. In Vitess, these are called Vindexes. - -Simplistically stated, a Vindex provides a way to map a column value to a `keyspace ID`. This mapping can be used to identify the location of a row. A variety of vindexes are available to choose from with different trade-offs, and you can choose one that best suits your needs. - -Vindexes offer many flexibilities: - -* A table can have multiple Vindexes. -* Vindexes could be NonUnique, which allows a column value to yield multiple keyspace IDs. -* They could be a simple function or be based on a lookup table. -* They could be shared across multiple tables. -* Custom Vindexes can be plugged in, and Vitess will still know how to reshard using such Vindexes. - -### The Primary Vindex - -The Primary Vindex is analogous to a database primary key. Every sharded table must have one defined. A Primary Vindex must be unique: given an input value, it must produce a single keyspace ID. This unique mapping will be used at the time of insert to decide the target shard for a row. Conceptually, this is also equivalent to the NoSQL Sharding Key, and we often refer to the Primary Vindex as the Sharding Key. - -Uniqueness for a Primary Vindex does not mean that the column has to be a primary key or unique in the MySQL schema. You can have multiple rows that map to the same keyspace ID. The Vindex uniqueness constraint is only used to make sure that all rows for a keyspace ID live in the same shard. 
- -However, there is a subtle difference: NoSQL datastores let you choose the Sharding Key, but the Sharding Scheme is generally hardcoded in the engine. In Vitess, the choice of Vindex lets you control how a column value maps to a keyspace ID. In other words, a Primary Vindex in Vitess not only defines the Sharding Key, it also decides the Sharding Scheme. - -Vindexes come in many varieties. Some of them can be used as Primary Vindex, and others have different purposes. The following sections will describe their properties. - -### Secondary Vindexes - -Secondary Vindexes are additional vindexes you can define against other columns of a table offering you optimizations for WHERE clauses that do not use the Primary Vindex. Secondary Vindexes return a single or a limited set of `keyspace IDs` which will allow VTGate to only target shards where the relevant data is present. In the absence of a Secondary Vindex, VTGate would have to send the query to all shards. - -Secondary Vindexes are also commonly known as cross-shard indexes. It is important to note that Secondary Vindexes are only for making routing decisions. The underlying database shards will most likely need traditional indexes on those same columns. - -### Unique and NonUnique Vindex - -A Unique Vindex is one that yields at most one keyspace ID for a given input. Knowing that a Vindex is Unique is useful because VTGate can push down some complex queries into VTTablet if it knows that the scope of that query cannot exceed a shard. Uniqueness is also a prerequisite for a Vindex to be used as Primary Vindex. - -A NonUnique Vindex is analogous to a database non-unique index. It is a secondary index for searching by an alternate WHERE clause. An input value could yield multiple keyspace IDs, and rows could be matched from multiple shards. 
For example, if a table has a `name` column that allows duplicates, you can define a cross-shard NonUnique Vindex for it, and this will let you efficiently search for users that match a certain `name`. - -### Functional and Lookup Vindex - -A Functional Vindex is one where the column value to keyspace ID mapping is pre-established, typically through an algorithmic function. In contrast, a Lookup Vindex is one that gives you the ability to create an association between a value and a keyspace ID, and recall it later when needed. - -Typically, the Primary Vindex is Functional. In some cases, it is the identity function where the input value yields itself as the keyspace id. However, one could also choose other algorithms like hashing or mod functions. - -A Lookup Vindex is usually backed by a lookup table. This is analogous to the traditional database index, except that it is cross-shard. At the time of insert, the computed keyspace ID of the row is stored in the lookup table against the column value. - -### Shared Vindexes - -Relational databases encourage normalization, which lets you split data into different tables to avoid duplication in the case of one-to-many relationships. In such cases, a key is shared between the two tables to indicate that the rows are related, aka `Foreign Key`. - -In a sharded environment, it is often beneficial to keep those rows in the same shard. If a Lookup Vindex was created on the foreign key column of each of those tables, you would find that the backing tables would actually be identical. In such cases, Vitess lets you share a single Lookup Vindex for multiple tables. Of these, one of them is designated as the owner, which is responsible for creating and deleting these associations. The other tables just reuse these associations. - -Caveat: If you delete a row from the owner table, Vitess will not perform cascading deletes. This is mainly for efficiency reasons; The application is likely capable of doing this more efficiently. 
- -Functional Vindexes can be also be shared. However, there is no concept of ownership because the column to keyspace ID mapping is pre-established. - -### Orthogonality - -The previously described properties are mostly orthogonal. Combining them gives rise to the following valid categories: - -* **Functional Unique**: This is the most popular category because it is the one best suited to be a Primary Vindex. -* **Functional NonUnique**: There are currently no use cases that need this category. -* **Lookup Unique Owned**: This gets used for optimizing high QPS queries that do not use the Primary Vindex columns in their WHERE clause. There is a price to pay: You incur an extra write to the lookup table for insert and delete operations, and an extra lookup for read operations. However, it is worth it if you do not want these high QPS queries to be sent to all shards. -* **Lookup Unique Unowned**: This category is used as an optimization as described in the Shared Vindexes section. -* **Lookup NonUnique Owned**: This gets used for high QPS queries on columns that are non-unique. -* **Lookup NonUnique Unowned**: You would rarely have to use this category because it is unlikely that you will be using a column as foreign key that is not unique within a shard. But it is theoretically possible. - -Of the above categories, `Functional Unique` and `Lookup Unique Unowned` Vindexes can be Primary. This is because those are the only ones that are unique and have the column to keyspace ID mapping pre-established. This is required because the Primary Vindex is responsible for assigning the keyspace ID for a row when it is created. - -However, it is generally not recommended to use a Lookup vindex as Primary because it is too slow for resharding. If absolutely unavoidable, you can use a Lookup Vindex as Primary. In such cases, it is recommended that you add a `keyspace ID` column to such tables. While resharding, Vitess can use that column to efficiently compute the target shard. 
You can even configure Vitess to auto-populate that column on inserts. This is done using the reverse map feature explained below. - -### How vindexes are used - -#### Cost - -Vindexes have costs. For routing a query, the Vindex with the lowest cost is chosen. The current costs are: - -Vindex Type | Cost ----------- | ---- -Identity | 0 -Functional | 1 -Lookup Unique | 10 -Lookup NonUnique | 20 - -#### Select - -In the case of a simple select, Vitess scans the WHERE clause to match references to Vindex columns and chooses the best one to use. If there is no match and the query is simple without complex constructs like aggregates, etc, it is sent to all shards. - -Vitess can handle more complex queries. For now, you can refer to the [design doc](https://github.com/vitessio/vitess/blob/master/doc/V3HighLevelDesign.md) on how it handles them. - -#### Insert - -* The Primary Vindex is used to generate a keyspace ID. -* The keyspace ID is validated against the rest of the Vindexes on the table. There must exist a mapping from the column value to the keyspace ID. -* If a column value was not provided for a Vindex and the Vindex is capable of reverse mapping a keyspace ID to an input value, that function is used to auto-fill the column. If there is no reverse map, it is an error. - -#### Update - -The WHERE clause is used to route the update. Updating the value of a Vindex column is supported, but with a restriction: the change in the column value should not result in the row being moved from one shard to another. A workaround is to perform a delete followed by insert, which works as expected. - -#### Delete - -If the table owns lookup vindexes, then the rows to be deleted are first read and the associated Vindex entries are deleted. Following this, the query is routed according to the WHERE clause. 
- -### Predefined Vindexes - -Vitess provides the following predefined Vindexes: - -Name | Type | Description | Primary | Reversible | Cost ----- | ---- | ----------- | ------- | ---------- | ---- -binary | Functional Unique | Identity | Yes | Yes | 0 -binary_md5 | Functional Unique | md5 hash | Yes | No | 1 -hash | Functional Unique | 3DES null-key hash | Yes | Yes | 1 -lookup | Lookup NonUnique | Lookup table non-unique values | No | Yes | 20 -lookup_unique | Lookup Unique | Lookup table unique values | If unowned | Yes | 10 -numeric | Functional Unique | Identity | Yes | Yes | 0 -numeric_static_map | Functional Unique | A JSON file that maps input values to keyspace IDs | Yes | No | 1 -unicode_loose_md5 | Functional Unique | Case-insensitive (UCA level 1) md5 hash | Yes | No | 1 -reverse_bits | Functional Unique | Bit Reversal | Yes | Yes | 1 - -Custom vindexes can also be plugged in as needed. - -## Sequences - -Auto-increment columns do not work very well for sharded tables. [Vitess sequences]({% link user-guide/vitess-sequences.md %}) solve this problem. Sequence tables must be specified in the VSchema, and then tied to table columns. At the time of insert, if no value is specified for such a column, VTGate will generate a number for it using the sequence table. - -## VSchema - -As mentioned in the beginning of the document, a VSchema is needed to tie together all the databases that Vitess manages. For a very trivial setup where there is only one unsharded keyspace, there is no need to specify a VSchema because Vitess will know that there is no other place to route a query. - -If you have multiple unsharded keyspaces, you can still avoid defining a VSchema in one of two ways: - -1. Connect to a keyspace and all queries are sent to it. -2. Connect to Vitess without specifying a keyspace, but use qualified names for tables, like `keyspace.table` in your queries. - -However, once the setup exceeds the above complexity, VSchemas become a necessity. 
Vitess has a [working demo](https://github.com/vitessio/vitess/tree/master/examples/demo) of VSchemas. This section documents the various features highlighted with snippets pulled from the demo. - -### Unsharded Table - -The following snippets show the necessary configs for creating a table in an unsharded keyspace: - -Schema: - -``` sql -# lookup keyspace -create table name_user_idx(name varchar(128), user_id bigint, primary key(name, user_id)); -``` - -VSchema: - -``` json -// lookup keyspace -{ - "sharded": false, - "tables": { - "name_user_idx": {} - } -} -``` - -For a normal unsharded table, the VSchema only needs to know the table name. No additional metadata is needed. - -### Sharded Table With Simple Primary Vindex - -To create a sharded table with a simple Primary Vindex, the VSchema requires more information: - -Schema: - -``` sql -# user keyspace -create table user(user_id bigint, name varchar(128), primary key(user_id)); -``` - -VSchema: - -``` json -// user keyspace -{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - }, - "tables": { - "user": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - } - ] - } - } -} -``` - -Because Vindexes can be shared, the JSON requires them to be specified in a separate `vindexes` section, and then referenced by name from the `tables` section. The VSchema above simply states that `user_id` uses `hash` as Primary Vindex. The first Vindex of every table must be the Primary Vindex. - -### Specifying A Sequence - -Since user is a sharded table, it will be beneficial to tie it to a Sequence. However, the sequence must be defined in the lookup (unsharded) keyspace. It is then referred from the user (sharded) keyspace. In this example, we are designating the user_id (Primary Vindex) column as the auto-increment. 
- -Schema: - -``` sql -# lookup keyspace -create table user_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; -insert into user_seq(id, next_id, cache) values(0, 1, 3); -``` - -For the sequence table, `id` is always 0. `next_id` starts off as 1, and the cache is usually a medium-sized number like 1000. In our example, we are using a small number to showcase how it works. - -VSchema: - -``` json -// lookup keyspace -{ - "sharded": false, - "tables": { - "user_seq": { - "type": "sequence" - } - } -} - -// user keyspace -{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - }, - "tables": { - "user": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - } - ], - "auto_increment": { - "column": "user_id", - "sequence": "user_seq" - } - } - } -} -``` - -### Specifying A Secondary Vindex - -The following snippet shows how to configure a Secondary Vindex that is backed by a lookup table. In this case, the lookup table is configured to be in the unsharded lookup keyspace: - -Schema: - -``` sql -# lookup keyspace -create table name_user_idx(name varchar(128), user_id bigint, primary key(name, user_id)); -``` - -VSchema: - -``` json -// lookup keyspace -{ - "sharded": false, - "tables": { - "name_user_idx": {} - } -} - -// user keyspace -{ - "sharded": true, - "vindexes": { - "name_user_idx": { - "type": "lookup_hash", - "params": { - "table": "name_user_idx", - "from": "name", - "to": "user_id" - }, - "owner": "user" - }, - "tables": { - "user": { - "column_vindexes": [ - { - "column": "name", - "name": "name_user_idx" - } - ] - } - } -} -``` - -To recap, a checklist for creating the shared Secondary Vindex is: - -* Create physical `name_user_idx` table in lookup database. -* Define a routing for it in the lookup VSchema. -* Define a Vindex as type `lookup_hash` that points to it. Ensure that the `params` match the table name and columns. -* Define the owner for the Vindex as the `user` table. 
-* Specify that `name` uses the Vindex. - -Currently, these steps have to be performed manually. However, extended DDLs backed by improved automation will simplify these tasks in the future. - -### Advanced usage - -The examples/demo also shows more tricks you can perform: - -* The `music` table uses a secondary lookup vindex `music_user_idx`. However, this lookup vindex is itself a sharded table. -* `music_extra` shares `music_user_idx` with `music`, and uses it as Primary Vindex. -* `music_extra` defines an additional Functional Vindex called `keyspace_id` which the demo auto-populates using the reverse mapping capability. -* There is also a `name_info` table that showcases a case-insensitive Vindex `unicode_loose_md5`. - -## Roadmap - -VSchema is still evolving. Features are mostly added on demand. The following features are currently on our roadmap: - -* DDL support -* Lookup Vindex backfill -* Pinned tables: This feature will allow unsharded tables to be pinned to a keyspace id. This avoids the need for a separate unsharded keyspace to contain them. diff --git a/doc/VTTabletModes.md b/doc/VTTabletModes.md deleted file mode 100644 index 1d37efdf4af..00000000000 --- a/doc/VTTabletModes.md +++ /dev/null @@ -1,107 +0,0 @@ -VTTablet can be configured to control mysql at many levels. At the level with the most control, vttablet can perform backups and restores, respond to reparenting commands coming through vtctld, automatically fix replication, and enforce semi-sync settings. - -At the level with the least control, vttablet just sends the application’s queries to mysql. The level of desired control is achieved through various command line arguments, explained below. - -## Managed MySQL - -In the mode with the highest control, VTTablet can take backups. It can also automatically restore from an existing backup to prime a new replica. For this mode, vttablet needs to run on the same host as mysql, and must be given access to mysql's my.cnf file. 
Additionally, the flags must not contain any connectivity parameters like `-db_host` or `-db_socket`; VTTablet will fetch the socket information from my.cnf and use that to connect to the local mysql. - -It will also load other information from the my.cnf, like the location of data files, etc. When it receives a request to take a backup, it will shut down mysql, copy the mysql data files to the backup store, and restart mysql. - -The my.cnf file can be specified the following ways: - -* Implicit: If mysql was initialized by the `mysqlctl` tool, then vttablet can find it based on just the `-tablet-path`. The default location for this file is `$VTDATAROOT/vt_/my.cnf`. -* `-mycnf-file`: This option can be used if the file is not present in the default location. -* `-my_cnf_server_id` and other flags: You can specify all values of the my.cnf file from the command line, and vttablet will behave as it read this information from a physical file. - -Specifying a `-db_host` or a `-db_socket` parameter will cause vttablet to skip the loading of the my.cnf file, and will disable its ability to perform backups or restores. - -### -restore\_from\_backup - -The default value for this flag is false. If set to true, and the my.cnf file was successfully loaded, then vttablet can perform automatic restores as follows: - -If started against a mysql instance that has no data files, it will search the list of backups for the latest one, and initiate a restore. After this, it will point the mysql to the current master and wait for replication to catch up. Once replication is caught up to the specified tolerance limit, it will advertise itself as serving. This will cause the vtgates to add it to the list of healthy tablets to serve queries from. - -If this flag is true, but my.cnf was not loaded, then vttablet will fatally exit with an error message. - -You can additionally control the level of concurrency for a restore with the `-restore_concurrency` flag. 
This is typically useful in cloud environments to prevent the restore process from becoming a 'noisy' neighbor by consuming all available disk IOPS. - -## Unmanaged or remote MySQL - -You can start a vttablet against a remote mysql by simply specifying the connection parameters `-db_host` and `-db_port` on the command line. In this mode, backup and restore operations will be disabled. If you start vttablet against a local mysql, you can specify a `-db_socket` instead, which will still make vttablet treat mysql as if it was remote. - -Specifically, the absence of a my.cnf file indicates to vttablet that it's connecting to a remote MySQL. - -## Partially managed MySQL - -Even if a MySQL is remote, you can still make vttablet perform some management functions. They are as follows: - -* `-disable_active_reparents`: If this flag is set, then any reparent or slave commands will not be allowed. These are InitShardMaster, PlannedReparent, EmergencyReparent, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current master. -* `-master_connect_retry`: This value is give to mysql when it connects a slave to the master as the retry duration parameter. -* `-enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if -disable_active_reparents was not turned on. -* `-enable_semi_sync`: This option will automatically enable semi-sync on new replicas as well as on any tablet that transitions into a replica type. This includes the demotion of a master to a replica. -* `-heatbeat_enable` and `-heartbeat_interval_duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag. 
- -## Typical vttablet command line flags - -### Minimal vttablet to enable query serving -``` -$TOPOLOGY_FLAGS --tablet-path $alias --init_keyspace $keyspace --init_shard $shard --init_tablet_type $tablet_type --port $port --grpc_port $grpc_port --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' -``` - -$alias needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous. - -Example TOPOLOGY\_FLAGS for a lockserver like zookeeper: - -`-topo_implementation zk2 -topo_global_server_address localhost:21811,localhost:21812,localhost:21813 -topo_global_root /vitess/global` - -### Additional parameters to enable cluster management -``` --enable_semi_sync --enable_replication_reporter --backup_storage_implementation file --file_backup_storage_root $BACKUP_MOUNT --restore_from_backup --vtctld_addr http://$hostname:$vtctld_web_port/ -``` - -### Additional parameters for running in prod - -``` --queryserver-config-pool-size 24 --queryserver-config-stream-pool-size 24 --queryserver-config-transaction-cap 300 -``` - -More tuning parameters are available, but the above overrides are definitely needed for serving reasonable production traffic. - -### Connecting vttablet to an already running mysql - -``` --db_host $MYSQL_HOST --db_port $MYSQL_PORT --db_app_user $USER --db_app_password $PASSWORD -``` - -### Additional user credentials that need to be supplied for performing various operations - -``` --db_allprivs_user --db_allprivs_password --db_appdebug_user --db_appdebug_password --db_dba_user --db_dba_password --db_filtered_user --db_filtered_password -``` - -Other flags exist for finer control. 
diff --git a/doc/VTTabletModules.png b/doc/VTTabletModules.png deleted file mode 100644 index 7d107faa703..00000000000 Binary files a/doc/VTTabletModules.png and /dev/null differ diff --git a/doc/VitessApi.md b/doc/VitessApi.md deleted file mode 100644 index 61b5ffc10a9..00000000000 --- a/doc/VitessApi.md +++ /dev/null @@ -1,1033 +0,0 @@ -This document describes Vitess API methods that enable your client application to more easily talk to your storage system to query data. API methods are grouped into the following categories: - -* [Range-based Sharding](#range-based-sharding) -* [Transactions](#transactions) -* [Custom Sharding](#custom-sharding) -* [Map Reduce](#map-reduce) -* [Topology](#topology) -* [v3 API (alpha)](#v3-api-(alpha)) - - -The following table lists the methods in each group and links to more detail about each method: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Range-based Sharding
ExecuteBatchKeyspaceIdsExecuteBatchKeyspaceIds executes the list of queries based on the specified keyspace ids.
ExecuteEntityIdsExecuteEntityIds executes the query based on the specified external id to keyspace id map.
ExecuteKeyRangesExecuteKeyRanges executes the query based on the specified key ranges.
ExecuteKeyspaceIdsExecuteKeyspaceIds executes the query based on the specified keyspace ids.
StreamExecuteKeyRangesStreamExecuteKeyRanges executes a streaming query based on key ranges. Use this method if the query returns a large number of rows.
StreamExecuteKeyspaceIdsStreamExecuteKeyspaceIds executes a streaming query based on keyspace ids. Use this method if the query returns a large number of rows.
Transactions
BeginBegin a transaction.
CommitCommit a transaction.
ResolveTransactionResolveTransaction resolves a transaction.
RollbackRollback a transaction.
Custom Sharding
ExecuteBatchShardsExecuteBatchShards executes the list of queries on the specified shards.
ExecuteShardsExecuteShards executes the query on the specified shards.
StreamExecuteShardsStreamExecuteShards executes a streaming query based on shards. Use this method if the query returns a large number of rows.
Map Reduce
SplitQuerySplit a query into non-overlapping sub queries
Topology
GetSrvKeyspaceGetSrvKeyspace returns a SrvKeyspace object (as seen by this vtgate). This method is provided as a convenient way for clients to take a look at the sharding configuration for a Keyspace. Looking at the sharding information should not be used for routing queries (as the information may change, use the Execute calls for that). It is convenient for monitoring applications for instance, or if using custom sharding.
v3 API (alpha)
ExecuteExecute tries to route the query to the right shard. It depends on the query and bind variables to provide enough information in conjunction with the vindexes to route the query.
StreamExecuteStreamExecute executes a streaming query based on shards. It depends on the query and bind variables to provide enough information in conjunction with the vindexes to route the query. Use this method if the query returns a large number of rows.
-##Range-based Sharding -### ExecuteBatchKeyspaceIds - -ExecuteBatchKeyspaceIds executes the list of queries based on the specified keyspace ids. - -#### Request - - ExecuteBatchKeyspaceIdsRequest is the payload to ExecuteBatchKeyspaceId. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| queries
list <[BoundKeyspaceIdQuery](#boundkeyspaceidquery)>| BoundKeyspaceIdQuery represents a single query request for the specified list of keyspace ids. This is used in a list for ExecuteBatchKeyspaceIdsRequest. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| as_transaction
bool| as_transaction will execute the queries in this batch in a single transaction per shard, created for this purpose. (this can be seen as adding a 'begin' before and 'commit' after the queries). Only makes sense if tablet_type is master. If set, the Session is ignored. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteBatchKeyspaceIdsResponse is the returned value from ExecuteBatchKeyspaceId. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| results
list <[query.QueryResult](#query.queryresult)>| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### ExecuteEntityIds - -ExecuteEntityIds executes the query based on the specified external id to keyspace id map. - -#### Request - - ExecuteEntityIdsRequest is the payload to ExecuteEntityIds. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| entity_column_name
string| entity_column_name is the column name to use. | -| entity_keyspace_ids
list <[EntityId](#executeentityidsrequest.entityid)>| entity_keyspace_ids are pairs of entity_column_name values associated with its corresponding keyspace_id. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| not_in_transaction
bool| not_in_transaction is deprecated and should not be used. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Messages - -##### ExecuteEntityIdsRequest.EntityId - -Properties - -| Name |Description | -| :-------- | :-------- -| type
[query.Type](#query.type)| Type defines the various supported data types in bind vars and query results. | -| value
bytes| value is the value for the entity. Not set if type is NULL_TYPE. | -| keyspace_id
bytes| keyspace_id is the associated keyspace_id for the entity. | - -#### Response - - ExecuteEntityIdsResponse is the returned value from ExecuteEntityIds. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### ExecuteKeyRanges - -ExecuteKeyRanges executes the query based on the specified key ranges. - -#### Request - - ExecuteKeyRangesRequest is the payload to ExecuteKeyRanges. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to | -| key_ranges
list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| not_in_transaction
bool| not_in_transaction is deprecated and should not be used. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteKeyRangesResponse is the returned value from ExecuteKeyRanges. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### ExecuteKeyspaceIds - -ExecuteKeyspaceIds executes the query based on the specified keyspace ids. - -#### Request - - ExecuteKeyspaceIdsRequest is the payload to ExecuteKeyspaceIds. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| keyspace_ids
list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| not_in_transaction
bool| not_in_transaction is deprecated and should not be used. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteKeyspaceIdsResponse is the returned value from ExecuteKeyspaceIds. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### StreamExecuteKeyRanges - -StreamExecuteKeyRanges executes a streaming query based on key ranges. Use this method if the query returns a large number of rows. - -#### Request - - StreamExecuteKeyRangesRequest is the payload to StreamExecuteKeyRanges. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| key_ranges
list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - StreamExecuteKeyRangesResponse is the returned value from StreamExecuteKeyRanges. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### StreamExecuteKeyspaceIds - -StreamExecuteKeyspaceIds executes a streaming query based on keyspace ids. Use this method if the query returns a large number of rows. - -#### Request - - StreamExecuteKeyspaceIdsRequest is the payload to StreamExecuteKeyspaceIds. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| keyspace_ids
list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - StreamExecuteKeyspaceIdsResponse is the returned value from StreamExecuteKeyspaceIds. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -##Transactions -### Begin - -Begin a transaction. - -#### Request - - BeginRequest is the payload to Begin. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| single_db
bool| single_db specifies if the transaction should be restricted to a single database. | - -#### Response - - BeginResponse is the returned value from Begin. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | - -### Commit - -Commit a transaction. - -#### Request - - CommitRequest is the payload to Commit. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| atomic
bool| atomic specifies if the commit should go through the 2PC workflow to ensure atomicity. | - -#### Response - - CommitResponse is the returned value from Commit. - -##### Properties - -| Name |Description | -| :-------- | :-------- - -### ResolveTransaction - -ResolveTransaction resolves a transaction. - -#### Request - - ResolveTransactionRequest is the payload to ResolveTransaction. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| dtid
string| dtid is the dtid of the transaction to be resolved. | - -#### Response - - ResolveTransactionResponse is the returned value from Rollback. - -##### Properties - -| Name |Description | -| :-------- | :-------- - -### Rollback - -Rollback a transaction. - -#### Request - - RollbackRequest is the payload to Rollback. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | - -#### Response - - RollbackResponse is the returned value from Rollback. - -##### Properties - -| Name |Description | -| :-------- | :-------- - -##Custom Sharding -### ExecuteBatchShards - -ExecuteBatchShards executes the list of queries on the specified shards. - -#### Request - - ExecuteBatchShardsRequest is the payload to ExecuteBatchShards - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| queries
list <[BoundShardQuery](#boundshardquery)>| BoundShardQuery represents a single query request for the specified list of shards. This is used in a list for ExecuteBatchShardsRequest. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| as_transaction
bool| as_transaction will execute the queries in this batch in a single transaction per shard, created for this purpose. (this can be seen as adding a 'begin' before and 'commit' after the queries). Only makes sense if tablet_type is master. If set, the Session is ignored. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteBatchShardsResponse is the returned value from ExecuteBatchShards. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| results
list <[query.QueryResult](#query.queryresult)>| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### ExecuteShards - -ExecuteShards executes the query on the specified shards. - -#### Request - - ExecuteShardsRequest is the payload to ExecuteShards. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| shards
list <string>| shards to target the query to. A DML can only target one shard. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| not_in_transaction
bool| not_in_transaction is deprecated and should not be used. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteShardsResponse is the returned value from ExecuteShards. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### StreamExecuteShards - -StreamExecuteShards executes a streaming query based on shards. Use this method if the query returns a large number of rows. - -#### Request - - StreamExecuteShardsRequest is the payload to StreamExecuteShards. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| shards
list <string>| shards to target the query to. | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - StreamExecuteShardsResponse is the returned value from StreamExecuteShards. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -##Map Reduce -### SplitQuery - -Split a query into non-overlapping sub queries - -#### Request - - SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| keyspace
string| keyspace to target the query to. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| split_column
list <string>| Each generated query-part will be restricted to rows whose values in the columns listed in this field are in a particular range. The list of columns named here must be a prefix of the list of columns defining some index or primary key of the table referenced in 'query'. For many tables using the primary key columns (in order) is sufficient and this is the default if this field is omitted. See the comment on the 'algorithm' field for more restrictions and information. | -| split_count
int64| You can specify either an estimate of the number of query-parts to generate or an estimate of the number of rows each query-part should return. Thus, exactly one of split_count or num_rows_per_query_part should be nonzero. The non-given parameter is calculated from the given parameter using the formula: split_count * num_rows_per_query_part = table_size, where table_size is an approximation of the number of rows in the table. Note that if "split_count" is given it is regarded as an estimate. The number of query-parts returned may differ slightly (in particular, if it's not a whole multiple of the number of vitess shards). | <br>| num_rows_per_query_part
int64| | -| algorithm
query.SplitQueryRequest.Algorithm| The algorithm to use to split the query. The split algorithm is performed on each database shard in parallel. The lists of query-parts generated by the shards are merged and returned to the caller. Two algorithms are supported: EQUAL_SPLITS If this algorithm is selected then only the first 'split_column' given is used (or the first primary key column if the 'split_column' field is empty). In the rest of this algorithm's description, we refer to this column as "the split column". The split column must have numeric type (integral or floating point). The algorithm works by taking the interval [min, max], where min and max are the minimum and maximum values of the split column in the table-shard, respectively, and partitioning it into 'split_count' sub-intervals of equal size. The added WHERE clause of each query-part restricts that part to rows whose value in the split column belongs to a particular sub-interval. This is fast, but requires that the distribution of values of the split column be uniform in [min, max] for the number of rows returned by each query part to be roughly the same. FULL_SCAN If this algorithm is used then the split_column must be the primary key columns (in order). This algorithm performs a full-scan of the table-shard referenced in 'query' to get "boundary" rows that are num_rows_per_query_part apart when the table is ordered by the columns listed in 'split_column'. It then restricts each query-part to the rows located between two successive boundary rows. This algorithm supports multiple split_column's of any type, but is slower than EQUAL_SPLITS. | -| use_split_query_v2
bool| Remove this field after this new server code is released to prod. We must keep it for now, so that clients can still send it to the old server code currently in production. | - -#### Response - - SplitQueryResponse is the returned value from SplitQuery. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| splits
list <[Part](#splitqueryresponse.part)>| splits contains the queries to run to fetch the entire data set. | - -#### Messages - -##### SplitQueryResponse.KeyRangePart - -Properties - -| Name |Description | -| :-------- | :-------- -| keyspace
string| keyspace to target the query to. | -| key_ranges
list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. | - -##### SplitQueryResponse.Part - -Properties - -| Name |Description | -| :-------- | :-------- -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| key_range_part
[KeyRangePart](#splitqueryresponse.keyrangepart)| key_range_part is set if the query should be executed by ExecuteKeyRanges. | -| shard_part
[ShardPart](#splitqueryresponse.shardpart)| shard_part is set if the query should be executed by ExecuteShards. | -| size
int64| size is the approximate number of rows this query will return. | - -##### SplitQueryResponse.ShardPart - -Properties - -| Name |Description | -| :-------- | :-------- -| keyspace
string| keyspace to target the query to. | -| shards
list <string>| shards to target the query to. | - -##Topology -### GetSrvKeyspace - -GetSrvKeyspace returns a SrvKeyspace object (as seen by this vtgate). This method is provided as a convenient way for clients to take a look at the sharding configuration for a Keyspace. Looking at the sharding information should not be used for routing queries (as the information may change, use the Execute calls for that). It is convenient for monitoring applications for instance, or if using custom sharding. - -#### Request - - GetSrvKeyspaceRequest is the payload to GetSrvKeyspace. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| keyspace
string| keyspace name to fetch. | - -#### Response - - GetSrvKeyspaceResponse is the returned value from GetSrvKeyspace. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| srv_keyspace
[topodata.SrvKeyspace](#topodata.srvkeyspace)| SrvKeyspace is a rollup node for the keyspace itself. | - -##v3 API (alpha) -### Execute - -Execute tries to route the query to the right shard. It depends on the query and bind variables to provide enough information in conjunction with the vindexes to route the query. - -#### Request - - ExecuteRequest is the payload to Execute. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| not_in_transaction
bool| not_in_transaction is deprecated and should not be used. | -| keyspace
string| keyspace to target the query to. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - ExecuteResponse is the returned value from Execute. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| session
[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### StreamExecute - -StreamExecute executes a streaming query based on shards. It depends on the query and bind variables to provide enough information in conjunction with the vindexes to route the query. Use this method if the query returns a large number of rows. - -#### Request - - StreamExecuteRequest is the payload to StreamExecute. - -##### Parameters - -| Name |Description | -| :-------- | :-------- -| caller_id
[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. | -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | -| keyspace
string| keyspace to target the query to. | -| options
[query.ExecuteOptions](#query.executeoptions)| ExecuteOptions is passed around for all Execute calls. | - -#### Response - - StreamExecuteResponse is the returned value from StreamExecute. - -##### Properties - -| Name |Description | -| :-------- | :-------- -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -## Enums - -### query.Type - - Type defines the various supported data types in bind vars and query results. - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| NULL_TYPE | 0 | NULL_TYPE specifies a NULL type. | -| INT8 | 257 | INT8 specifies a TINYINT type. Properties: 1, IsNumber. | -| UINT8 | 770 | UINT8 specifies a TINYINT UNSIGNED type. Properties: 2, IsNumber, IsUnsigned. | -| INT16 | 259 | INT16 specifies a SMALLINT type. Properties: 3, IsNumber. | -| UINT16 | 772 | UINT16 specifies a SMALLINT UNSIGNED type. Properties: 4, IsNumber, IsUnsigned. | -| INT24 | 261 | INT24 specifies a MEDIUMINT type. Properties: 5, IsNumber. | -| UINT24 | 774 | UINT24 specifies a MEDIUMINT UNSIGNED type. Properties: 6, IsNumber, IsUnsigned. | -| INT32 | 263 | INT32 specifies a INTEGER type. Properties: 7, IsNumber. | -| UINT32 | 776 | UINT32 specifies a INTEGER UNSIGNED type. Properties: 8, IsNumber, IsUnsigned. | -| INT64 | 265 | INT64 specifies a BIGINT type. Properties: 9, IsNumber. | -| UINT64 | 778 | UINT64 specifies a BIGINT UNSIGNED type. Properties: 10, IsNumber, IsUnsigned. | -| FLOAT32 | 1035 | FLOAT32 specifies a FLOAT type. Properties: 11, IsFloat. | -| FLOAT64 | 1036 | FLOAT64 specifies a DOUBLE or REAL type. Properties: 12, IsFloat. | -| TIMESTAMP | 2061 | TIMESTAMP specifies a TIMESTAMP type. Properties: 13, IsQuoted. | -| DATE | 2062 | DATE specifies a DATE type. Properties: 14, IsQuoted. | -| TIME | 2063 | TIME specifies a TIME type. Properties: 15, IsQuoted. 
| -| DATETIME | 2064 | DATETIME specifies a DATETIME type. Properties: 16, IsQuoted. | -| YEAR | 785 | YEAR specifies a YEAR type. Properties: 17, IsNumber, IsUnsigned. | -| DECIMAL | 18 | DECIMAL specifies a DECIMAL or NUMERIC type. Properties: 18, None. | -| TEXT | 6163 | TEXT specifies a TEXT type. Properties: 19, IsQuoted, IsText. | -| BLOB | 10260 | BLOB specifies a BLOB type. Properties: 20, IsQuoted, IsBinary. | -| VARCHAR | 6165 | VARCHAR specifies a VARCHAR type. Properties: 21, IsQuoted, IsText. | -| VARBINARY | 10262 | VARBINARY specifies a VARBINARY type. Properties: 22, IsQuoted, IsBinary. | -| CHAR | 6167 | CHAR specifies a CHAR type. Properties: 23, IsQuoted, IsText. | -| BINARY | 10264 | BINARY specifies a BINARY type. Properties: 24, IsQuoted, IsBinary. | -| BIT | 2073 | BIT specifies a BIT type. Properties: 25, IsQuoted. | -| ENUM | 2074 | ENUM specifies an ENUM type. Properties: 26, IsQuoted. | -| SET | 2075 | SET specifies a SET type. Properties: 27, IsQuoted. | -| TUPLE | 28 | TUPLE specifies a tuple. This cannot be returned in a QueryResult, but it can be sent as a bind var. Properties: 28, None. | -| GEOMETRY | 2077 | GEOMETRY specifies a GEOMETRY type. Properties: 29, IsQuoted. | -| JSON | 2078 | JSON specified a JSON type. Properties: 30, IsQuoted. | - -### topodata.KeyspaceIdType - - KeyspaceIdType describes the type of the sharding key for a range-based sharded keyspace. - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| UNSET | 0 | UNSET is the default value, when range-based sharding is not used. | -| UINT64 | 1 | UINT64 is when uint64 value is used. This is represented as 'unsigned bigint' in mysql | -| BYTES | 2 | BYTES is when an array of bytes is used. This is represented as 'varbinary' in mysql | - -### topodata.TabletType - - TabletType represents the type of a given tablet. - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| UNKNOWN | 0 | UNKNOWN is not a valid value. 
| -| MASTER | 1 | MASTER is the master server for the shard. Only MASTER allows DMLs. | -| REPLICA | 2 | REPLICA is a slave type. It is used to serve live traffic. A REPLICA can be promoted to MASTER. A demoted MASTER will go to REPLICA. | -| RDONLY | 3 | RDONLY (old name) / BATCH (new name) is used to serve traffic for long-running jobs. It is a separate type from REPLICA so long-running queries don't affect web-like traffic. | -| BATCH | 3 | | -| SPARE | 4 | SPARE is a type of servers that cannot serve queries, but is available in case an extra server is needed. | -| EXPERIMENTAL | 5 | EXPERIMENTAL is like SPARE, except it can serve queries. This type can be used for usages not planned by Vitess, like online export to another storage engine. | -| BACKUP | 6 | BACKUP is the type a server goes to when taking a backup. No queries can be served in BACKUP mode. | -| RESTORE | 7 | RESTORE is the type a server uses when restoring a backup, at startup time. No queries can be served in RESTORE mode. | -| DRAINED | 8 | DRAINED is the type a server goes into when used by Vitess tools to perform an offline action. It is a serving type (as the tools processes may need to run queries), but it's not used to route queries from Vitess users. In this state, this tablet is dedicated to the process that uses it. | - -### vtrpc.ErrorCode - - ErrorCode is the enum values for Errors. Internally, errors should be created with one of these codes. These will then be translated over the wire by various RPC frameworks. - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| SUCCESS | 0 | SUCCESS is returned from a successful call. | -| CANCELLED | 1 | CANCELLED means that the context was cancelled (and noticed in the app layer, as opposed to the RPC layer). | -| UNKNOWN_ERROR | 2 | UNKNOWN_ERROR includes: 1. MySQL error codes that we don't explicitly handle. 2. MySQL response that wasn't as expected. 
For example, we might expect a MySQL timestamp to be returned in a particular way, but it wasn't. 3. Anything else that doesn't fall into a different bucket. | -| BAD_INPUT | 3 | BAD_INPUT is returned when an end-user either sends SQL that couldn't be parsed correctly, or tries a query that isn't supported by Vitess. | -| DEADLINE_EXCEEDED | 4 | DEADLINE_EXCEEDED is returned when an action is taking longer than a given timeout. | -| INTEGRITY_ERROR | 5 | INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to duplicate primary keys. | -| PERMISSION_DENIED | 6 | PERMISSION_DENIED errors are returned when a user requests access to something that they don't have permissions for. | -| RESOURCE_EXHAUSTED | 7 | RESOURCE_EXHAUSTED is returned when a query exceeds its quota in some dimension and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED should not be retried, as it could be detrimental to the server's health. Examples of errors that will cause the RESOURCE_EXHAUSTED code: 1. TxPoolFull: this is retried server-side, and is only returned as an error if the server-side retries failed. 2. Query is killed due to it taking too long. | -| QUERY_NOT_SERVED | 8 | QUERY_NOT_SERVED means that a query could not be served right now. Client can interpret it as: "the tablet that you sent this query to cannot serve the query right now, try a different tablet or try again later." This could be due to various reasons: QueryService is not serving, should not be serving, wrong shard, wrong tablet type, blacklisted table, etc. Clients that receive this error should usually retry the query, but after taking the appropriate steps to make sure that the query will get sent to the correct tablet. | -| NOT_IN_TX | 9 | NOT_IN_TX means that we're not currently in a transaction, but we should be. | -| INTERNAL_ERROR | 10 | INTERNAL_ERRORs are problems that only the server can fix, not the client. 
These errors are not due to a query itself, but rather due to the state of the system. Generally, we don't expect the errors to go away by themselves, but they may go away after human intervention. Examples of scenarios where INTERNAL_ERROR is returned: 1. Something is not configured correctly internally. 2. A necessary resource is not available, and we don't expect it to become available by itself. 3. A sanity check fails. 4. Some other internal error occurs. Clients should not retry immediately, as there is little chance of success. However, it's acceptable for retries to happen internally, for example to multiple backends, in case only a subset of backend are not functional. | -| TRANSIENT_ERROR | 11 | TRANSIENT_ERROR is used for when there is some error that we expect we can recover from automatically - often due to a resource limit temporarily being reached. Retrying this error, with an exponential backoff, should succeed. Clients should be able to successfully retry the query on the same backends. Examples of things that can trigger this error: 1. Query has been throttled 2. VtGate could have request backlog | -| UNAUTHENTICATED | 12 | UNAUTHENTICATED errors are returned when a user requests access to something, and we're unable to verify the user's authentication. | - -## Messages - -### BoundKeyspaceIdQuery - -BoundKeyspaceIdQuery represents a single query request for the specified list of keyspace ids. This is used in a list for ExecuteBatchKeyspaceIdsRequest. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| keyspace_ids
list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. | - -### BoundShardQuery - -BoundShardQuery represents a single query request for the specified list of shards. This is used in a list for ExecuteBatchShardsRequest. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| query
[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables | -| keyspace
string| keyspace to target the query to. | -| shards
list <string>| shards to target the query to. A DML can only target one shard. | - -### Session - -Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| in_transaction
bool| | -| shard_sessions
list <[ShardSession](#session.shardsession)>| | -| single_db
bool| single_db specifies if the transaction should be restricted to a single database. | - -#### Messages - -##### Session.ShardSession - -Properties - -| Name |Description | -| :-------- | :-------- -| target
[query.Target](#query.target)| Target describes what the client expects the tablet is. If the tablet does not match, an error is returned. | -| transaction_id
int64| | - -### query.BindVariable - -BindVariable represents a single bind variable in a Query. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| type
[Type](#query.type)| | -| value
bytes| | -| values
list <[Value](#query.value)>| Value represents a typed value. | - -### query.BoundQuery - -BoundQuery is a query with its bind variables - -#### Properties - -| Name |Description | -| :-------- | :-------- -| sql
string| sql is the SQL query to execute | -| bind_variables
map <string, [BindVariable](#query.bindvariable)>| bind_variables is a map of all bind variables to expand in the query | - -### query.EventToken - -EventToken is a structure that describes a point in time in a replication stream on one shard. The most recent known replication position can be retrieved from vttablet when executing a query. It is also sent with the replication streams from the binlog service. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| timestamp
int64| timestamp is the MySQL timestamp of the statements. Seconds since Epoch. | -| shard
string| The shard name that applied the statements. Note this is not set when streaming from a vttablet. It is only used on the client -> vtgate link. | -| position
string| The position on the replication stream after this statement was applied. It is not the transaction ID / GTID, but the position / GTIDSet. | - -### query.ExecuteOptions - -ExecuteOptions is passed around for all Execute calls. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| include_event_token
bool| This used to be exclude_field_names, which was replaced by the IncludedFields enum below. If set, we will try to include an EventToken with the responses. | <br>| compare_event_token
[EventToken](#query.eventtoken)| EventToken is a structure that describes a point in time in a replication stream on one shard. The most recent known replication position can be retrieved from vttablet when executing a query. It is also sent with the replication streams from the binlog service. | -| included_fields
[IncludedFields](#executeoptions.includedfields)| Controls what fields are returned in Field message responses from mysql, i.e. field name, table name, etc. This is an optimization for high-QPS queries where the client knows what it's getting | - -#### Enums - -##### ExecuteOptions.IncludedFields - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| TYPE_AND_NAME | 0 | | -| TYPE_ONLY | 1 | | -| ALL | 2 | | - -### query.Field - -Field describes a single column returned by a query - -#### Properties - -| Name |Description | -| :-------- | :-------- -| name
string| name of the field as returned by mysql C API | -| type
[Type](#query.type)| vitess-defined type. Conversion function is in sqltypes package. | -| table
string| Remaining fields from mysql C API. These fields are only populated when ExecuteOptions.included_fields is set to IncludedFields.ALL. | -| org_table
string| | -| database
string| | -| org_name
string| | -| column_length
uint32| column_length is really a uint32. All 32 bits can be used. | -| charset
uint32| charset is actually a uint16. Only the lower 16 bits are used. | -| decimals
uint32| decimals is actually a uint8. Only the lower 8 bits are used. | -| flags
uint32| flags is actually a uint16. Only the lower 16 bits are used. | - -### query.QueryResult - -QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). - -#### Properties - -| Name |Description | -| :-------- | :-------- -| fields
list <[Field](#query.field)>| Field describes a single column returned by a query | -| rows_affected
uint64| | -| insert_id
uint64| | -| rows
list <[Row](#query.row)>| Row is a database row. | -| extras
[ResultExtras](#query.resultextras)| ResultExtras contains optional out-of-band information. Usually the extras are requested by adding ExecuteOptions flags. | - -### query.ResultExtras - -ResultExtras contains optional out-of-band information. Usually the extras are requested by adding ExecuteOptions flags. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| event_token
[EventToken](#query.eventtoken)| EventToken is a structure that describes a point in time in a replication stream on one shard. The most recent known replication position can be retrieved from vttablet when executing a query. It is also sent with the replication streams from the binlog service. | -| fresher
bool| If set, it means the data returned with this result is fresher than the compare_token passed in the ExecuteOptions. | - -### query.ResultWithError - -ResultWithError represents a query response in the form of result or error but not both. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| error
[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. | -| result
[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). | - -### query.Row - -Row is a database row. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| lengths
list <sint64>| lengths contains the length of each value in values. A length of -1 means that the field is NULL. While reading values, you have to accumulate the length to know the offset where the next value begins in values. | <br>| values
bytes| values contains a concatenation of all values in the row. | - -### query.StreamEvent - -StreamEvent describes a set of transformations that happened as a single transactional unit on a server. It is streamed back by the Update Stream calls. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| statements
list <[Statement](#streamevent.statement)>| The statements in this transaction. | -| event_token
[EventToken](#query.eventtoken)| EventToken is a structure that describes a point in time in a replication stream on one shard. The most recent known replication position can be retrieved from vttablet when executing a query. It is also sent with the replication streams from the binlog service. | - -#### Messages - -##### StreamEvent.Statement - -One individual Statement in a transaction. - -Properties - -| Name |Description | -| :-------- | :-------- -| category
[Category](#streamevent.statement.category)| | -| table_name
string| table_name, primary_key_fields and primary_key_values are set for DML. | -| primary_key_fields
list <[Field](#query.field)>| Field describes a single column returned by a query | -| primary_key_values
list <[Row](#query.row)>| Row is a database row. | -| sql
bytes| sql is set for all queries. FIXME(alainjobart) we may not need it for DMLs. | - -#### Enums - -##### StreamEvent.Statement.Category - - One individual Statement in a transaction. The category of one statement. - -| Name |Value |Description | -| :-------- | :-------- | :-------- -| Error | 0 | | -| DML | 1 | | -| DDL | 2 | | - -### query.Target - -Target describes what the client expects the tablet is. If the tablet does not match, an error is returned. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| keyspace
string| | -| shard
string| | -| tablet_type
[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. | - -### query.Value - -Value represents a typed value. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| type
[Type](#query.type)| | -| value
bytes| | - -### topodata.KeyRange - -KeyRange describes a range of sharding keys, when range-based sharding is used. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| start
bytes| | -| end
bytes| | - -### topodata.ShardReference - -ShardReference is used as a pointer from a SrvKeyspace to a Shard - -#### Properties - -| Name |Description | -| :-------- | :-------- -| name
string| Copied from Shard. | -| key_range
[KeyRange](#topodata.keyrange)| KeyRange describes a range of sharding keys, when range-based sharding is used. | - -### topodata.SrvKeyspace - -SrvKeyspace is a rollup node for the keyspace itself. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| partitions
list <[KeyspacePartition](#srvkeyspace.keyspacepartition)>| The partitions this keyspace is serving, per tablet type. | -| sharding_column_name
string| copied from Keyspace | -| sharding_column_type
[KeyspaceIdType](#topodata.keyspaceidtype)| | -| served_from
list <[ServedFrom](#srvkeyspace.servedfrom)>| | - -#### Messages - -##### SrvKeyspace.KeyspacePartition - -Properties - -| Name |Description | -| :-------- | :-------- -| served_type
[TabletType](#topodata.tablettype)| The type this partition applies to. | -| shard_references
list <[ShardReference](#topodata.shardreference)>| ShardReference is used as a pointer from a SrvKeyspace to a Shard | - -##### SrvKeyspace.ServedFrom - -ServedFrom indicates a relationship between a TabletType and the keyspace name that's serving it. - -Properties - -| Name |Description | -| :-------- | :-------- -| tablet_type
[TabletType](#topodata.tablettype)| ServedFrom indicates a relationship between a TabletType and the keyspace name that's serving it. the tablet type | -| keyspace
string| the keyspace name that's serving it | - -### vtrpc.CallerID - -CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| principal
string| principal is the effective user identifier. It is usually filled in with whoever made the request to the appserver, if the request came from an automated job or another system component. If the request comes directly from the Internet, or if the Vitess client takes action on its own accord, it is okay for this field to be absent. | -| component
string| component describes the running process of the effective caller. It can for instance be the hostname:port of the servlet initiating the database call, or the container engine ID used by the servlet. | -| subcomponent
string| subcomponent describes a component inside the immediate caller which is responsible for generating this request. Suggested values are a servlet name or an API endpoint name. | - -### vtrpc.RPCError - -RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. - -#### Properties - -| Name |Description | -| :-------- | :-------- -| code
[ErrorCode](#vtrpc.errorcode)| | -| message
string| | - diff --git a/doc/VitessComponents.png b/doc/VitessComponents.png deleted file mode 100644 index 73e90422153..00000000000 Binary files a/doc/VitessComponents.png and /dev/null differ diff --git a/doc/VitessOverview.md b/doc/VitessOverview.md deleted file mode 100644 index dc66784b9da..00000000000 --- a/doc/VitessOverview.md +++ /dev/null @@ -1,239 +0,0 @@ -Vitess is a database solution for deploying, scaling and managing large clusters of MySQL instances. -It's architected to run as effectively in a public or private cloud architecture as it does -on dedicated hardware. It combines and extends many important MySQL features with the -scalability of a NoSQL database. Vitess can help you with the following problems: - -1. Scaling a MySQL database by allowing you to shard it, while keeping application changes to a minimum. -2. Migrating from baremetal to a private or public cloud. -3. Deploying and managing a large number of MySQL instances. - -Vitess includes compliant JDBC and Go database drivers using a native query protocol. Additionally, it implements the MySQL server protocol which is compatible with virtually any other language. - -Vitess has been serving all YouTube database traffic since 2011, and has now been adopted by many enterprises for their production needs. - -## Features - -* **Performance** - * Connection pooling - Multiplex front-end application queries onto a pool of MySQL connections to optimize performance. - * Query de-duping – Reuse results of an in-flight query for any identical requests received while the in-flight query was still executing. - * Transaction manager – Limit number of concurrent transactions and manage deadlines to optimize overall throughput. - -* **Protection** - * Query rewriting and sanitization – Add limits and avoid non-deterministic updates. - * Query blacklisting – Customize rules to prevent potentially problematic queries from hitting your database. 
- * Query killer – Terminate queries that take too long to return data. - * Table ACLs – Specify access control lists (ACLs) for tables based on the connected user. - -* **Monitoring** - * Performance analysis: Tools let you monitor, diagnose, and analyze your database performance. - * Query streaming – Use a list of incoming queries to serve OLAP workloads. - * Update stream – A server streams the list of rows changing in the database, which can be used as a mechanism to propagate changes to other data stores. - -* **Topology Management Tools** - * Master management tools (handles reparenting) - * Web-based management GUI - * Designed to work in multiple data centers / regions - -* **Sharding** - * Virtually seamless dynamic re-sharding - * Vertical and Horizontal sharding support - * Multiple sharding schemes, with the ability to plug-in custom ones - -## Comparisons to other storage options - -The following sections compare Vitess to two common alternatives, a vanilla MySQL implementation and a NoSQL implementation. - -### Vitess vs. Vanilla MySQL - -Vitess improves a vanilla MySQL implementation in several ways: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Vanilla MySQLVitess
Every MySQL connection has a memory overhead that ranges between 256KB and almost 3MB, depending on which MySQL release you're using. As your user base grows, you need to add RAM to support additional connections, but the RAM does not contribute to faster queries. In addition, there is a significant CPU cost associated with obtaining the connections.Vitess' gRPC-based protocol creates very lightweight connections. Vitess' connection pooling feature uses Go's concurrency support to map these lightweight connections to a small pool of MySQL connections. As such, Vitess can easily handle thousands of connections.
Poorly written queries, such as those that don't set a LIMIT, can negatively impact database performance for all users.Vitess employs a SQL parser that uses a configurable set of rules to rewrite queries that might hurt database performance.
Sharding is a process of partitioning your data to improve scalability and performance. MySQL lacks native sharding support, requiring you to write sharding code and embed sharding logic in your application.Vitess supports a variety of sharding schemes. It can also migrate tables into different databases and scale up or down the number of shards. These functions are performed non-intrusively, completing most data transitions with just a few seconds of read-only downtime.
A MySQL cluster using replication for availability has a master database and a few replicas. If the master fails, a replica should become the new master. This requires you to manage the database lifecycle and communicate the current system state to your application.Vitess helps to manage the lifecycle of your database scenarios. It supports and automatically handles various scenarios, including master failover and data backups.
A MySQL cluster can have custom database configurations for different workloads, like a master database for writes, fast read-only replicas for web clients, slower read-only replicas for batch jobs, and so forth. If the database has horizontal sharding, the setup is repeated for each shard, and the app needs baked-in logic to know how to find the right database.Vitess uses a topology backed by a consistent data store, like etcd or ZooKeeper. This means the cluster view is always up-to-date and consistent for different clients. Vitess also provides a proxy that routes queries efficiently to the most appropriate MySQL instance.
- -### Vitess vs. NoSQL - -If you're considering a NoSQL solution primarily because of concerns about the scalability of MySQL, Vitess might be a more appropriate choice for your application. While NoSQL provides great support for unstructured data, Vitess still offers several benefits not available in NoSQL datastores: - - - - - - - - - - - - - - - - - - - - - - - - - - -
NoSQLVitess
NoSQL databases do not define relationships between database tables, and only support a subset of the SQL language.Vitess is not a simple key-value store. It supports complex query semantics such as where clauses, JOINS, aggregation functions, and more.
NoSQL datastores do not support transactions.Vitess supports transactions within a shard. For transactions that span multiple shards, it allows you to optionally enable 2PC.
NoSQL solutions have custom APIs, leading to custom architectures, applications, and tools.Vitess adds very little variance to MySQL, a database that most people are already accustomed to working with.
NoSQL solutions provide limited support for database indexes compared to MySQL.Vitess allows you to use all of MySQL's indexing functionality to optimize query performance.
- -## Architecture - -The Vitess platform consists of a number of server processes, command-line utilities, and web-based utilities, backed by a consistent metadata store. - -Depending on the current state of your application, you could arrive at a full Vitess implementation through a number of different process flows. For example, if you're building a service from scratch, your first step with Vitess would be to define your database topology. However, if you need to scale your existing database, you'd likely start by deploying a connection proxy. - -Vitess tools and servers are designed to help you whether you start with a complete fleet of databases or start small and scale over time. For smaller implementations, vttablet features like connection pooling and query rewriting help you get more from your existing hardware. Vitess' automation tools then provide additional benefits for larger implementations. - -The diagram below illustrates Vitess' components: - -
-Diagram showing Vitess implementation -
- -### Topology - -The [Topology Service]({% link user-guide/topology-service.md %}) is a metadata store that contains information about running servers, the sharding scheme, and the replication graph. The topology is backed by a consistent data store. You can explore the topology using **vtctl** (command-line) and **vtctld** (web). - -In Kubernetes, the data store is [etcd](https://github.com/coreos/etcd). Vitess source code also ships with [Apache ZooKeeper](https://zookeeper.apache.org/) support. - -### vtgate - -**vtgate** is a light proxy server that routes traffic to the correct vttablet(s) and returns consolidated results back to the client. It is the server to which applications send queries. Thus, the client can be very simple since it only needs to be able to find a vtgate instance. - -To route queries, vtgate considers the sharding scheme, required latency, and the availability of the tablets and their underlying MySQL instances. - -### vttablet - -**vttablet** is a proxy server that sits in front of a MySQL database. A Vitess implementation has one vttablet for each MySQL instance. - -vttablet performs tasks that attempt to maximize throughput as well as protect MySQL from harmful queries. Its features include connection pooling, query rewriting, and query de-duping. In addition, vttablet executes management tasks that vtctl initiates, and it provides streaming services that are used for [filtered replication]({% link user-guide/sharding.md %}#filtered-replication) and data exports. - -A lightweight Vitess implementation uses vttablet as a smart connection proxy that serves queries for a single MySQL database. By running vttablet in front of your MySQL database and changing your app to use the Vitess client instead of your MySQL driver, your app benefits from vttablet's connection pooling, query rewriting, and query de-duping features. - -### vtctl - -**vtctl** is a command-line tool used to administer a Vitess cluster. 
It allows a human or application to easily interact with a Vitess implementation. Using vtctl, you can identify master and replica databases, create tables, initiate failovers, perform sharding (and resharding) operations, and so forth. - -As vtctl performs operations, it updates the lockserver as needed. Other Vitess servers observe those changes and react accordingly. For example, if you use vtctl to fail over to a new master database, vtgate sees the change and directs future write operations to the new master. - -### vtctld - -**vtctld** is an HTTP server that lets you browse the information stored in the lockserver. It is useful for troubleshooting or for getting a high-level overview of the servers and their current states. - -### vtworker - -**vtworker** hosts long-running processes. It supports a plugin architecture and offers libraries so that you can easily choose tablets to use. Plugins are available for the following types of jobs: - -* **resharding differ** jobs check data integrity during shard splits and joins -* **vertical split differ** jobs check data integrity during vertical splits and joins - -vtworker also lets you easily add other validation procedures. You could do in-tablet integrity checks to verify foreign-key-like relationships or cross-shard integrity checks if, for example, an index table in one keyspace references data in another keyspace. - -### Other support tools - -Vitess also includes the following tools: - -* **mysqlctl**: Manage MySQL instances -* **vtcombo**: A single binary that contains all components of Vitess. It can be used for testing queries in a Continuous Integration environment. -* **vtexplain**: A command line tool that is used to explore how Vitess will handle queries based on a user-supplied schema and topology, without needing to set up a full cluster. 
-* **zk**: Command-line ZooKeeper client and explorer -* **zkctl**: Manage ZooKeeper instances - -## Vitess on Kubernetes - -[Kubernetes](https://kubernetes.io/) is an open-source orchestration system for Docker containers, and Vitess can run as a Kubernetes-aware cloud native distributed database. - -Kubernetes handles scheduling onto nodes in a compute cluster, actively manages workloads on those nodes, and groups containers comprising an application for easy management and discovery. -This provides an analogous open-source environment to the way Vitess runs in YouTube, -on the [predecessor to Kubernetes](https://kubernetes.io/blog/2015/04/borg-predecessor-to-kubernetes/). - -The easiest way to run Vitess is via Kubernetes. However, it's not a requirement, and other types of deployment are used as well. - - - -## History - -Vitess has been a fundamental component of YouTube infrastructure since 2011. -This section briefly summarizes the sequence of events that led to Vitess' -creation: - -1. YouTube's MySQL database reached a point when peak traffic would soon - exceed the database's serving capacity. To temporarily alleviate the - problem, YouTube created a master database for write traffic and a - replica database for read traffic. -1. With demand for cat videos at an all-time high, read-only traffic was - still high enough to overload the replica database. So YouTube added - more replicas, again providing a temporary solution. -1. Eventually, write traffic became too high for the master database to - handle, requiring YouTube to shard data to handle incoming traffic. - (Sharding would have also become necessary if the overall size of the - database became too large for a single MySQL instance.) -1. YouTube's application layer was modified so that before executing any - database operation, the code could identify the right database shard - to receive that particular query. 
- -Vitess let YouTube remove that logic from the source code, introducing -a proxy between the application and the database to route and manage -database interactions. Since then, YouTube has scaled its user base -by a factor of more than 50, greatly increasing its capacity to serve -pages, process newly uploaded videos, and more. Even more importantly, -Vitess is a platform that continues to scale. - -YouTube chose to write Vitess in Go because Go offers a combination of -expressiveness and performance. It is almost as expressive as Python and -very maintainable. However, its performance is in the same range as Java -and close to C++ in certain cases. In addition, the language is extremely -well suited for concurrent programming and has a very high quality -standard library. - -### Open Source First - -The open source version of Vitess is extremely similar to the version -used at YouTube. While there are some changes that let YouTube take -advantage of Google's infrastructure, the core functionality is the same. -When developing new features, the Vitess team first makes them work in -the Open Source tree. In some cases, the team then writes a plugin -that makes use of Google-specific technology. This approach ensures that -the Open Source version of Vitess maintains the same level of quality as -the internal version. - -The vast majority of Vitess development takes place in the open, on GitHub. -As such, Vitess is built with extensibility in mind so that you can adjust -it to the needs of your infrastructure. 
diff --git a/doc/VitessOverview.png b/doc/VitessOverview.png deleted file mode 100644 index e8e1b17fc19..00000000000 Binary files a/doc/VitessOverview.png and /dev/null differ diff --git a/doc/VitessReplication.md b/doc/VitessReplication.md deleted file mode 100644 index 7e7ad213c91..00000000000 --- a/doc/VitessReplication.md +++ /dev/null @@ -1,162 +0,0 @@ -# Vitess, MySQL Replication, and Schema Changes - -## Statement vs Row Based Replication - -MySQL supports two primary modes of replication in its binary logs: statement or -row based. Vitess supports both these modes. - -For schema changes, if the number of affected rows is greater > 100k (configurable), we don't allow direct application -of DDLs. The recommended tools in such cases are [gh-ost](https://github.com/github/gh-ost) or [pt-osc](https://www.percona.com/doc/percona-toolkit/LATEST/pt-online-schema-change.html). - -Not all statements are safe for Statement Based Replication (SBR): https://dev.mysql.com/doc/refman/8.0/en/replication-rbr-safe-unsafe.html. Vitess rewrites some of these statements to be safe for SBR, and others are explicitly failed. This is described in detail below. - -With statement based replication, it becomes easier to perform offline -advanced schema changes, or large data updates. Vitess’s solution is called -schema swap (described below). - -## Rewriting Update Statements - -Vitess rewrites ‘UPDATE’ SQL statements to always know what rows will be -affected. For instance, this statement: - -``` -UPDATE SET WHERE -``` - -Will be rewritten into: - -``` -SELECT FROM
<table> WHERE <where clause> FOR UPDATE -UPDATE <table>
SET <set clause> WHERE <primary key columns> IN <primary key values> /* primary key values: … */ -``` - -With this rewrite in effect, we know exactly which rows are affected, by primary -key, and we also document them as a SQL comment. - -The replication stream then doesn’t contain the expensive WHERE clauses, but -only the UPDATE statements by primary key. In a sense, it is combining the best -of row based and statement based replication: the slaves only do primary key -based updates, but the replication stream is very friendly for schema changes. - -Also, Vitess adds comments to the rewritten statements that identify the primary -key affected by that statement. This allows us to produce an Update Stream (see -section below). - -## Vitess Schema Swap - -Within YouTube, we also use a combination of statement based replication and -backups to apply long-running schema changes without disrupting ongoing -operations. See the [schema swap tutorial]({% link user-guide/schema-swap.md %}) -for a detailed example. - -This operation, which is called **schema swap**, works as follows: - -* Pick a slave, take it out of service. It is not used by clients any more. -* Apply whatever schema or large data change is needed, on the slave. -* Take a backup of that slave. -* On all the other slaves, one at a time, take them out of service, restore the - backup, catch up on replication, put them back into service. -* When all slaves are done, reparent to a slave that has applied the change. -* The old master can then be restored from a backup too, and put back into - service. - -With this process, the only guarantee we need is for the change (schema or data) -to be backward compatible: the clients won’t know if they talk to a server -that has applied the change yet or not. This is usually fairly easy to deal -with: - -* When adding a column, clients cannot use it until the schema swap is done. -* When removing a column, all clients must stop referring to it before the - schema swap begins.
-* A column rename is still tricky: the best way to do it is to add a new column - with the new name in one schema swap, then change the client to populate both - (and backfill the values), then change the client again to use the new - column only, then use another schema swap to remove the original column. -* A whole bunch of operations are really easy to perform though: index changes, - optimize table, … - -Note the real change is only applied to one instance. We then rely on the backup -/ restore process to propagate the change. This is a very good improvement from -letting the changes through the replication stream, where they are applied to -all hosts, not just one. This is also a very good improvement over the industry -practice of online schema change, which also must run on all hosts. -Since Vitess’s backup / restore and reparent processes -are very reliable (they need to be reliable on their own, independently of this -process!), this does not add much more complexity to a running system. - -## Update Stream - -Since the SBR replication stream also contains comments of which primary key is -affected by a change, it is possible to look at the replication stream and know -exactly what objects have changed. This Vitess feature is -called [Update Stream]({% link user-guide/update-stream.md %}). - -By subscribing to the Update Stream for a given shard, one can know what values -change. This stream can be used to create a stream of data changes (export to an -Apache Kafka for instance), or even invalidate an application layer cache. - -Note: the Update Stream only reliably contains the primary key values of the -rows that have changed, not the actual values for all columns. To get these -values, it is necessary to re-query the database. - -We have plans to make this [Update Stream]({% link user-guide/update-stream.md %}) -feature more consistent, very resilient, fast, and transparent to sharding. 
- -## Semi-Sync - -If you tell Vitess to enforce semi-sync -([semisynchronous replication](https://dev.mysql.com/doc/refman/5.7/en/replication-semisync.html)) -by passing the `-enable_semi_sync` flag to vttablets, -then the following will happen: - -* The master will only accept writes if it has at least one slave connected - and sending semi-sync ACK. It will never fall back to asynchronous - (not requiring ACKs) because of timeouts while waiting for ACK, nor because - of having zero slaves connected (although it will fall back to asynchronous - in case of shutdown, abrupt or graceful). - - This is important to prevent split brain (or alternate futures) in case of a - network partition. If we can verify all slaves have stopped replicating, - we know the old master is not accepting writes, even if we are unable to - contact the old master itself. - -* Slaves of *replica* type will send semi-sync ACK. Slaves of *rdonly* type will - **not** send ACK. This is because rdonly slaves are not eligible to be - promoted to master, so we want to avoid the case where a rdonly slave is the - single best candidate for election at the time of master failure (though - a split brain is possible when all rdonly slaves have transactions that - none of replica slaves have). - -These behaviors combine to give you the property that, in case of master -failure, there is at least one other *replica* type slave that has every -transaction that was ever reported to clients as having completed. -You can then ([manually]({% link reference/vtctl.md %}#emergencyreparentshard), -or with an automated tool like [Orchestrator](https://github.com/github/orchestrator)) -pick the replica that is farthest ahead in GTID position and promote that to be -the new master. - -Thus, you can survive sudden master failure without losing any transactions that -were reported to clients as completed. 
In MySQL 5.7+, this guarantee is -strengthened slightly to preventing loss of any transactions that were ever -**committed** on the original master, eliminating so-called -[phantom reads](https://bugs.mysql.com/bug.php?id=62174). - -On the other hand these behaviors also give a requirement that each shard must -have at least 2 tablets with type *replica* (with addition of the master that -can be demoted to type *replica* this gives a minimum of 3 tablets with initial -type *replica*). This will allow for the master to have a semi-sync acker when -one of the *replica* tablets is down for any reason (for a version update, -machine reboot, schema swap or anything else). - -With regard to replication lag, note that this does **not** guarantee there is -always at least one *replica* type slave from which queries will always return -up-to-date results. Semi-sync guarantees that at least one slave has the -transaction in its relay log, but it has not necessarily been applied yet. -The only way to guarantee a fully up-to-date read is to send the request to the -master. - -## Appendix: Adding support for RBR in Vitess - -We are in the process of adding support for RBR in Vitess. - -See [this document]({% link user-guide/row-based-replication.md %})) for more information. diff --git a/doc/VitessSequences.md b/doc/VitessSequences.md deleted file mode 100644 index 155b315acfb..00000000000 --- a/doc/VitessSequences.md +++ /dev/null @@ -1,208 +0,0 @@ -# Vitess Sequences - -This document describes the Vitess Sequences feature, and how to use it. - -## Motivation - -MySQL provides the `auto-increment` feature to assign monotonically incrementing -IDs to a column in a table. However, when a table is sharded across multiple -instances, maintaining the same feature is a lot more tricky. - -Vitess Sequences fill that gap: - -* Inspired from the usual SQL sequences (implemented in different ways by - Oracle, SQL Server and PostgreSQL). 
- -* Very high throughput for ID creation, using a configurable in-memory block allocation. - -* Transparent use, similar to MySQL auto-increment: when the field is omitted in - an `insert` statement, the next sequence value is used. - -## When *not* to Use Auto-Increment - -Before we go any further, an auto-increment column has limitations and -drawbacks. let's explore this topic a bit here. - -### Security Considerations - -Using auto-increment can leak confidential information about a service. Let's -take the example of a web site that store user information, and assign user IDs -to its users as they sign in. The user ID is then passed in a cookie for all -subsequent requests. - -The client then knows their own user ID. It is now possible to: - -* Try other user IDs and expose potential system vulnerabilities. - -* Get an approximate number of users of the system (using the user ID). - -* Get an approximate number of sign-ins during a week (creating two accounts a - week apart, and diffing the two IDs). - -Auto-incrementing IDs should be reserved for either internal applications, or -exposed to the clients only when safe. - -### Alternatives - -Alternative to auto-incrementing IDs are: - -* use a 64 bits random generator number. Try to insert a new row with that - ID. If taken (because the statement returns an integrity error), try another - ID. - -* use a UUID scheme, and generate truly unique IDs. - -Now that this is out of the way, let's get to MySQL auto-increment. - -## MySQL Auto-increment Feature - -Let's start by looking at the MySQL auto-increment feature: - -* A row that has no value for the auto-increment value will be given the next ID. - -* The current value is stored in the table metadata. - -* Values may be ‘burned’ (by rolled back transactions). - -* Inserting a row with a given value that is higher than the current value will - set the current value. 
- -* The value used by the master in a statement is sent in the replication stream, - so slaves will have the same value when re-playing the stream. - -* There is no strict guarantee about ordering: two concurrent statements may - have their commit time in one order, but their auto-incrementing ID in the - opposite order (as the value for the ID is reserved when the statement is - issued, not when the transaction is committed). - -* MySQL has multiple options for auto-increment, like only using every N number - (for multi-master configurations), or performance related features (locking - that table’s current ID may have concurrency implications). - -* When inserting a row in a table with an auto-increment column, if the value - for the auto-increment row is not set, the value for the column is returned to - the client alongside the statement result. - -## Vitess Sequences - -An early design was to use a single unsharded database and a table with an -auto-increment value to generate new values. However, this has serious -limitations, in particular throughput, and storing one entry for each value in -that table, for no reason. - -So we decided instead to base sequences on a MySQL table, and use a single value -in that table to describe which values the sequence should have next. To -increase performance, we also support block allocation of IDs: each update to -the MySQL table is only done every N IDs (N being configurable), and in between -only memory structures in vttablet are updated, making the QPS only limited by -RPC latency. - -In a sharded keyspace, a Sequence's data is only present in one shard (but its -schema is in all the shards). We configure which shard has the data by using a -keyspace_id for the sequence, and route all sequence traffic to the shard that -hold that keyspace_id. That way we are completely compatible with any horizontal -resharding. 
- -The final goal is to have Sequences supported with SQL statements, like: - -``` sql -/* DDL support */ -CREATE SEQUENCE my_sequence; - -SELECT NEXT VALUE FROM my_sequence; - -ALTER SEQUENCE my_sequence ...; - -DROP SEQUENCE my_sequence; - -SHOW CREATE SEQUENCE my_sequence; -``` - -In the current implementation, we support the query access to Sequences, but not -the administration commands yet. - -### Creating a Sequence - -*Note*: The names in this section are extracted from the examples/demo sample -application. - -To create a Sequence, a backing table must first be created and initialized with a single row. The columns for that table have to be respected. - -This is an example: - -``` sql -create table user_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; - -insert into user_seq(id, next_id, cache) values(0, 1, 100); -``` - -Then, the Sequence has to be define in the VSchema for that keyspace: - -``` json -{ - "sharded": false, - "tables": { - "user_seq": { - "type": "sequence" - }, - ... - } -} -``` - -And the table it is going to be using it can also reference the Sequence in its VSchema: - -``` json -{ - ... - "tables" : { - "user": { - "column_vindexes": [ - ... - ], - "auto_increment": { - "column": "user_id", - "sequence": "user_seq" - } - }, - -``` - -After this done (and the Schema has been reloaded on master tablet, and the -VSchema has been pushed), the sequence can be used. - -### Accessing a Sequence - -If a Sequence is used to fill in a column for a table, nothing further needs to -be done. Just sending no value for the column will make vtgate insert the next -Sequence value in its place. - -It is also possible to access the Sequence directly with the following SQL constructs: - -``` sql -/* Returns the next value for the sequence */ -select next value from my_sequence; - -/* Returns the next value for the sequence, and also reserve 4 values after that. 
*/ -select next 5 values from my_sequence; - -``` - -## TO-DO List - -### DDL Support - -We want to add DDL support for sequences, as previously mentioned: - -``` sql -CREATE SEQUENCE my_sequence; - -ALTER SEQUENCE my_sequence ...; - -DROP SEQUENCE my_sequence; - -SHOW CREATE SEQUENCE my_sequence; -``` - -But for now, the Sequence backing table has to be created and managed using the -usual schema management features, with the right column definitions and table comment. diff --git a/doc/VitessTransportSecurityModel.md b/doc/VitessTransportSecurityModel.md deleted file mode 100644 index bc9d0ebe832..00000000000 --- a/doc/VitessTransportSecurityModel.md +++ /dev/null @@ -1,110 +0,0 @@ -# Vitess Transport Security Model - -Vitess exposes a few RPC services, and internally also uses RPCs. These RPCs -may use secure transport options. This document explains how to use these -features. - -## Overview - -The following diagram represents all the RPCs we use in a Vitess cluster: - -
-Vitess Transport Security Model Diagram -
- -There are two main categories: - -* Internal RPCs: they are used to connect Vitess components. -* Externally visible RPCs: they are use by the app to talk to Vitess. - -A few features in the Vitess ecosystem depend on authentication, like Caller ID -and table ACLs. We'll explore the Caller ID feature first. - -The encryption and authentication scheme used depends on the transport -used. With gRPC (the default for Vitess), TLS can be used to secure both -internal and external RPCs. We'll detail what the options are. - -## Caller ID - -Caller ID is a feature provided by the Vitess stack to identify the source of -queries. There are two different Caller IDs: - -* **Immediate Caller ID**: It represents the secure client identity when it - enters the Vitess side: - * It is a single string, represents the user connecting to Vitess (vtgate). - * It is authenticated by the transport layer used. - * It is used by the Vitess TableACL feature. -* **Effective Caller ID**: It provides detailed information on who the - individual caller process is: - * It contains more information about the caller: principal, component, - sub-component. - * It is provided by the application layer. - * It is not authenticated. - * It is exposed in query logs to be able to debug the source of a slow query, - for instance. - -## gRPC Transport - -### gRPC Encrypted Transport - -When using gRPC transport, Vitess can use the usual TLS security features -(familiarity with SSL / TLS is necessary here): - -* Any Vitess server can be configured to use TLS with the following - command line parameters: - * grpc\_cert, grpc\_key: server cert and key to use. - * grpc\_ca (optional): client cert chains to trust. If specified, the client - must use a certificate signed by one ca in the provided file. -* A Vitess go client can be configured with symmetrical parameters to enable TLS: - * ...\_grpc\_ca: list of server cert signers to trust. 
- * ...\_grpc\_server\_name: name of the server cert to trust, instead of the - hostname used to connect. - * ...\_grpc\_cert, ...\_grpc\_key: client side cert and key to use (when the - server requires client authentication) -* Other clients can take similar parameters, in various ways, see each client - for more information. - -With these options, it is possible to use TLS-secured connections for all parts -of the system. This enables the server side to authenticate the client, and / or -the client to authenticate the server. - -Note this is not enabled by default, as usually the different Vitess servers -will run on a private network (in a Cloud environment, usually all local traffic -is already secured over a VPN, for instance). - -### Certificates and Caller ID - -Additionally, if a client uses a certificate to connect to Vitess (vtgate), the -common name of that certificate is passed to vttablet as the Immediate Caller -ID. It can then be used by table ACLs, to grant read, write or admin access to -individual tables. This should be used if different clients should have -different access to Vitess tables. - -### Caller ID Override - -In a private network, where SSL security is not required, it might still be -desirable to use table ACLs as a safety mechanism to prevent a user from -accessing sensitive data. The gRPC connector provides the -grpc\_use\_effective\_callerid flag for this purpose: if specified when running -vtgate, the Effective Caller ID's principal is copied into the Immediate Caller -ID, and then used throughout the Vitess stack. - -**Important**: this is not secure. Any user code can provide any value for -the Effective Caller ID's principal, and therefore access any data. This is -intended as a safety feature to make sure some applications do not misbehave. -Therefore, this flag is not enabled by default. 
- -### Example - -For a concrete example, see -[test/encrypted\_transport.py](https://github.com/vitessio/vitess/blob/master/test/encrypted_transport.py) -in the source tree. It first sets up all the certificates, and some table ACLs, -then uses the python client to connect with SSL. It also exercises the -grpc\_use\_effective\_callerid flag, by connecting without SSL. - -## MySQL Transport - -To get vtgate to support SSL/TLS use `-mysql_server_ssl_cert` and `-mysql_server_ssl_key`. - -To require client certificates set `-mysql_server_ssl_ca`. If there is no CA specified then -TLS is optional. diff --git a/doc/VtExplain.md b/doc/VtExplain.md deleted file mode 100644 index 93d06a8f0c3..00000000000 --- a/doc/VtExplain.md +++ /dev/null @@ -1,151 +0,0 @@ -# VTExplain Tool - -The vtexplain tool provides information about how Vitess will execute a statement (the Vitess version of MySQL `EXPLAIN`). - -## Prerequisites - -You'll need to build the `vtexplain` binary in your environment. -To find instructions on how to build this binary please refer to this [guide](https://vitess.io/docs/tutorials/local/). - -## Explaining a Query - -In order to explain a query you will need to first collect a sql schema for the various tables and a vschema json file containing a map of keyspace to the set of vindexes / tables in the vschema. 
- -For example, let's use the following: - -Schema: - -```SQL -CREATE TABLE users( - user_id bigint, - name varchar(128), - primary key(user_id) -); - -CREATE TABLE users_name_idx( - user_id bigint, - name varchar(128), - primary key(name, user_id) -); -``` - -VSchema: - -```json -{ - "mainkeyspace": { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - }, - "md5": { - "type": "unicode_loose_md5", - "params": {}, - "owner": "" - }, - "users_name_idx": { - "type": "lookup_hash", - "params": { - "from": "name", - "table": "users_name_idx", - "to": "user_id" - }, - "owner": "users" - } - }, - "tables": { - "users": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - }, - { - "column": "name", - "name": "users_name_idx" - } - ], - "auto_increment": null - }, - "users_name_idx": { - "type": "", - "column_vindexes": [ - { - "column": "name", - "name": "md5" - } - ], - "auto_increment": null - } - } - } -} -``` - -These can be passed to vtexplain either on the command line or (more readily) through files. - -Then you can use the tool like this: - -**Select:** - -```bash -vtexplain -shards 8 -vschema-file /tmp/vschema.json -schema-file /tmp/schema.sql -replication-mode "ROW" -output-mode text -sql "SELECT * from users" ----------------------------------------------------------------------- -SELECT * from users - -1 mainkeyspace/-20: select * from users limit 10001 -1 mainkeyspace/20-40: select * from users limit 10001 -1 mainkeyspace/40-60: select * from users limit 10001 -1 mainkeyspace/60-80: select * from users limit 10001 -1 mainkeyspace/80-a0: select * from users limit 10001 -1 mainkeyspace/a0-c0: select * from users limit 10001 -1 mainkeyspace/c0-e0: select * from users limit 10001 -1 mainkeyspace/e0-: select * from users limit 10001 - ----------------------------------------------------------------------- -``` - -The output shows the sequence of queries run. 
- -In this case, the query planner is a scatter query to all shards, and each line shows: - -(a) the logical sequence of the query -(b) the keyspace/shard -(c) the query that was executed - -The fact that each query runs at time `1` shows that vitess executes these in parallel, and the `limit 10001` is automatically added as a protection against large results. - -**Insert:** - -```bash -vtexplain -shards 128 -vschema-file /tmp/vschema.json -schema-file /tmp/schema.sql -replication-mode "ROW" -output-mode text -sql "INSERT INTO users (user_id, name) VALUES(1, 'john')" - ----------------------------------------------------------------------- -INSERT INTO users (user_id, name) VALUES(1, 'john') - -1 mainkeyspace/22-24: begin -1 mainkeyspace/22-24: insert into users_name_idx(name, user_id) values ('john', 1) /* vtgate:: keyspace_id:22c0c31d7a0b489a16332a5b32b028bc */ -2 mainkeyspace/16-18: begin -2 mainkeyspace/16-18: insert into users(user_id, name) values (1, 'john') /* vtgate:: keyspace_id:166b40b44aba4bd6 */ -3 mainkeyspace/22-24: commit -4 mainkeyspace/16-18: commit - ----------------------------------------------------------------------- -``` - -This example shows how Vitess handles an insert into a table with a secondary lookup vindex. - -First, at time `1`, a transaction is opened on one shard to insert the row into the `users_name_idx` table. Then at time `2` a second transaction is opened on another shard with the actual insert into the `users` table, and finally each transaction is committed at time `3` and `4`. - -**Configuration Options** - -The `--shards` option specifies the number of shards to simulate. vtexplain will always allocate an evenly divided key range to each. - -The `--replication-mode` option controls whether to simulate row based or statement based replication. 
- -You can find more usage of `vtexplain` by executing the following command: - -``` -vtexplain --help -``` diff --git a/doc/internal/PublishWebsite.md b/doc/internal/PublishWebsite.md deleted file mode 100644 index 5787416f932..00000000000 --- a/doc/internal/PublishWebsite.md +++ /dev/null @@ -1,166 +0,0 @@ -# Publishing vitess.io Web Site - -## Overview - -Our website [vitess.io](https://vitess.io) are static HTML pages which are -generated by [Jekyll](https://github.com/jekyll/jekyll) from Markdown files -located in the [`doc/`](https://github.com/vitessio/vitess/tree/master/doc) -directory. - -The generated files will be put in the -[`docs/`](https://github.com/vitessio/vitess/tree/master/docs) directory (note -the extra **s**). GitHub serves the website from this directory off the master -branch. - -## TL;DR - -### To preview site locally - -* Run: - - ``` - ./vitess.io/preview-site.sh - ``` - -* Site should be live at localhost:4000 - -### To publish the site - -* Make sure your doc and site changes have been committed to your branch and - your branch is clean. -* Run: - - ``` - ./vitess.io/publish-site.sh - ``` - -* Sanity check the diffs. - * `git diff HEAD~` -* Create a pull request for your branch and let somebody review it. -* Merge the pull request into the master branch. - -## Details - -### Directory Structure - -We have three main directories: - -* [`doc/`](https://github.com/vitessio/vitess/tree/master/doc) - original - content -* [`docs/`](https://github.com/vitessio/vitess/tree/master/docs) - generated - website actually served at https://vitess.io/ -* [`vitess.io/`](https://github.com/vitessio/vitess/tree/master/vitess.io) - - all relevant files for the website e.g. - * Jekyll configuration - * images e.g. 
our logo - * CSS - * [Navigation - menu](https://github.com/vitessio/vitess/blob/master/vitess.io/_includes/left-nav-menu.html) - * boiler plate markdown files which include the actual content from the - `doc/` directory - ([example](https://github.com/vitessio/vitess/blob/master/vitess.io/contributing/github-workflow.md)) - -The boiler plate markdown files have multiple purposes: - -* feed the actual content into a template which adds e.g. the navigation to - the file -* re-arrange paths on the website e.g. - [`doc/GitHubWorkFlow.md`](https://github.com/vitessio/vitess/blob/master/doc/GitHubWorkflow.md) - is actually served as https://vitess.io/docs/contributing/github-workflow/ - because there is the file - [`vitess.io/contributing/github-workflow.md`](https://github.com/vitessio/vitess/blob/master/vitess.io/contributing/github-workflow.md). - -### Changing Content - -To modify our website, you need to: - -* change the underlying Markdown files in the `doc/` directory -* re-generate the static pages (see [`publish-site.sh` - above](#to-publish-the-site)) -* merge your changes into the master branch e.g. as pull request - -### Linking pages - -Always use the `{% raw %}{% link ... %}{% endraw %}` template tag to link other pages. - -Note that you have to refer to the `.md` file of the page. Example: - -``` -[GitHub Workflow page]({% raw %}{% link contributing/github-workflow.md %}{% endraw %}) -``` - -### Adding new Pages - -If you want to add a new page, you must also: - -* add it to the left menu: - [`vitess.io/_includes/left-nav-menu.html`](https://github.com/vitessio/vitess/blob/master/vitess.io/_includes/left-nav-menu.html) -* create a boiler plate .md file. Example: - [`vitess.io/contributing/github-workflow.md`](https://github.com/vitessio/vitess/blob/master/vitess.io/contributing/github-workflow.md) - -When you add a new section to the menu, please create a new directory below -`vitess.io/`. 
For example, the "Contributing" section is served out of -`vitess.io/contributing/`. - -The main file in the section should have `index.md` as its boiler plate counter -part. Example: `doc/Contributing.md` is included by -`vitess.io/contributing/index.md` and therefore served as -https://vitess.io/contributing/. - -Make sure that you use `{% raw %}{% link ... %}{% endraw %}` to generate the URLs. -See existing entries for examples. - -### Orphaned doc/ Markdown Files - -There are several files in `doc/` which are currently not visible on -https://vitess.io. - -Examples: - -* https://github.com/vitessio/vitess/blob/master/doc/LifeOfAQuery.md -* https://github.com/vitessio/vitess/blob/master/doc/V3VindexDesign.md - -This is fine and accepted. Users can still view them on GitHub.com. - -Note that these files should include images using the full path e.g. in -`LifeOfAQuery.md`: - -``` -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query.png) -``` - -Otherwise GitHub cannot find and show the images. - -## Jekyll Install Instructions - -This section describes how to install Jekyll to generate the website. - -### The Easy Way - -* Install Docker. -* Run the `./vitess.io/preview-site.sh` and `./vitess.io/publish-site.sh` commands as shown above. - -### The Hard Way - -* On Ubuntu, you need a non-root Ruby install so gem packages can be - installed correctly. - * [Install rbenv](https://github.com/sstephenson/rbenv#installation). - * Close and re-open your shell to complete the installation. - * [Install - ruby-build](https://github.com/sstephenson/ruby-build#installation) - plugin for rbenv. - * Use rbenv to build a recent version of Ruby. - * rbenv install 2.2.3 - * You may need to install extra dependencies. The build output should tell - you which packages. For example: ```sudo apt-get install -y libreadline-dev``` - * Set the new Ruby as the system-wide default. - * ```rbenv global 2.2.3``` - * ```rbenv rehash``` -* Install bundler. 
- * ```gem install bundler``` -* Install "nodejs" as JavaScript runtime. - * ```sudo apt-get install nodejs``` - * In Ubuntu, the binary is unfortunately renamed from "node" to "nodejs". - We work-around this by creating a "node" symlink on a directory which is - early in the PATH list: ```ln -s /usr/bin/nodejs $HOME/.rbenv/bin/node``` -* Add `--docker=false` to the commands above. diff --git a/doc/slides/Percona2015.pptx b/doc/slides/Percona2015.pptx deleted file mode 100644 index b38e77df5ee..00000000000 Binary files a/doc/slides/Percona2015.pptx and /dev/null differ diff --git a/doc/slides/Vitess2014.pdf b/doc/slides/Vitess2014.pdf deleted file mode 100644 index 2beb924c267..00000000000 Binary files a/doc/slides/Vitess2014.pdf and /dev/null differ diff --git a/doc/vtctlReference.md b/doc/vtctlReference.md deleted file mode 100644 index a679512f3dd..00000000000 --- a/doc/vtctlReference.md +++ /dev/null @@ -1,2573 +0,0 @@ -This reference guide explains the commands that the vtctl tool supports. **vtctl** is a command-line tool used to administer a Vitess cluster, and it allows a human or application to easily interact with a Vitess implementation. - -Commands are listed in the following groups: - -* [Cells](#cells) -* [Generic](#generic) -* [Keyspaces](#keyspaces) -* [Queries](#queries) -* [Replication Graph](#replication-graph) -* [Resharding Throttler](#resharding-throttler) -* [Schema, Version, Permissions](#schema-version-permissions) -* [Serving Graph](#serving-graph) -* [Shards](#shards) -* [Tablets](#tablets) -* [Topo](#topo) -* [Workflows](#workflows) - - -## Cells - -* [AddCellInfo](#addcellinfo) -* [DeleteCellInfo](#deletecellinfo) -* [GetCellInfo](#getcellinfo) -* [GetCellInfoNames](#getcellinfonames) -* [UpdateCellInfo](#updatecellinfo) - -### AddCellInfo - -Registers a local topology service in a new cell by creating the CellInfo with the provided parameters. 
The address will be used to connect to the topology service, and we'll put Vitess data starting at the provided root. - -#### Example - -
AddCellInfo [-server_address <addr>] [-root <root>] <cell>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| root | string | The root path the topology server is using for that cell. | -| server_address | string | The address the topology server is using for that cell. | - - -#### Arguments - -* <addr> – Required. -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <cell> argument is required for the <AddCellInfo> command This error occurs if the command is not called with exactly one argument. - - -### DeleteCellInfo - -Deletes the CellInfo for the provided cell. The cell cannot be referenced by any Shard record. - -#### Example - -
DeleteCellInfo <cell>
- -#### Errors - -* the <cell> argument is required for the <DeleteCellInfo> command This error occurs if the command is not called with exactly one argument. - - -### GetCellInfo - -Prints a JSON representation of the CellInfo for a cell. - -#### Example - -
GetCellInfo <cell>
- -#### Errors - -* the <cell> argument is required for the <GetCellInfo> command This error occurs if the command is not called with exactly one argument. - - -### GetCellInfoNames - -Lists all the cells for which we have a CellInfo object, meaning we have a local topology service registered. - -#### Example - -
GetCellInfoNames 
- -#### Errors - -* <GetCellInfoNames> command takes no parameter This error occurs if the command is not called with exactly 0 arguments. - - -### UpdateCellInfo - -Updates the content of a CellInfo with the provided parameters. If a value is empty, it is not updated. The CellInfo will be created if it doesn't exist. - -#### Example - -
UpdateCellInfo [-server_address <addr>] [-root <root>] <cell>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| root | string | The root path the topology server is using for that cell. | -| server_address | string | The address the topology server is using for that cell. | - - -#### Arguments - -* <addr> – Required. -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <cell> argument is required for the <UpdateCellInfo> command This error occurs if the command is not called with exactly one argument. - - -## Generic - -* [ListAllTablets](#listalltablets) -* [ListTablets](#listtablets) -* [Validate](#validate) - -### ListAllTablets - -Lists all tablets in an awk-friendly way. - -#### Example - -
ListAllTablets <cell name>
- -#### Arguments - -* <cell name> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <cell name> argument is required for the <ListAllTablets> command This error occurs if the command is not called with exactly one argument. - - -### ListTablets - -Lists specified tablets in an awk-friendly way. - -#### Example - -
ListTablets <tablet alias> ...
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. To specify multiple values for this argument, separate individual values with a space. - -#### Errors - -* the <tablet alias> argument is required for the <ListTablets> command This error occurs if the command is not called with at least one argument. - - -### Validate - -Validates that all nodes reachable from the global replication graph and that all tablets in all discoverable cells are consistent. - -#### Example - -
Validate [-ping-tablets]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| ping-tablets | Boolean | Indicates whether all tablets should be pinged during the validation process | - - - - -## Keyspaces - -* [CreateKeyspace](#createkeyspace) -* [DeleteKeyspace](#deletekeyspace) -* [FindAllShardsInKeyspace](#findallshardsinkeyspace) -* [GetKeyspace](#getkeyspace) -* [GetKeyspaces](#getkeyspaces) -* [MigrateServedFrom](#migrateservedfrom) -* [MigrateServedTypes](#migrateservedtypes) -* [CancelResharding](#cancelresharding) -* [ShowResharding](#showresharding) -* [RebuildKeyspaceGraph](#rebuildkeyspacegraph) -* [RemoveKeyspaceCell](#removekeyspacecell) -* [SetKeyspaceServedFrom](#setkeyspaceservedfrom) -* [SetKeyspaceShardingInfo](#setkeyspaceshardinginfo) -* [ValidateKeyspace](#validatekeyspace) -* [WaitForDrain](#waitfordrain) - -### CreateKeyspace - -Creates the specified keyspace. - -#### Example - -
CreateKeyspace [-sharding_column_name=name] [-sharding_column_type=type] [-served_from=tablettype1:ks1,tablettype2,ks2,...] [-force] <keyspace name>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | Proceeds even if the keyspace already exists | -| served_from | string | Specifies a comma-separated list of dbtype:keyspace pairs used to serve traffic | -| sharding_column_name | string | Specifies the column to use for sharding operations | -| sharding_column_type | string | Specifies the type of the column to use for sharding operations | - - -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace name> argument is required for the <CreateKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### DeleteKeyspace - -Deletes the specified keyspace. In recursive mode, it also recursively deletes all shards in the keyspace. Otherwise, there must be no shards left in the keyspace. - -#### Example - -
DeleteKeyspace [-recursive] <keyspace>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| recursive | Boolean | Also recursively delete all shards in the keyspace. | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* must specify the <keyspace> argument for <DeleteKeyspace> This error occurs if the command is not called with exactly one argument. - - -### FindAllShardsInKeyspace - -Displays all of the shards in the specified keyspace. - -#### Example - -
FindAllShardsInKeyspace <keyspace>
- -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the <FindAllShardsInKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### GetKeyspace - -Outputs a JSON structure that contains information about the Keyspace. - -#### Example - -
GetKeyspace <keyspace>
- -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the <GetKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### GetKeyspaces - -Outputs a sorted list of all keyspaces. - - -### MigrateServedFrom - -Makes the <destination keyspace/shard> serve the given type. This command also rebuilds the serving graph. - -#### Example - -
MigrateServedFrom [-cells=c1,c2,...] [-reverse] <destination keyspace/shard> <served tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to update | -| filtered_replication_wait_time | Duration | Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations | -| reverse | Boolean | Moves the served tablet type backward instead of forward. Use in case of trouble | - - -#### Arguments - -* <destination keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <served tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. 
- * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <destination keyspace/shard> and <served tablet type> arguments are both required for the <MigrateServedFrom> command This error occurs if the command is not called with exactly 2 arguments. - - -### MigrateServedTypes - -Migrates a serving type from the source shard to the shards that it replicates to. This command also rebuilds the serving graph. The <keyspace/shard> argument can specify any of the shards involved in the migration. - -#### Example - -
MigrateServedTypes [-cells=c1,c2,...] [-reverse] [-skip-refresh-state] <keyspace/shard> <served tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to update | -| filtered\_replication\_wait\_time | Duration | Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations | -| reverse | Boolean | Moves the served tablet type backward instead of forward. Use in case of trouble | -| skip-refresh-state | Boolean | Skips refreshing the state of the source tablets after the migration, meaning that the refresh will need to be done manually, replica and rdonly only) | -| reverse\_replication | Boolean | For master migration, enabling this flag reverses replication which allows you to rollback | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <served tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. 
- * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <source keyspace/shard> and <served tablet type> arguments are both required for the <MigrateServedTypes> command This error occurs if the command is not called with exactly 2 arguments. -* the <skip-refresh-state> flag can only be specified for non-master migrations - - -### CancelResharding - -Permanently cancels a resharding in progress. All resharding related metadata will be deleted. - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - - -### ShowResharding - -"Displays all metadata about a resharding in progress. - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - - -### RebuildKeyspaceGraph - -Rebuilds the serving data for the keyspace. This command may trigger an update to all connected clients. - -#### Example - -
RebuildKeyspaceGraph [-cells=c1,c2,...] <keyspace> ...
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to update | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. To specify multiple values for this argument, separate individual values with a space. - -#### Errors - -* the <keyspace> argument must be used to specify at least one keyspace when calling the <RebuildKeyspaceGraph> command This error occurs if the command is not called with at least one argument. - - -### RemoveKeyspaceCell - -Removes the cell from the Cells list for all shards in the keyspace, and the SrvKeyspace for that keyspace in that cell. - -#### Example - -
RemoveKeyspaceCell [-force] [-recursive] <keyspace> <cell>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | Proceeds even if the cell's topology server cannot be reached. The assumption is that you turned down the entire cell, and just need to update the global topo data. | -| recursive | Boolean | Also delete all tablets in that cell belonging to the specified keyspace. | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <keyspace> and <cell> arguments are required for the <RemoveKeyspaceCell> command This error occurs if the command is not called with exactly 2 arguments. - - -### SetKeyspaceServedFrom - -Changes the ServedFromMap manually. This command is intended for emergency fixes. This field is automatically set when you call the *MigrateServedFrom* command. This command does not rebuild the serving graph. - -#### Example - -
SetKeyspaceServedFrom [-source=<source keyspace name>] [-remove] [-cells=c1,c2,...] <keyspace name> <tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to affect | -| remove | Boolean | Indicates whether to add (default) or remove the served from record | -| source | string | Specifies the source keyspace name | - - -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <keyspace name> and <tablet type> arguments are required for the <SetKeyspaceServedFrom> command This error occurs if the command is not called with exactly 2 arguments. - - -### SetKeyspaceShardingInfo - -Updates the sharding information for a keyspace. - -#### Example - -
SetKeyspaceShardingInfo [-force] <keyspace name> [<column name>] [<column type>]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | Updates fields even if they are already set. Use caution before calling this command. | - - -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <column name> – Optional. -* <column type> – Optional. - -#### Errors - -* the <keyspace name> argument is required for the <SetKeyspaceShardingInfo> command. The <column name> and <column type> arguments are both optional This error occurs if the command is not called with between 1 and 3 arguments. -* both <column name> and <column type> must be set, or both must be unset - - -### ValidateKeyspace - -Validates that all nodes reachable from the specified keyspace are consistent. - -#### Example - -
ValidateKeyspace [-ping-tablets] <keyspace name>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| ping-tablets | Boolean | Specifies whether all tablets will be pinged during the validation process | - - -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace name> argument is required for the <ValidateKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### WaitForDrain - -Blocks until no new queries were observed on all tablets with the given tablet type in the specified keyspace. This can be used as sanity check to ensure that the tablets were drained after running vtctl MigrateServedTypes and vtgate is no longer using them. If -timeout is set, it fails when the timeout is reached. - -#### Example - -
WaitForDrain [-timeout <duration>] [-retry_delay <duration>] [-initial_wait <duration>] <keyspace/shard> <served tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to look for tablets | -| initial_wait | Duration | Time to wait for all tablets to check in | -| retry_delay | Duration | Time to wait between two checks | -| timeout | Duration | Timeout after which the command fails | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <served tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. 
- * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <keyspace/shard> and <tablet type> arguments are both required for the <WaitForDrain> command This error occurs if the command is not called with exactly 2 arguments. - - -## Queries - -* [VtGateExecute](#vtgateexecute) -* [VtGateExecuteKeyspaceIds](#vtgateexecutekeyspaceids) -* [VtGateExecuteShards](#vtgateexecuteshards) -* [VtGateSplitQuery](#vtgatesplitquery) -* [VtTabletBegin](#vttabletbegin) -* [VtTabletCommit](#vttabletcommit) -* [VtTabletExecute](#vttabletexecute) -* [VtTabletRollback](#vttabletrollback) -* [VtTabletStreamHealth](#vttabletstreamhealth) -* [VtTabletUpdateStream](#vttabletupdatestream) - -### VtGateExecute - -Executes the given SQL query with the provided bound variables against the vtgate server. - -#### Example - -
VtGateExecute -server <vtgate> [-bind_variables <JSON map>] [-keyspace <default keyspace>] [-tablet_type <tablet type>] [-options <proto text options>] [-json] <sql>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| json | Boolean | Output JSON instead of human-readable table | -| options | string | execute options values as a text encoded proto of the ExecuteOptions structure | -| server | string | VtGate server to connect to | -| target | string | keyspace:shard@tablet_type | - - -#### Arguments - -* <vtgate> – Required. -* <sql> – Required. - -#### Errors - -* the <sql> argument is required for the <VtGateExecute> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* error connecting to vtgate '%v': %v -* Execute failed: %v - - -### VtGateExecuteKeyspaceIds - -Executes the given SQL query with the provided bound variables against the vtgate server. It is routed to the shards that contain the provided keyspace ids. - -#### Example - -
VtGateExecuteKeyspaceIds -server <vtgate> -keyspace <keyspace> -keyspace_ids <ks1 in hex>,<k2 in hex>,... [-bind_variables <JSON map>] [-tablet_type <tablet type>] [-options <proto text options>] [-json] <sql>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| json | Boolean | Output JSON instead of human-readable table | -| keyspace | string | keyspace to send query to | -| keyspace_ids | string | comma-separated list of keyspace ids (in hex) that will map into shards to send query to | -| options | string | execute options values as a text encoded proto of the ExecuteOptions structure | -| server | string | VtGate server to connect to | -| tablet_type | string | tablet type to query | - - -#### Arguments - -* <vtgate> – Required. -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <ks1 in hex> – Required. To specify multiple values for this argument, separate individual values with a comma. -* <sql> – Required. - -#### Errors - -* the <sql> argument is required for the <VtGateExecuteKeyspaceIds> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot hex-decode value %v '%v': %v -* error connecting to vtgate '%v': %v -* Execute failed: %v - - -### VtGateExecuteShards - -Executes the given SQL query with the provided bound variables against the vtgate server. It is routed to the provided shards. - -#### Example - -
VtGateExecuteShards -server <vtgate> -keyspace <keyspace> -shards <shard0>,<shard1>,... [-bind_variables <JSON map>] [-tablet_type <tablet type>] [-options <proto text options>] [-json] <sql>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| json | Boolean | Output JSON instead of human-readable table | -| keyspace | string | keyspace to send query to | -| options | string | execute options values as a text encoded proto of the ExecuteOptions structure | -| server | string | VtGate server to connect to | -| shards | string | comma-separated list of shards to send query to | -| tablet_type | string | tablet type to query | - - -#### Arguments - -* <vtgate> – Required. -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <shard> – Required. The name of a shard. The argument value is typically in the format <range start>-<range end>. To specify multiple values for this argument, separate individual values with a comma. -* <sql> – Required. - -#### Errors - -* the <sql> argument is required for the <VtGateExecuteShards> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* error connecting to vtgate '%v': %v -* Execute failed: %v - - -### VtGateSplitQuery - -Executes the SplitQuery computation for the given SQL query with the provided bound variables against the vtgate server (this is the base query for Map-Reduce workloads, and is provided here for debug / test purposes). - -#### Example - -
VtGateSplitQuery -server <vtgate> -keyspace <keyspace> [-split_column <split_column>] -split_count <split_count> [-bind_variables <JSON map>] <sql>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| algorithm | string | The algorithm to | -| keyspace | string | keyspace to send query to | -| server | string | VtGate server to connect to | -| split_count | Int64 | number of splits to generate. | - - -#### Arguments - -* <vtgate> – Required. -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. -* <split_count> – Required. -* <sql> – Required. - -#### Errors - -* the <sql> argument is required for the <VtGateSplitQuery> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* Exactly one of <split_count> or num_rows_per_query_part -* Unknown split-query <algorithm>: %v -* error connecting to vtgate '%v': %v -* Execute failed: %v -* SplitQuery failed: %v - - -### VtTabletBegin - -Starts a transaction on the provided server. - -#### Example - -
VtTabletBegin [-username <TableACL user>] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| username | string | If set, value is set as immediate caller id in the request and used by vttablet for TableACL check | - - -#### Arguments - -* <TableACL user> – Required. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet_alias> argument is required for the <VtTabletBegin> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v -* Begin failed: %v - - -### VtTabletCommit - -Commits the given transaction on the provided server. - -#### Example - -
VtTabletCommit [-username <TableACL user>] <tablet alias> <transaction_id>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| username | string | If set, value is set as immediate caller id in the request and used by vttablet for TableACL check | - - -#### Arguments - -* <TableACL user> – Required. -* <transaction_id> – Required. - -#### Errors - -* the <tablet_alias> and <transaction_id> arguments are required for the <VtTabletCommit> command This error occurs if the command is not called with exactly 2 arguments. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v - - -### VtTabletExecute - -Executes the given query on the given tablet. -transaction_id is optional. Use VtTabletBegin to start a transaction. - -#### Example - -
VtTabletExecute [-username <TableACL user>] [-transaction_id <transaction_id>] [-options <proto text options>] [-json] <tablet alias> <sql>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| json | Boolean | Output JSON instead of human-readable table | -| options | string | execute options values as a text encoded proto of the ExecuteOptions structure | -| transaction_id | Int | transaction id to use, if inside a transaction. | -| username | string | If set, value is set as immediate caller id in the request and used by vttablet for TableACL check | - - -#### Arguments - -* <TableACL user> – Required. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <sql> – Required. - -#### Errors - -* the <tablet_alias> and <sql> arguments are required for the <VtTabletExecute> command This error occurs if the command is not called with exactly 2 arguments. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v -* Execute failed: %v - - -### VtTabletRollback - -Rollbacks the given transaction on the provided server. - -#### Example - -
VtTabletRollback [-username <TableACL user>] <tablet alias> <transaction_id>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| username | string | If set, value is set as immediate caller id in the request and used by vttablet for TableACL check | - - -#### Arguments - -* <TableACL user> – Required. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <transaction_id> – Required. - -#### Errors - -* the <tablet_alias> and <transaction_id> arguments are required for the <VtTabletRollback> command This error occurs if the command is not called with exactly 2 arguments. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v - - -### VtTabletStreamHealth - -Executes the StreamHealth streaming query to a vttablet process. Will stop after getting <count> answers. - -#### Example - -
VtTabletStreamHealth [-count <count, default 1>] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| count | Int | number of responses to wait for | - - -#### Arguments - -* <count default 1> – Required. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <VtTabletStreamHealth> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v - - -### VtTabletUpdateStream - -Executes the UpdateStream streaming query to a vttablet process. Will stop after getting <count> answers. - -#### Example - -
VtTabletUpdateStream [-count <count, default 1>] [-position <position>] [-timestamp <timestamp>] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| count | Int | number of responses to wait for | -| position | string | position to start the stream from | -| timestamp | Int | timestamp to start the stream from | - - -#### Arguments - -* <count default 1> – Required. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <VtTabletUpdateStream> command This error occurs if the command is not called with exactly one argument. -* query commands are disabled (set the -enable_queries flag to enable) -* cannot connect to tablet %v: %v - - -## Replication Graph - -* [GetShardReplication](#getshardreplication) - -### GetShardReplication - -Outputs a JSON structure that contains information about the ShardReplication. - -#### Example - -
GetShardReplication <cell> <keyspace/shard>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <cell> and <keyspace/shard> arguments are required for the <GetShardReplication> command This error occurs if the command is not called with exactly 2 arguments. - - -## Resharding Throttler - -* [GetThrottlerConfiguration](#getthrottlerconfiguration) -* [ResetThrottlerConfiguration](#resetthrottlerconfiguration) -* [ThrottlerMaxRates](#throttlermaxrates) -* [ThrottlerSetMaxRate](#throttlersetmaxrate) -* [UpdateThrottlerConfiguration](#updatethrottlerconfiguration) - -### GetThrottlerConfiguration - -Returns the current configuration of the MaxReplicationLag module. If no throttler name is specified, the configuration of all throttlers will be returned. - -#### Example - -
GetThrottlerConfiguration -server <vtworker or vttablet> [<throttler name>]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| server | string | vtworker or vttablet to connect to | - - -#### Arguments - -* <vtworker or vttablet> – Required. -* <throttler name> – Optional. - -#### Errors - -* the <GetThrottlerConfiguration> command accepts only <throttler name> as optional positional parameter This error occurs if the command is not called with more than 1 arguments. -* error creating a throttler client for <server> '%v': %v -* failed to get the throttler configuration from <server> '%v': %v - - -### ResetThrottlerConfiguration - -Resets the current configuration of the MaxReplicationLag module. If no throttler name is specified, the configuration of all throttlers will be reset. - -#### Example - -
ResetThrottlerConfiguration -server <vtworker or vttablet> [<throttler name>]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| server | string | vtworker or vttablet to connect to | - - -#### Arguments - -* <vtworker or vttablet> – Required. -* <throttler name> – Optional. - -#### Errors - -* the <ResetThrottlerConfiguration> command accepts only <throttler name> as optional positional parameter This error occurs if the command is not called with more than 1 arguments. -* error creating a throttler client for <server> '%v': %v -* failed to get the throttler configuration from <server> '%v': %v - - -### ThrottlerMaxRates - -Returns the current max rate of all active resharding throttlers on the server. - -#### Example - -
ThrottlerMaxRates -server <vtworker or vttablet>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| server | string | vtworker or vttablet to connect to | - - -#### Arguments - -* <vtworker or vttablet> – Required. - -#### Errors - -* the ThrottlerSetMaxRate command does not accept any positional parameters This error occurs if the command is not called with exactly 0 arguments. -* error creating a throttler client for <server> '%v': %v -* failed to get the throttler rate from <server> '%v': %v - - -### ThrottlerSetMaxRate - -Sets the max rate for all active resharding throttlers on the server. - -#### Example - -
ThrottlerSetMaxRate -server <vtworker or vttablet> <rate>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| server | string | vtworker or vttablet to connect to | - - -#### Arguments - -* <vtworker or vttablet> – Required. -* <rate> – Required. - -#### Errors - -* the <rate> argument is required for the <ThrottlerSetMaxRate> command This error occurs if the command is not called with exactly one argument. -* failed to parse rate '%v' as integer value: %v -* error creating a throttler client for <server> '%v': %v -* failed to set the throttler rate on <server> '%v': %v - - -### UpdateThrottlerConfiguration - -Updates the configuration of the MaxReplicationLag module. The configuration must be specified as protobuf text. If a field is omitted or has a zero value, it will be ignored unless -copy_zero_values is specified. If no throttler name is specified, all throttlers will be updated. - -#### Example - -
UpdateThrottlerConfiguration -server <vtworker or vttablet> [-copy_zero_values] "<configuration protobuf text>" [<throttler name>]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| copy_zero_values | Boolean | If true, fields with zero values will be copied as well | -| server | string | vtworker or vttablet to connect to | - - -#### Arguments - -* <vtworker or vttablet> – Required. -* <throttler name> – Optional. - -#### Errors - -* Failed to unmarshal the configuration protobuf text (%v) into a protobuf instance: %v -* error creating a throttler client for <server> '%v': %v -* failed to update the throttler configuration on <server> '%v': %v - - -## Schema, Version, Permissions - -* [ApplySchema](#applyschema) -* [ApplyVSchema](#applyvschema) -* [CopySchemaShard](#copyschemashard) -* [GetPermissions](#getpermissions) -* [GetSchema](#getschema) -* [GetVSchema](#getvschema) -* [RebuildVSchemaGraph](#rebuildvschemagraph) -* [ReloadSchema](#reloadschema) -* [ReloadSchemaKeyspace](#reloadschemakeyspace) -* [ReloadSchemaShard](#reloadschemashard) -* [ValidatePermissionsKeyspace](#validatepermissionskeyspace) -* [ValidatePermissionsShard](#validatepermissionsshard) -* [ValidateSchemaKeyspace](#validateschemakeyspace) -* [ValidateSchemaShard](#validateschemashard) -* [ValidateVersionKeyspace](#validateversionkeyspace) -* [ValidateVersionShard](#validateversionshard) - -### ApplySchema - -Applies the schema change to the specified keyspace on every master, running in parallel on all shards. The changes are then propagated to slaves via replication. If -allow_long_unavailability is set, schema changes affecting a large number of rows (and possibly incurring a longer period of unavailability) will not be rejected. - -#### Example - -
ApplySchema [-allow_long_unavailability] [-wait_slave_timeout=10s] {-sql=<sql> || -sql-file=<filename>} <keyspace>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| allow_long_unavailability | Boolean | Allow large schema changes which incur a longer unavailability of the database. | -| sql | string | A list of semicolon-delimited SQL commands | -| sql-file | string | Identifies the file that contains the SQL commands | -| wait_slave_timeout | Duration | The amount of time to wait for slaves to receive the schema change via replication. | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the command<ApplySchema> command This error occurs if the command is not called with exactly one argument. - - -### ApplyVSchema - -Applies the VTGate routing schema to the provided keyspace. Shows the result after application. - -#### Example - -
ApplyVSchema {-vschema=<vschema> || -vschema_file=<vschema file>} [-cells=c1,c2,...] [-skip_rebuild] <keyspace>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | If specified, limits the rebuild to the cells, after upload. Ignored if skipRebuild is set. | -| skip_rebuild | Boolean | If set, do no rebuild the SrvSchema objects. | -| dry-run | Boolean | Shows the proposed change without executing it | -| vschema | string | Identifies the VTGate routing schema | -| vschema_file | string | Identifies the VTGate routing schema file | -| sql | string | Identifies a VSchema DDL SQL statement | -| sql_file | string | Identifies a VSchema DDL SQL statement | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the <ApplyVSchema> command This error occurs if the command is not called with exactly one argument. -* either the <vschema> or <vschema>File flag must be specified when calling the <ApplyVSchema> command - - -### CopySchemaShard - -Copies the schema from a source shard's master (or a specific tablet) to a destination shard. The schema is applied directly on the master of the destination shard, and it is propagated to the replicas through binlogs. - -#### Example - -
CopySchemaShard [-tables=<table1>,<table2>,...] [-exclude_tables=<table1>,<table2>,...] [-include-views] [-wait_slave_timeout=10s] {<source keyspace/shard> || <source tablet alias>} <destination keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| exclude_tables | string | Specifies a comma-separated list of tables to exclude. Each is either an exact match, or a regular expression of the form /regexp/ | -| include-views | Boolean | Includes views in the output | -| tables | string | Specifies a comma-separated list of tables to copy. Each is either an exact match, or a regular expression of the form /regexp/ | -| wait_slave_timeout | Duration | The amount of time to wait for slaves to receive the schema change via replication. | - - -#### Arguments - -* <source tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <destination keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitespace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <source keyspace/shard> and <destination keyspace/shard> arguments are both required for the <CopySchemaShard> command. Instead of the <source keyspace/shard> argument, you can also specify <tablet alias> which refers to a specific tablet of the shard in the source keyspace This error occurs if the command is not called with exactly 2 arguments. - - -### GetPermissions - -Displays the permissions for a tablet. - -#### Example - -
GetPermissions <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <GetPermissions> command This error occurs if the command is not called with exactly one argument. - - -### GetSchema - -Displays the full schema for a tablet, or just the schema for the specified tables in that tablet. - -#### Example - -
GetSchema [-tables=<table1>,<table2>,...] [-exclude_tables=<table1>,<table2>,...] [-include-views] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| exclude_tables | string | Specifies a comma-separated list of tables to exclude. Each is either an exact match, or a regular expression of the form /regexp/ | -| include-views | Boolean | Includes views in the output | -| table_names_only | Boolean | Only displays table names that match | -| tables | string | Specifies a comma-separated list of tables for which we should gather information. Each is either an exact match, or a regular expression of the form /regexp/ | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <GetSchema> command This error occurs if the command is not called with exactly one argument. - - -### GetVSchema - -Displays the VTGate routing schema. - -#### Example - -
GetVSchema <keyspace>
- -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the <GetVSchema> command This error occurs if the command is not called with exactly one argument. - - -### RebuildVSchemaGraph - -Rebuilds the cell-specific SrvVSchema from the global VSchema objects in the provided cells (or all cells if none provided). - -#### Example - -
RebuildVSchemaGraph [-cells=c1,c2,...]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to look for tablets | - - -#### Errors - -* <RebuildVSchemaGraph> doesn't take any arguments This error occurs if the command is not called with exactly 0 arguments. - - -### ReloadSchema - -Reloads the schema on a remote tablet. - -#### Example - -
ReloadSchema <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <ReloadSchema> command This error occurs if the command is not called with exactly one argument. - - -### ReloadSchemaKeyspace - -Reloads the schema on all the tablets in a keyspace. - -#### Example - -
ReloadSchemaKeyspace [-concurrency=10] [-include_master=false] <keyspace>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| concurrency | Int | How many tablets to reload in parallel | -| include_master | Boolean | Include the master tablet(s) | - - -#### Arguments - -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace> argument is required for the <ReloadSchemaKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### ReloadSchemaShard - -Reloads the schema on all the tablets in a shard. - -#### Example - -
ReloadSchemaShard [-concurrency=10] [-include_master=false] <keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| concurrency | Int | How many tablets to reload in parallel | -| include_master | Boolean | Include the master tablet | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ReloadSchemaShard> command This error occurs if the command is not called with exactly one argument. - - -### ValidatePermissionsKeyspace - -Validates that the master permissions from shard 0 match those of all of the other tablets in the keyspace. - -#### Example - -
ValidatePermissionsKeyspace <keyspace name>
- -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace name> argument is required for the <ValidatePermissionsKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### ValidatePermissionsShard - -Validates that the master permissions match all the slaves. - -#### Example - -
ValidatePermissionsShard <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ValidatePermissionsShard> command This error occurs if the command is not called with exactly one argument. - - -### ValidateSchemaKeyspace - -Validates that the master schema from shard 0 matches the schema on all of the other tablets in the keyspace. - -#### Example - -
ValidateSchemaKeyspace [-exclude_tables=''] [-include-views] <keyspace name>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| exclude_tables | string | Specifies a comma-separated list of tables to exclude. Each is either an exact match, or a regular expression of the form /regexp/ | -| include-views | Boolean | Includes views in the validation | - - -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace name> argument is required for the <ValidateSchemaKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### ValidateSchemaShard - -Validates that the master schema matches all of the slaves. - -#### Example - -
ValidateSchemaShard [-exclude_tables=''] [-include-views] <keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| exclude_tables | string | Specifies a comma-separated list of tables to exclude. Each is either an exact match, or a regular expression of the form /regexp/ | -| include-views | Boolean | Includes views in the validation | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ValidateSchemaShard> command This error occurs if the command is not called with exactly one argument. - - -### ValidateVersionKeyspace - -Validates that the master version from shard 0 matches all of the other tablets in the keyspace. - -#### Example - -
ValidateVersionKeyspace <keyspace name>
- -#### Arguments - -* <keyspace name> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <keyspace name> argument is required for the <ValidateVersionKeyspace> command This error occurs if the command is not called with exactly one argument. - - -### ValidateVersionShard - -Validates that the master version matches all of the slaves. - -#### Example - -
ValidateVersionShard <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ValidateVersionShard> command This error occurs if the command is not called with exactly one argument. - - -## Serving Graph - -* [GetSrvKeyspace](#getsrvkeyspace) -* [GetSrvKeyspaceNames](#getsrvkeyspacenames) -* [GetSrvVSchema](#getsrvvschema) - -### GetSrvKeyspace - -Outputs a JSON structure that contains information about the SrvKeyspace. - -#### Example - -
GetSrvKeyspace <cell> <keyspace>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <keyspace> – Required. The name of a sharded database that contains one or more tables. Vitess distributes keyspace shards into multiple machines and provides an SQL interface to query the data. The argument value must be a string that does not contain whitespace. - -#### Errors - -* the <cell> and <keyspace> arguments are required for the <GetSrvKeyspace> command This error occurs if the command is not called with exactly 2 arguments. - - -### GetSrvKeyspaceNames - -Outputs a list of keyspace names. - -#### Example - -
GetSrvKeyspaceNames <cell>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <cell> argument is required for the <GetSrvKeyspaceNames> command This error occurs if the command is not called with exactly one argument. - - -### GetSrvVSchema - -Outputs a JSON structure that contains information about the SrvVSchema. - -#### Example - -
GetSrvVSchema <cell>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <cell> argument is required for the <GetSrvVSchema> command This error occurs if the command is not called with exactly one argument. - - -## Shards - -* [CreateShard](#createshard) -* [DeleteShard](#deleteshard) -* [EmergencyReparentShard](#emergencyreparentshard) -* [GetShard](#getshard) -* [InitShardMaster](#initshardmaster) -* [ListBackups](#listbackups) -* [ListShardTablets](#listshardtablets) -* [PlannedReparentShard](#plannedreparentshard) -* [RemoveBackup](#removebackup) -* [RemoveShardCell](#removeshardcell) -* [SetShardIsMasterServing](#setshardismasterserving) -* [SetShardTabletControl](#setshardtabletcontrol) -* [ShardReplicationFix](#shardreplicationfix) -* [ShardReplicationPositions](#shardreplicationpositions) -* [SourceShardAdd](#sourceshardadd) -* [SourceShardDelete](#sourcesharddelete) -* [TabletExternallyReparented](#tabletexternallyreparented) -* [ValidateShard](#validateshard) -* [WaitForFilteredReplication](#waitforfilteredreplication) - -### CreateShard - -Creates the specified shard. - -#### Example - -
CreateShard [-force] [-parent] <keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | Proceeds with the command even if the keyspace already exists | -| parent | Boolean | Creates the parent keyspace if it doesn't already exist | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <CreateShard> command This error occurs if the command is not called with exactly one argument. - - -### DeleteShard - -Deletes the specified shard(s). In recursive mode, it also deletes all tablets belonging to the shard. Otherwise, there must be no tablets left in the shard. - -#### Example - -
DeleteShard [-recursive] [-even_if_serving] <keyspace/shard> ...
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| even_if_serving | Boolean | Remove the shard even if it is serving. Use with caution. | -| recursive | Boolean | Also delete all tablets belonging to the shard. | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitespace, while the shard is typically identified by a string in the format <range start>-<range end>. To specify multiple values for this argument, separate individual values with a space. - -#### Errors - -* the <keyspace/shard> argument must be used to identify at least one keyspace and shard when calling the <DeleteShard> command This error occurs if the command is not called with at least one argument. - - -### EmergencyReparentShard - -Reparents the shard to the new master. Assumes the old master is dead and not responding. - -#### Example - -
EmergencyReparentShard -keyspace_shard=<keyspace/shard> -new_master=<tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| keyspace_shard | string | keyspace/shard of the shard that needs to be reparented | -| new_master | string | alias of a tablet that should be the new master | -| wait_slave_timeout | Duration | time to wait for slaves to catch up in reparenting | - - -#### Errors - -* action <EmergencyReparentShard> requires -keyspace_shard=<keyspace/shard> -new_master=<tablet alias> This error occurs if the command is not called with exactly 0 arguments. -* active reparent commands disabled (unset the -disable_active_reparents flag to enable) -* cannot use legacy syntax and flag -<new_master> for action <EmergencyReparentShard> at the same time - - -### GetShard - -Outputs a JSON structure that contains information about the Shard. - -#### Example - -
GetShard <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <GetShard> command This error occurs if the command is not called with exactly one argument. - - -### InitShardMaster - -Sets the initial master for a shard. Will make all other tablets in the shard slaves of the provided master. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead. - -#### Example - -
InitShardMaster [-force] [-wait_slave_timeout=<duration>] <keyspace/shard> <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | will force the reparent even if the provided tablet is not a master or the shard master | -| wait_slave_timeout | Duration | time to wait for slaves to catch up in reparenting | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* action <InitShardMaster> requires <keyspace/shard> <tablet alias> This error occurs if the command is not called with exactly 2 arguments. -* active reparent commands disabled (unset the -disable_active_reparents flag to enable) - - -### ListBackups - -Lists all the backups for a shard. - -#### Example - -
ListBackups <keyspace/shard>
- -#### Errors - -* action <ListBackups> requires <keyspace/shard> This error occurs if the command is not called with exactly one argument. - - -### ListShardTablets - -Lists all tablets in the specified shard. - -#### Example - -
ListShardTablets <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ListShardTablets> command This error occurs if the command is not called with exactly one argument. - - -### PlannedReparentShard - -Reparents the shard to the new master, or away from old master. Both old and new master need to be up and running. - -#### Example - -
PlannedReparentShard -keyspace_shard=<keyspace/shard> [-new_master=<tablet alias>] [-avoid_master=<tablet alias>]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| avoid_master | string | alias of a tablet that should not be the master, i.e. reparent to any other tablet if this one is the master | -| keyspace_shard | string | keyspace/shard of the shard that needs to be reparented | -| new_master | string | alias of a tablet that should be the new master | -| wait_slave_timeout | Duration | time to wait for slaves to catch up in reparenting | - - -#### Errors - -* action <PlannedReparentShard> requires -keyspace_shard=<keyspace/shard> [-new_master=<tablet alias>] [-avoid_master=<tablet alias>] This error occurs if the command is not called with exactly 0 arguments. -* active reparent commands disabled (unset the -disable_active_reparents flag to enable) -* cannot use legacy syntax and flags -<keyspace_shard> and -<new_master> for action <PlannedReparentShard> at the same time - - -### RemoveBackup - -Removes a backup for the BackupStorage. - -#### Example - -
RemoveBackup <keyspace/shard> <backup name>
- -#### Arguments - -* <backup name> – Required. - -#### Errors - -* action <RemoveBackup> requires <keyspace/shard> <backup name> This error occurs if the command is not called with exactly 2 arguments. - - -### RemoveShardCell - -Removes the cell from the shard's Cells list. - -#### Example - -
RemoveShardCell [-force] [-recursive] <keyspace/shard> <cell>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| force | Boolean | Proceeds even if the cell's topology server cannot be reached. The assumption is that you turned down the entire cell, and just need to update the global topo data. | -| recursive | Boolean | Also delete all tablets in that cell belonging to the specified shard. | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. - -#### Errors - -* the <keyspace/shard> and <cell> arguments are required for the <RemoveShardCell> command This error occurs if the command is not called with exactly 2 arguments. - - -### SetShardIsMasterServing - -Add or remove a shard from serving. This is meant as an emergency function. It does not rebuild any serving graph i.e. does not run 'RebuildKeyspaceGraph'. - -#### Example - -
SetShardServedTypes [--cells=c1,c2,...] [--remove] <keyspace/shard> <served tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells to update | -| remove | Boolean | Removes the served tablet type | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <served tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <keyspace/shard> and <served tablet type> arguments are both required for the <SetShardServedTypes> command This error occurs if the command is not called with exactly 2 arguments. - - -### SetShardTabletControl - -Sets the TabletControl record for a shard and type. Only use this for an emergency fix or after a finished vertical split. The *MigrateServedFrom* and *MigrateServedType* commands set this field appropriately already. Always specify the blacklisted_tables flag for vertical splits, but never for horizontal splits.

To set the DisableQueryServiceFlag, keep 'blacklisted_tables' empty, and set 'disable_query_service' to true or false. Useful to fix horizontal splits gone wrong.

To change the blacklisted tables list, specify the 'blacklisted_tables' parameter with the new list. Useful to fix tables that are being blocked after a vertical split.

To just remove the ShardTabletControl entirely, use the 'remove' flag, useful after a vertical split is finished to remove serving restrictions. - -#### Example - -
SetShardTabletControl [--cells=c1,c2,...] [--blacklisted_tables=t1,t2,...] [--remove] [--disable_query_service] <keyspace/shard> <tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| blacklisted_tables | string | Specifies a comma-separated list of tables to blacklist (used for vertical split). Each is either an exact match, or a regular expression of the form '/regexp/'. | -| cells | string | Specifies a comma-separated list of cells to update | -| disable_query_service | Boolean | Disables query service on the provided nodes. This flag requires 'blacklisted_tables' and 'remove' to be unset, otherwise it's ignored. | -| remove | Boolean | Removes cells for vertical splits. | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. 
- * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <keyspace/shard> and <tablet type> arguments are both required for the <SetShardTabletControl> command This error occurs if the command is not called with exactly 2 arguments. - - -### ShardReplicationFix - -Walks through a ShardReplication object and fixes the first error that it encounters. - -#### Example - -
ShardReplicationFix <cell> <keyspace/shard>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <cell> and <keyspace/shard> arguments are required for the ShardReplicationRemove command This error occurs if the command is not called with exactly 2 arguments. - - -### ShardReplicationPositions - -Shows the replication status of each slave machine in the shard graph. In this case, the status refers to the replication lag between the master vttablet and the slave vttablet. In Vitess, data is always written to the master vttablet first and then replicated to all slave vttablets. Output is sorted by tablet type, then replication position. Use ctrl-C to interrupt command and see partial result if needed. - -#### Example - -
ShardReplicationPositions <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ShardReplicationPositions> command This error occurs if the command is not called with exactly one argument. - - -### SourceShardAdd - -Adds the SourceShard record with the provided index. This is meant as an emergency function. It does not call RefreshState for the shard master. - -#### Example - -
SourceShardAdd [--key_range=<keyrange>] [--tables=<table1,table2,...>] <keyspace/shard> <uid> <source keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| key_range | string | Identifies the key range to use for the SourceShard | -| tables | string | Specifies a comma-separated list of tables to replicate (used for vertical split). Each is either an exact match, or a regular expression of the form /regexp/ | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <uid> – Required. -* <source keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard>, <uid>, and <source keyspace/shard> arguments are all required for the <SourceShardAdd> command This error occurs if the command is not called with exactly 3 arguments. - - -### SourceShardDelete - -Deletes the SourceShard record with the provided index. This is meant as an emergency cleanup function. It does not call RefreshState for the shard master. - -#### Example - -
SourceShardDelete <keyspace/shard> <uid>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <uid> – Required. - -#### Errors - -* the <keyspace/shard> and <uid> arguments are both required for the <SourceShardDelete> command This error occurs if the command is not called with at least 2 arguments. - - -### TabletExternallyReparented - -Changes metadata in the topology server to acknowledge a shard master change performed by an external tool. See the Reparenting guide for more information:https://github.com/vitessio/vitess/blob/master/doc/Reparenting.md#external-reparents. - -#### Example - -
TabletExternallyReparented <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <TabletExternallyReparented> command This error occurs if the command is not called with exactly one argument. - - -### ValidateShard - -Validates that all nodes that are reachable from this shard are consistent. - -#### Example - -
ValidateShard [-ping-tablets] <keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| ping-tablets | Boolean | Indicates whether all tablets should be pinged during the validation process | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <ValidateShard> command This error occurs if the command is not called with exactly one argument. - - -### WaitForFilteredReplication - -Blocks until the specified shard has caught up with the filtered replication of its source shard. - -#### Example - -
WaitForFilteredReplication [-max_delay <max_delay, default 30s>] <keyspace/shard>
- -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <WaitForFilteredReplication> command This error occurs if the command is not called with exactly one argument. - - -## Tablets - -* [Backup](#backup) -* [ChangeSlaveType](#changeslavetype) -* [DeleteTablet](#deletetablet) -* [ExecuteFetchAsDba](#executefetchasdba) -* [ExecuteHook](#executehook) -* [GetTablet](#gettablet) -* [IgnoreHealthError](#ignorehealtherror) -* [InitTablet](#inittablet) -* [Ping](#ping) -* [RefreshState](#refreshstate) -* [RefreshStateByShard](#refreshstatebyshard) -* [ReparentTablet](#reparenttablet) -* [RestoreFromBackup](#restorefrombackup) -* [RunHealthCheck](#runhealthcheck) -* [SetReadOnly](#setreadonly) -* [SetReadWrite](#setreadwrite) -* [Sleep](#sleep) -* [StartSlave](#startslave) -* [StopSlave](#stopslave) -* [UpdateTabletAddrs](#updatetabletaddrs) - -### Backup - -Stops mysqld and uses the BackupStorage service to store a new backup. This function also remembers if the tablet was replicating so that it can restore the same state after the backup completes. - -#### Example - -
Backup [-concurrency=4] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| concurrency | Int | Specifies the number of compression/checksum jobs to run simultaneously | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <Backup> command requires the <tablet alias> argument This error occurs if the command is not called with exactly one argument. - - -### ChangeSlaveType - -Changes the db type for the specified tablet, if possible. This command is used primarily to arrange replicas, and it will not convert a master.

NOTE: This command automatically updates the serving graph.

- -#### Example - -
ChangeSlaveType [-dry-run] <tablet alias> <tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| dry-run | Boolean | Lists the proposed change without actually executing it | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <tablet alias> and <db type> arguments are required for the <ChangeSlaveType> command This error occurs if the command is not called with exactly 2 arguments. -* failed reading tablet %v: %v -* invalid type transition %v: %v -> %v - - -### DeleteTablet - -Deletes tablet(s) from the topology. - -#### Example - -
DeleteTablet [-allow_master] <tablet alias> ...
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| allow_master | Boolean | Allows for the master tablet of a shard to be deleted. Use with caution. | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. To specify multiple values for this argument, separate individual values with a space. - -#### Errors - -* the <tablet alias> argument must be used to specify at least one tablet when calling the <DeleteTablet> command This error occurs if the command is not called with at least one argument. - - -### ExecuteFetchAsDba - -Runs the given SQL command as a DBA on the remote tablet. - -#### Example - -
ExecuteFetchAsDba [-max_rows=10000] [-disable_binlogs] [-json] <tablet alias> <sql command>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| disable_binlogs | Boolean | Disables writing to binlogs during the query | -| json | Boolean | Output JSON instead of human-readable table | -| max_rows | Int | Specifies the maximum number of rows to allow in reset | -| reload_schema | Boolean | Indicates whether the tablet schema will be reloaded after executing the SQL command. The default value is false, which indicates that the tablet schema will not be reloaded. | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <sql command> – Required. - -#### Errors - -* the <tablet alias> and <sql command> arguments are required for the <ExecuteFetchAsDba> command This error occurs if the command is not called with exactly 2 arguments. - - -### ExecuteHook - -Runs the specified hook on the given tablet. A hook is a script that resides in the $VTROOT/vthook directory. You can put any script into that directory and use this command to run that script.

For this command, the param=value arguments are parameters that the command passes to the specified hook. - -#### Example - -
ExecuteHook <tablet alias> <hook name> [<param1=value1> <param2=value2> ...]
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <hook name> – Required. -* <param1=value1> <param2=value2> . – Optional. - -#### Errors - -* the <tablet alias> and <hook name> arguments are required for the <ExecuteHook> command This error occurs if the command is not called with at least 2 arguments. - - -### GetTablet - -Outputs a JSON structure that contains information about the Tablet. - -#### Example - -
GetTablet <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <GetTablet> command This error occurs if the command is not called with exactly one argument. - - -### IgnoreHealthError - -Sets the regexp for health check errors to ignore on the specified tablet. The pattern has implicit ^$ anchors. Set to empty string or restart vttablet to stop ignoring anything. - -#### Example - -
IgnoreHealthError <tablet alias> <ignore regexp>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <ignore regexp> – Required. - -#### Errors - -* the <tablet alias> and <ignore regexp> arguments are required for the <IgnoreHealthError> command This error occurs if the command is not called with exactly 2 arguments. - - -### InitTablet - -Initializes a tablet in the topology.

- -#### Example - -
InitTablet [-allow_update] [-allow_different_shard] [-allow_master_override] [-parent] [-db_name_override=<db name>] [-hostname=<hostname>] [-mysql_port=<port>] [-port=<port>] [-grpc_port=<port>] -keyspace=<keyspace> -shard=<shard> <tablet alias> <tablet type>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| allow_master_override | Boolean | Use this flag to force initialization if a tablet is created as master, and a master for the keyspace/shard already exists. Use with caution. | -| allow_update | Boolean | Use this flag to force initialization if a tablet with the same name already exists. Use with caution. | -| db_name_override | string | Overrides the name of the database that the vttablet uses | -| grpc_port | Int | The gRPC port for the vttablet process | -| hostname | string | The server on which the tablet is running | -| keyspace | string | The keyspace to which this tablet belongs | -| mysql_host | string | The mysql host for the mysql server | -| mysql_port | Int | The mysql port for the mysql server | -| parent | Boolean | Creates the parent shard and keyspace if they don't yet exist | -| port | Int | The main port for the vttablet process | -| shard | string | The shard to which this tablet belongs | -| tags | string | A comma-separated list of key:value pairs that are used to tag the tablet | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * drained – A tablet that is reserved for a background process. For example, a tablet used by a vtworker process, where the tablet is likely lagging in replication. - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. 
- * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - - - - -#### Errors - -* the <tablet alias> and <tablet type> arguments are both required for the <InitTablet> command This error occurs if the command is not called with exactly 2 arguments. - - -### Ping - -Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations. - -#### Example - -
Ping <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <Ping> command This error occurs if the command is not called with exactly one argument. - - -### RefreshState - -Reloads the tablet record on the specified tablet. - -#### Example - -
RefreshState <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <RefreshState> command This error occurs if the command is not called with exactly one argument. - - -### RefreshStateByShard - -Runs 'RefreshState' on all tablets in the given shard. - -#### Example - -
RefreshStateByShard [-cells=c1,c2,...] <keyspace/shard>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cells | string | Specifies a comma-separated list of cells whose tablets are included. If empty, all cells are considered. | - - -#### Arguments - -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* the <keyspace/shard> argument is required for the <RefreshStateByShard> command This error occurs if the command is not called with exactly one argument. - - -### ReparentTablet - -Reparent a tablet to the current master in the shard. This only works if the current slave position matches the last known reparent action. - -#### Example - -
ReparentTablet <tablet alias>
- -#### Errors - -* action <ReparentTablet> requires <tablet alias> This error occurs if the command is not called with exactly one argument. -* active reparent commands disabled (unset the -disable_active_reparents flag to enable) - - -### RestoreFromBackup - -Stops mysqld and restores the data from the latest backup. - -#### Example - -
RestoreFromBackup <tablet alias>
- -#### Errors - -* the <RestoreFromBackup> command requires the <tablet alias> argument This error occurs if the command is not called with exactly one argument. - - -### RunHealthCheck - -Runs a health check on a remote tablet. - -#### Example - -
RunHealthCheck <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <RunHealthCheck> command This error occurs if the command is not called with exactly one argument. - - -### SetReadOnly - -Sets the tablet as read-only. - -#### Example - -
SetReadOnly <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <SetReadOnly> command This error occurs if the command is not called with exactly one argument. -* failed reading tablet %v: %v - - -### SetReadWrite - -Sets the tablet as read-write. - -#### Example - -
SetReadWrite <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <SetReadWrite> command This error occurs if the command is not called with exactly one argument. -* failed reading tablet %v: %v - - -### Sleep - -Blocks the action queue on the specified tablet for the specified amount of time. This is typically used for testing. - -#### Example - -
Sleep <tablet alias> <duration>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. -* <duration> – Required. The amount of time that the action queue should be blocked. The value is a string that contains a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms" or "1h45m". See the definition of the Go language's ParseDuration function for more details. Note that, in practice, the value should be a positively signed value. - -#### Errors - -* the <tablet alias> and <duration> arguments are required for the <Sleep> command This error occurs if the command is not called with exactly 2 arguments. - - -### StartSlave - -Starts replication on the specified slave. - -#### Example - -
StartSlave <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* action <StartSlave> requires <tablet alias> This error occurs if the command is not called with exactly one argument. -* failed reading tablet %v: %v - - -### StopSlave - -Stops replication on the specified slave. - -#### Example - -
StopSlave <tablet alias>
- -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* action <StopSlave> requires <tablet alias> This error occurs if the command is not called with exactly one argument. -* failed reading tablet %v: %v - - -### UpdateTabletAddrs - -Updates the IP address and port numbers of a tablet. - -#### Example - -
UpdateTabletAddrs [-hostname <hostname>] [-ip-addr <ip addr>] [-mysql-port <mysql port>] [-vt-port <vt port>] [-grpc-port <grpc port>] <tablet alias>
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| grpc-port | Int | The gRPC port for the vttablet process | -| hostname | string | The fully qualified host name of the server on which the tablet is running. | -| mysql-port | Int | The mysql port for the mysql daemon | -| mysql_host | string | The mysql host for the mysql server | -| vt-port | Int | The main port for the vttablet process | - - -#### Arguments - -* <tablet alias> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <cell name>-<uid>. - -#### Errors - -* the <tablet alias> argument is required for the <UpdateTabletAddrs> command This error occurs if the command is not called with exactly one argument. - - -## Topo - -* [TopoCat](#topocat) - -### TopoCat - -Retrieves the file(s) at <path> from the topo service, and displays it. It can resolve wildcards, and decode the proto-encoded data. - -#### Example - -
TopoCat [-cell <cell>] [-decode_proto] [-long] <path> [<path>...]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| cell | string | topology cell to cat the file from. Defaults to global cell. | -| decode_proto | Boolean | decode proto files and display them as text | -| long | Boolean | long listing. | - - -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <path> – Required. -* <path>. – Optional. - -#### Errors - -* <TopoCat>: no path specified This error occurs if the command is not called with at least one argument. -* <TopoCat>: invalid wildcards: %v -* <TopoCat>: some paths had errors - - -## Workflows - -* [WorkflowAction](#workflowaction) -* [WorkflowCreate](#workflowcreate) -* [WorkflowDelete](#workflowdelete) -* [WorkflowStart](#workflowstart) -* [WorkflowStop](#workflowstop) -* [WorkflowTree](#workflowtree) -* [WorkflowWait](#workflowwait) - -### WorkflowAction - -Sends the provided action name on the specified path. - -#### Example - -
WorkflowAction <path> <name>
- -#### Arguments - -* <name> – Required. - -#### Errors - -* the <path> and <name> arguments are required for the <WorkflowAction> command This error occurs if the command is not called with exactly 2 arguments. -* no workflow.Manager registered - - -### WorkflowCreate - -Creates the workflow with the provided parameters. The workflow is also started, unless -skip_start is specified. - -#### Example - -
WorkflowCreate [-skip_start] <factoryName> [parameters...]
- -#### Flags - -| Name | Type | Definition | -| :-------- | :--------- | :--------- | -| skip_start | Boolean | If set, the workflow will not be started. | - - -#### Arguments - -* <factoryName> – Required. - -#### Errors - -* the <factoryName> argument is required for the <WorkflowCreate> command This error occurs if the command is not called with at least one argument. -* no workflow.Manager registered - - -### WorkflowDelete - -Deletes the finished or not started workflow. - -#### Example - -
WorkflowDelete <uuid>
- -#### Errors - -* the <uuid> argument is required for the <WorkflowDelete> command This error occurs if the command is not called with exactly one argument. -* no workflow.Manager registered - - -### WorkflowStart - -Starts the workflow. - -#### Example - -
WorkflowStart <uuid>
- -#### Errors - -* the <uuid> argument is required for the <WorkflowStart> command This error occurs if the command is not called with exactly one argument. -* no workflow.Manager registered - - -### WorkflowStop - -Stops the workflow. - -#### Example - -
WorkflowStop <uuid>
- -#### Errors - -* the <uuid> argument is required for the <WorkflowStop> command This error occurs if the command is not called with exactly one argument. -* no workflow.Manager registered - - -### WorkflowTree - -Displays a JSON representation of the workflow tree. - -#### Example - -
WorkflowTree 
- -#### Errors - -* the <WorkflowTree> command takes no parameter This error occurs if the command is not called with exactly 0 arguments. -* no workflow.Manager registered - - -### WorkflowWait - -Waits for the workflow to finish. - -#### Example - -
WorkflowWait <uuid>
- -#### Errors - -* the <uuid> argument is required for the <WorkflowWait> command This error occurs if the command is not called with exactly one argument. -* no workflow.Manager registered - - diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index 2dc72b8de40..a0300de7958 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,4 +1,4 @@ -FROM golang:1.11-stretch +FROM golang:1.12-stretch # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ diff --git a/docker/bootstrap/Dockerfile.mariadb103 b/docker/bootstrap/Dockerfile.mariadb103 index d2f7759fb48..c2828ddd25d 100644 --- a/docker/bootstrap/Dockerfile.mariadb103 +++ b/docker/bootstrap/Dockerfile.mariadb103 @@ -4,8 +4,8 @@ FROM vitess/bootstrap:common RUN apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 0xF1656F24C74CD1D8 \ && add-apt-repository 'deb [arch=amd64] http://ftp.osuosl.org/pub/mariadb/repo/10.3/debian stretch main' \ && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - mariadb-server \ - libmariadbclient-dev \ + mariadb-server-10.3 \ + libmariadb-dev \ && rm -rf /var/lib/apt/lists/* # Bootstrap Vitess diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile index 704f2a865be..4a037dfcbb0 100644 --- a/docker/k8s/Dockerfile +++ b/docker/k8s/Dockerfile @@ -41,25 +41,8 @@ COPY --from=base $VTTOP/web /vt/web/ # copy vitess config COPY --from=base $VTTOP/config/init_db.sql /vt/config/ -# mysql flavor files for db specific .cnf settings -COPY --from=base $VTTOP/config/mycnf/master_mysql56.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/master_mysql80.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/master_mariadb.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/master_mariadb103.cnf /vt/config/mycnf/ - -# settings for different types of instances -COPY 
--from=base $VTTOP/config/mycnf/default.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/default-fast.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/master.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/replica.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/rdonly.cnf /vt/config/mycnf/ -COPY --from=base $VTTOP/config/mycnf/backup.cnf /vt/config/mycnf/ - -# settings to support rbr -COPY --from=base $VTTOP/config/mycnf/rbr.cnf /vt/config/mycnf/ - -# recommended production settings -COPY --from=base $VTTOP/config/mycnf/production.cnf /vt/config/mycnf/ +# my.cnf include files +COPY --from=base $VTTOP/config/mycnf /vt/config/mycnf # add vitess user and add permissions RUN groupadd -r --gid 2000 vitess && useradd -r -g vitess --uid 1000 vitess && \ diff --git a/docker/lite/Dockerfile b/docker/lite/Dockerfile index 78318e5c652..aa1ff5b831c 100644 --- a/docker/lite/Dockerfile +++ b/docker/lite/Dockerfile @@ -22,7 +22,8 @@ RUN chown -R vitess:vitess /vt FROM debian:stretch-slim # Install dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \ +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + gnupg dirmngr ca-certificates wget libdbd-mysql-perl rsync libaio1 libatomic1 libcurl3 libev4 \ && for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done \ && echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.7' > /etc/apt/sources.list.d/mysql.list \ && apt-get update \ @@ -32,8 +33,11 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins libmysqlclient20 \ mysql-client \ mysql-server \ - && rm -rf /var/lib/apt/lists/* \ - && groupadd -r vitess && useradd -r -g vitess vitess + && wget 
https://www.percona.com/downloads/XtraBackup/Percona-XtraBackup-2.4.13/binary/debian/stretch/x86_64/percona-xtrabackup-24_2.4.13-1.stretch_amd64.deb \ + && dpkg -i percona-xtrabackup-24_2.4.13-1.stretch_amd64.deb \ + && rm -f percona-xtrabackup-24_2.4.13-1.stretch_amd64.deb \ + && rm -rf /var/lib/apt/lists/* \ + && groupadd -r vitess && useradd -r -g vitess vitess # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTTOP /vt/src/vitess.io/vitess diff --git a/docker/test/run.sh b/docker/test/run.sh index 15b305d2206..dc59fad26f4 100755 --- a/docker/test/run.sh +++ b/docker/test/run.sh @@ -211,9 +211,9 @@ fi # Clean up host dir mounted VTDATAROOT if [[ -n "$hostdir" ]]; then # Use Docker user to clean up first, to avoid permission errors. - #docker run --name=rm_$testid -v $hostdir:/vt/vtdataroot $image bash -c 'rm -rf /vt/vtdataroot/*' + docker run --name=rm_$testid -v $hostdir:/vt/vtdataroot $image bash -c 'rm -rf /vt/vtdataroot/*' docker rm -f rm_$testid &>/dev/null - #rm -rf $hostdir + rm -rf $hostdir fi # If requested, create the cache image. diff --git a/examples/local/env.sh b/examples/local/env.sh index 9084ff29aa8..2c84798747f 100644 --- a/examples/local/env.sh +++ b/examples/local/env.sh @@ -29,15 +29,8 @@ if [ -z "$VT_MYSQL_ROOT" ]; then export VT_MYSQL_ROOT=$(dirname `dirname $mysql_path`) fi -# restore MYSQL_FLAVOR, saved by bootstrap.sh -if [ -r "$VTROOT/dist/MYSQL_FLAVOR" ]; then - MYSQL_FLAVOR=$(cat "$VTROOT/dist/MYSQL_FLAVOR") - export MYSQL_FLAVOR -fi - -if [ -z "$MYSQL_FLAVOR" ]; then - export MYSQL_FLAVOR=MySQL56 -fi +# Previously the file specified MYSQL_FLAVOR +# it is now autodetected if [ "${TOPO}" = "zk2" ]; then # Each ZooKeeper server needs a list of all servers in the quorum. 
diff --git a/examples/local/vttablet-up.sh b/examples/local/vttablet-up.sh index 0acd5e51b9f..75c3b191d04 100755 --- a/examples/local/vttablet-up.sh +++ b/examples/local/vttablet-up.sh @@ -38,23 +38,10 @@ source $script_root/env.sh init_db_sql_file="$VTROOT/config/init_db.sql" -export EXTRA_MY_CNF=$VTROOT/config/mycnf/default-fast.cnf:$VTROOT/config/mycnf/rbr.cnf +# Previously this file set EXTRA_MY_CNF based on MYSQL_FLAVOR +# It now relies on mysqlctl to autodetect -case "$MYSQL_FLAVOR" in - "MySQL56") - export EXTRA_MY_CNF=$EXTRA_MY_CNF:$VTROOT/config/mycnf/master_mysql56.cnf - ;; - "MariaDB") - export EXTRA_MY_CNF=$EXTRA_MY_CNF:$VTROOT/config/mycnf/master_mariadb.cnf - ;; - "MariaDB103") - export EXTRA_MY_CNF=$EXTRA_MY_CNF:$VTROOT/config/mycnf/master_mariadb103.cnf - ;; - *) - echo "Please set MYSQL_FLAVOR to MySQL56 or MariaDB." - exit 1 - ;; -esac +export EXTRA_MY_CNF=$VTROOT/config/mycnf/default-fast.cnf:$VTROOT/config/mycnf/rbr.cnf mkdir -p $VTDATAROOT/backups diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index c95f609ff06..690271a0e63 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -87,12 +87,17 @@ import ( const ( backupTimestampFormat = "2006-01-02.150405" + manifestFileName = "MANIFEST" ) var ( // vtbackup-specific flags - timeout = flag.Duration("timeout", 2*time.Hour, "Overall timeout for this whole vtbackup run, including restoring the previous backup, waiting for replication, and uploading files") - replicationTimeout = flag.Duration("replication_timeout", 1*time.Hour, "The timeout for the step of waiting for replication to catch up. If progress is made before this timeout is reached, the backup will be taken anyway to save partial progress, but vtbackup will return a non-zero exit code to indicate it should be retried since not all expected data was backed up") + // We used to have timeouts, but these did more harm than good. 
If a backup + // has been going for a while, giving up and starting over from scratch is + // pretty much never going to help. We should just keep trying and have a + // system that alerts a human if it's taking longer than expected. + _ = flag.Duration("timeout", 2*time.Hour, "DEPRECATED AND UNUSED") + _ = flag.Duration("replication_timeout", 1*time.Hour, "DEPRECATED AND UNUSED") minBackupInterval = flag.Duration("min_backup_interval", 0, "Only take a new backup if it's been at least this long since the most recent backup.") minRetentionTime = flag.Duration("min_retention_time", 0, "Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.") @@ -128,8 +133,7 @@ func main() { exit.Return(1) } - ctx, cancel := context.WithTimeout(context.Background(), *timeout) - defer cancel() + ctx := context.Background() // Open connection backup storage. backupDir := fmt.Sprintf("%v/%v", *initKeyspace, *initShard) @@ -288,13 +292,6 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back for { time.Sleep(time.Second) - // Check if the replication context is still good. - if time.Since(waitStartTime) > *replicationTimeout { - // If we time out on this step, we still might take the backup anyway. - log.Errorf("Timed out waiting for replication to catch up to %v.", masterPos) - break - } - status, statusErr := mysqld.SlaveStatus() if statusErr != nil { log.Warningf("Error getting replication status: %v", statusErr) @@ -348,7 +345,7 @@ func resetReplication(ctx context.Context, pos mysql.Position, mysqld mysqlctl.M return vterrors.Wrap(err, "failed to reset slave") } - // Check if we have a postion to resume from, if not reset to the beginning of time + // Check if we have a position to resume from, if not reset to the beginning of time if !pos.IsZero() { // Set the position at which to resume from the master. 
if err := mysqld.SetSlavePosition(ctx, pos); err != nil { @@ -521,13 +518,21 @@ func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage ba if len(backups) == 0 && !*allowFirstBackup { return false, fmt.Errorf("no existing backups to restore from; backup is not possible since -initial_backup flag was not enabled") } + // Look for the most recent, complete backup. + lastBackup := lastCompleteBackup(ctx, backups) + if lastBackup == nil { + if *allowFirstBackup { + // There's no complete backup, but we were told to take one from scratch anyway. + return true, nil + } + return false, fmt.Errorf("no complete backups to restore from; backup is not possible since -initial_backup flag was not enabled") + } - // Has it been long enough since the last backup to need a new one? + // Has it been long enough since the last complete backup to need a new one? if *minBackupInterval == 0 { // No minimum interval is set, so always backup. return true, nil } - lastBackup := backups[len(backups)-1] lastBackupTime, err := parseBackupTime(lastBackup.Name()) if err != nil { return false, fmt.Errorf("can't check last backup time: %v", err) @@ -541,3 +546,33 @@ func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage ba log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, *minBackupInterval) return true, nil } + +func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandle) backupstorage.BackupHandle { + if len(backups) == 0 { + return nil + } + + // Backups are sorted in ascending order by start time. Start at the end. + for i := len(backups) - 1; i >= 0; i-- { + // Check if this backup is complete by looking for the MANIFEST file, + // which is written at the end after all files are uploaded. 
+ backup := backups[i] + if err := checkBackupComplete(ctx, backup); err != nil { + log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err) + continue + } + return backup + } + + return nil +} + +func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) error { + manifest, err := mysqlctl.GetBackupManifest(ctx, backup) + if err != nil { + return fmt.Errorf("can't get backup MANIFEST: %v", err) + } + + log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String()) + return nil +} diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go index 6c4d74d993c..87c6c4dc492 100644 --- a/go/cmd/vtclient/vtclient.go +++ b/go/cmd/vtclient/vtclient.go @@ -64,8 +64,14 @@ Examples: jsonOutput = flag.Bool("json", false, "Output JSON instead of human-readable table") parallel = flag.Int("parallel", 1, "DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.") count = flag.Int("count", 1, "DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.") - minRandomID = flag.Int("min_random_id", 0, "min random ID to generate. When max_random_id > min_random_id, for each query, a random number is generated in [min_random_id, max_random_id) and attached to the end of the bind variables.") - maxRandomID = flag.Int("max_random_id", 0, "max random ID.") + minSeqID = flag.Int("min_sequence_id", 0, "min sequence ID to generate. 
When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.") + maxSeqID = flag.Int("max_sequence_id", 0, "max sequence ID.") + useRandom = flag.Bool("use_random_sequence", false, "use random sequence for generating [min_sequence_id, max_sequence_id)") + qps = flag.Int("qps", 0, "queries per second to throttle each thread at.") +) + +var ( + seqChan = make(chan int, 10) ) func init() { @@ -149,6 +155,21 @@ func run() (*results, error) { return nil, errors.New("no additional arguments after the query allowed") } + if *maxSeqID > *minSeqID { + go func() { + if *useRandom { + rand.Seed(time.Now().UnixNano()) + for { + seqChan <- rand.Intn(*maxSeqID-*minSeqID) + *minSeqID + } + } else { + for i := *minSeqID; i < *maxSeqID; i++ { + seqChan <- i + } + } + }() + } + c := vitessdriver.Configuration{ Protocol: *vtgateconn.VtgateProtocol, Address: *server, @@ -168,9 +189,10 @@ func run() (*results, error) { } func prepareBindVariables() []interface{} { - bv := *bindVariables - if *maxRandomID > *minRandomID { - bv = append(bv, rand.Intn(*maxRandomID-*minRandomID)+*minRandomID) + bv := make([]interface{}, 0, len(*bindVariables)+1) + bv = append(bv, (*bindVariables)...) 
+ if *maxSeqID > *minSeqID { + bv = append(bv, <-seqChan) } return bv } @@ -181,6 +203,8 @@ func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) { wg := sync.WaitGroup{} isDML := sqlparser.IsDML(sql) + isThrottled := *qps > 0 + start := time.Now() for i := 0; i < *parallel; i++ { wg.Add(1) @@ -188,6 +212,12 @@ func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) { go func() { defer wg.Done() + var ticker *time.Ticker + if isThrottled { + tickDuration := time.Second / time.Duration(*qps) + ticker = time.NewTicker(tickDuration) + } + for j := 0; j < *count; j++ { var qr *results var err error @@ -208,6 +238,10 @@ func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) { ec.RecordError(err) // We keep going and do not return early purpose. } + + if ticker != nil { + <-ticker.C + } } }() } diff --git a/go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql b/go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql new file mode 100644 index 00000000000..25eb226f006 --- /dev/null +++ b/go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql @@ -0,0 +1,7 @@ +create table customers ( + id bigint, + name varchar(64), + age SMALLINT, + primary key (id) +) Engine=InnoDB; + diff --git a/go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql b/go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql new file mode 100644 index 00000000000..300e7e9e41e --- /dev/null +++ b/go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql @@ -0,0 +1 @@ +alter vschema on customers add vindex hash (id); \ No newline at end of file diff --git a/go/cmd/vttestserver/data/schema/app_customer/vschema.json b/go/cmd/vttestserver/data/schema/app_customer/vschema.json new file mode 100644 index 00000000000..cdeb6545df9 --- /dev/null +++ b/go/cmd/vttestserver/data/schema/app_customer/vschema.json @@ -0,0 +1,10 
@@ +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + } +} \ No newline at end of file diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql b/go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql new file mode 100644 index 00000000000..e0ef23320ee --- /dev/null +++ b/go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql @@ -0,0 +1,12 @@ +create table test_table ( + id bigint, + name varchar(64), + age SMALLINT, + percent DECIMAL(5,2), + datetime_col DATETIME, + timestamp_col TIMESTAMP, + date_col DATE, + time_col TIME, + primary key (id) +) Engine=InnoDB; + diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql b/go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql new file mode 100644 index 00000000000..f1333a384fa --- /dev/null +++ b/go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql @@ -0,0 +1 @@ +alter vschema create vindex my_vdx using hash \ No newline at end of file diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql b/go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql new file mode 100644 index 00000000000..7cdccd02077 --- /dev/null +++ b/go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql @@ -0,0 +1 @@ +alter vschema on test_table add vindex my_vdx (id) \ No newline at end of file diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql b/go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql new file mode 100644 index 00000000000..79aea4379ab --- /dev/null +++ b/go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql @@ -0,0 +1,11 @@ +create table test_table1 ( + id bigint, + name varchar(64), + age SMALLINT, + percent DECIMAL(5,2), + datetime_col DATETIME, + timestamp_col TIMESTAMP, + date_col DATE, + time_col TIME, 
+ primary key (id) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index 65614d80ace..e6280af194e 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -152,6 +152,8 @@ func parseFlags() (config vttest.Config, env vttest.Environment, err error) { flag.StringVar(&config.TransactionMode, "transaction_mode", "MULTI", "Transaction mode MULTI (default), SINGLE or TWOPC ") flag.Float64Var(&config.TransactionTimeout, "queryserver-config-transaction-timeout", 0, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") + flag.StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname") + flag.Parse() if basePort != 0 { @@ -197,32 +199,34 @@ func parseFlags() (config vttest.Config, env vttest.Environment, err error) { } func main() { + cluster := runCluster() + defer cluster.TearDown() + + kvconf := cluster.JSONConfig() + if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { + log.Fatal(err) + } + + select {} +} + +func runCluster() vttest.LocalCluster { config, env, err := parseFlags() if err != nil { log.Fatal(err) } - log.Infof("Starting local cluster...") log.Infof("config: %#v", config) - cluster := vttest.LocalCluster{ Config: config, Env: env, } - err = cluster.Setup() - defer cluster.TearDown() - if err != nil { log.Fatal(err) } - kvconf := cluster.JSONConfig() - if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { - log.Fatal(err) - } - log.Info("Local cluster started.") - select {} + return cluster } diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go new file mode 100644 index 00000000000..79d246efbfa --- /dev/null +++ b/go/cmd/vttestserver/vttestserver_test.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "fmt" + "os" + "testing" + + 
"vitess.io/vitess/go/vt/vttest" + + "github.com/golang/protobuf/jsonpb" + "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/vtctl/vtctlclient" +) + +type columnVindex struct { + keyspace string + table string + vindex string + vindexType string + column string +} + +func TestRunsVschemaMigrations(t *testing.T) { + schemaDirArg := "-schema_dir=data/schema" + webDirArg := "-web_dir=web/vtctld/app" + webDir2Arg := "-web_dir2=web/vtctld2/app" + tabletHostname := "-tablet_hostname=localhost" + keyspaceArg := "-keyspaces=test_keyspace,app_customer" + numShardsArg := "-num_shards=2,2" + + os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, webDirArg, webDir2Arg, tabletHostname}...) + + cluster := runCluster() + defer cluster.TearDown() + + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) +} + +func assertColumnVindex(t *testing.T, cluster vttest.LocalCluster, expected columnVindex) { + server := fmt.Sprintf("localhost:%v", cluster.GrpcPort()) + args := []string{"GetVSchema", expected.keyspace} + ctx := context.Background() + + err := vtctlclient.RunCommandAndWait(ctx, server, args, func(e *logutil.Event) { + var keyspace vschema.Keyspace + if err := jsonpb.UnmarshalString(e.Value, &keyspace); err != nil { + t.Error(err) + } + + columnVindex := keyspace.Tables[expected.table].ColumnVindexes[0] + actualVindex := keyspace.Vindexes[expected.vindex] + assertEqual(t, actualVindex.Type, expected.vindexType, "Actual vindex type different from expected") + assertEqual(t, columnVindex.Name, expected.vindex, "Actual vindex name different from expected") + assertEqual(t, columnVindex.Columns[0], expected.column, "Actual vindex column different from expected") + }) + if 
err != nil { + t.Error(err) + } +} + +func assertEqual(t *testing.T, actual string, expected string, message string) { + if actual != expected { + t.Errorf("%s: actual %s, expected %s", message, actual, expected) + } +} diff --git a/go/mysql/auth_server_static_test.go b/go/mysql/auth_server_static_test.go index b29d195d845..327e37bd2f6 100644 --- a/go/mysql/auth_server_static_test.go +++ b/go/mysql/auth_server_static_test.go @@ -105,7 +105,7 @@ func TestHostMatcher(t *testing.T) { addr := &net.TCPAddr{IP: ip, Port: 9999} match := matchSourceHost(net.Addr(addr), "") if !match { - t.Fatalf("Should match any addres when target is empty") + t.Fatalf("Should match any address when target is empty") } match = matchSourceHost(net.Addr(addr), "localhost") diff --git a/go/mysql/binlog_event_common.go b/go/mysql/binlog_event_common.go index 5765fbb0342..7ee90036d7c 100644 --- a/go/mysql/binlog_event_common.go +++ b/go/mysql/binlog_event_common.go @@ -176,7 +176,7 @@ func (ev binlogEvent) IsPseudo() bool { // 4 timestamp (same as timestamp header field) // 1 header length // p (one byte per packet type) event type header lengths -// Rest was infered from reading source code: +// Rest was inferred from reading source code: // 1 checksum algorithm // 4 checksum func (ev binlogEvent) Format() (f BinlogFormat, err error) { diff --git a/go/mysql/binlog_event_json.go b/go/mysql/binlog_event_json.go index 5b82821ce15..a14e18188b7 100644 --- a/go/mysql/binlog_event_json.go +++ b/go/mysql/binlog_event_json.go @@ -335,7 +335,7 @@ func printJSONDouble(data []byte, toplevel bool, result *bytes.Buffer) { } func printJSONString(data []byte, toplevel bool, result *bytes.Buffer) { - size, pos := readVariableInt(data, 0) + size, pos := readVariableLength(data, 0) // A toplevel JSON string is printed as a JSON-escaped // string inside a string, as the value is parsed as JSON. 
@@ -356,7 +356,7 @@ func printJSONString(data []byte, toplevel bool, result *bytes.Buffer) { func printJSONOpaque(data []byte, toplevel bool, result *bytes.Buffer) error { typ := data[0] - size, pos := readVariableInt(data, 1) + size, pos := readVariableLength(data, 1) // A few types have special encoding. switch typ { @@ -483,7 +483,11 @@ func readOffsetOrSize(data []byte, pos int, large bool) (int, int) { int(data[pos+1])<<8, pos + 2 } -func readVariableInt(data []byte, pos int) (int, int) { +// readVariableLength implements the logic to decode the length +// of an arbitrarily long string as implemented by the mysql server +// https://github.com/mysql/mysql-server/blob/5.7/sql/json_binary.cc#L234 +// https://github.com/mysql/mysql-server/blob/8.0/sql/json_binary.cc#L283 +func readVariableLength(data []byte, pos int) (int, int) { var bb byte var res int var idx byte @@ -491,6 +495,8 @@ func readVariableInt(data []byte, pos int) (int, int) { bb = data[pos] pos++ res |= int(bb&0x7f) << (7 * idx) + // if the high bit is 1, the integer value of the byte will be negative + // high bit of 1 signifies that the next byte is part of the length encoding if int8(bb) >= 0 { break } diff --git a/go/mysql/binlog_event_json_test.go b/go/mysql/binlog_event_json_test.go index 9753a7d4bce..d9bff262609 100644 --- a/go/mysql/binlog_event_json_test.go +++ b/go/mysql/binlog_event_json_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package mysql import ( + "fmt" + "reflect" "strings" "testing" ) @@ -34,6 +36,9 @@ func TestJSON(t *testing.T) { }, { data: []byte{0, 1, 0, 12, 0, 11, 0, 1, 0, 5, 2, 0, 97}, expected: `JSON_OBJECT('a',2)`, + }, { + data: []byte{0, 1, 0, 29, 0, 11, 0, 4, 0, 0, 15, 0, 97, 115, 100, 102, 1, 0, 14, 0, 11, 0, 3, 0, 5, 123, 0, 102, 111, 111}, + expected: `JSON_OBJECT('asdf',JSON_OBJECT('foo',123))`, }, { data: []byte{2, 2, 0, 10, 0, 5, 1, 0, 5, 2, 0}, expected: `JSON_ARRAY(1,2)`, @@ -50,7 +55,7 @@ func TestJSON(t *testing.T) { data: []byte{0, 1, 0, 149, 0, 11, 0, 6, 0, 12, 17, 0, 115, 99, 111, 112, 101, 115, 130, 1, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 56, 65, 65, 65, 66, 103, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 84, 216, 142, 184}, expected: `JSON_OBJECT('scopes','AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA')`, }, { - // repeat the same string 10 times, to test readVariableInt when length of string + // repeat the same string 10 times, to test the case where length of string // requires 2 bytes to store data: []byte{12, 130, 1, 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, @@ -149,7 +154,7 @@ func TestJSON(t *testing.T) { r, err := printJSONData(tcase.data) if err != nil { if got := err.Error(); !strings.HasPrefix(got, tcase.expected) { - t.Errorf("unexpected output for %v: got [%v] expected [%v]", tcase.data, got, tcase.expected) + t.Errorf("unexpected output for %v: got \n[%v] \n expected \n[%v]", tcase.data, got, tcase.expected) } } else { if got := 
string(r); got != tcase.expected { @@ -160,3 +165,54 @@ func TestJSON(t *testing.T) { }) } } + +func TestReadVariableLength(t *testing.T) { + testcases := []struct { + data []byte + expected []int + }{{ + // we are only providing a truncated form of data, + // when this is actually used data will have another + // 126 bytes + data: []byte{12, 127, 1}, + expected: []int{127, 2}, + }, { + data: []byte{12, 127, 2}, + expected: []int{127, 2}, + }, { + data: []byte{12, 129, 1}, + expected: []int{129, 3}, + }, { + data: []byte{12, 129, 2}, + expected: []int{257, 3}, + }, { + data: []byte{12, 130, 1}, + expected: []int{130, 3}, + }, { + data: []byte{12, 130, 2}, + expected: []int{258, 3}, + }, { + data: []byte{12, 132, 1}, + expected: []int{132, 3}, + }, { + data: []byte{12, 132, 2}, + expected: []int{260, 3}, + }, { + data: []byte{12, 130, 130, 1}, + expected: []int{16642, 4}, + }, { + data: []byte{12, 130, 130, 2}, + expected: []int{33026, 4}, + }} + for _, tcase := range testcases { + t.Run(fmt.Sprintf("%v", tcase.data[1:]), func(t *testing.T) { + // start from position 1 because position 0 has the JSON type + len, pos := readVariableLength(tcase.data, 1) + if got := []int{len, pos}; !reflect.DeepEqual(got, tcase.expected) { + t.Errorf("unexpected output for %v: got \n[%v] \n expected \n[%v]", tcase.data, got, tcase.expected) + } + + }) + } + +} diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go index 343d97da1fd..07149079a43 100644 --- a/go/mysql/binlog_event_make.go +++ b/go/mysql/binlog_event_make.go @@ -151,7 +151,7 @@ func NewInvalidFormatDescriptionEvent(f BinlogFormat, s *FakeBinlogStream) Binlo } // NewRotateEvent returns a RotateEvent. -// The timestmap of such an event should be zero, so we patch it in. +// The timestamp of such an event should be zero, so we patch it in. 
func NewRotateEvent(f BinlogFormat, s *FakeBinlogStream, position uint64, filename string) BinlogEvent { length := 8 + // position len(filename) diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go index c6c00cc27cf..e2b1ebe56d5 100644 --- a/go/mysql/binlog_event_make_test.go +++ b/go/mysql/binlog_event_make_test.go @@ -358,7 +358,7 @@ func TestRowsEvent(t *testing.T) { // 1076895760 is 0x40302010. identifies, _ := rows.StringIdentifiesForTests(tm, 0) if expected := []string{"1076895760", "abc"}; !reflect.DeepEqual(identifies, expected) { - t.Fatalf("bad Rows idenfity, got %v expected %v", identifies, expected) + t.Fatalf("bad Rows identify, got %v expected %v", identifies, expected) } values, _ := rows.StringValuesForTests(tm, 0) if expected := []string{"1076895760", "abcd"}; !reflect.DeepEqual(values, expected) { diff --git a/go/mysql/client.go b/go/mysql/client.go index 2cafdf67213..34dcacbc0f2 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -387,7 +387,7 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Read the connection id. c.ConnectionID, pos, ok = readUint32(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no conneciton id") + return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id") } // Read the first part of the auth-plugin-data @@ -567,7 +567,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ 1 + // Character set. 23 + // Reserved. lenNullString(params.Uname) + - // length of scrambled passsword is handled below. + // length of scrambled password is handled below. len(scrambledPassword) + 21 + // "mysql_native_password" string. 1 // terminating zero. 
diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 8fdaa2d45e5..42b8215c86c 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -157,6 +157,22 @@ type Conn struct { // currentEphemeralBuffer for tracking allocated temporary buffer for writes and reads respectively. // It can be allocated from bufPool or heap and should be recycled in the same manner. currentEphemeralBuffer *[]byte + + // StatementID is the prepared statement ID. + StatementID uint32 + + // PrepareData is the map to use a prepared statement. + PrepareData map[uint32]*PrepareData +} + +// PrepareData is a buffer used for store prepare statement meta data +type PrepareData struct { + StatementID uint32 + PrepareStmt string + ParamsCount uint16 + ParamsType []int32 + ColumnNames []string + BindVars map[string]*querypb.BindVariable } // bufPool is used to allocate and free buffers in an efficient way. @@ -182,9 +198,10 @@ func newConn(conn net.Conn) *Conn { // size for reads. func newServerConn(conn net.Conn, listener *Listener) *Conn { c := &Conn{ - conn: conn, - listener: listener, - closed: sync2.NewAtomicBool(false), + conn: conn, + listener: listener, + closed: sync2.NewAtomicBool(false), + PrepareData: make(map[uint32]*PrepareData), } if listener.connReadBufferSize > 0 { c.bufferedReader = bufio.NewReaderSize(conn, listener.connReadBufferSize) @@ -801,6 +818,226 @@ func (c *Conn) handleNextCommand(handler Handler) error { return err } } + case ComPrepare: + query := c.parseComPrepare(data) + c.recycleReadPacket() + + var queries []string + if c.Capabilities&CapabilityClientMultiStatements != 0 { + queries, err = sqlparser.SplitStatementToPieces(query) + if err != nil { + log.Errorf("Conn %v: Error splitting query: %v", c, err) + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. 
+ log.Errorf("Conn %v: Error writing query error: %v", c, werr) + return werr + } + } + } else { + queries = []string{query} + } + + if len(queries) != 1 { + return fmt.Errorf("can not prepare multiple statements") + } + + // Popoulate PrepareData + c.StatementID++ + prepare := &PrepareData{ + StatementID: c.StatementID, + PrepareStmt: queries[0], + } + + statement, err := sqlparser.ParseStrictDDL(query) + if err != nil { + return err + } + + paramsCount := uint16(0) + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + switch node := node.(type) { + case *sqlparser.SQLVal: + if strings.HasPrefix(string(node.Val), ":v") { + paramsCount++ + } + } + return true, nil + }, statement) + + if paramsCount > 0 { + prepare.ParamsCount = paramsCount + prepare.ParamsType = make([]int32, paramsCount) + prepare.BindVars = make(map[string]*querypb.BindVariable, paramsCount) + } + + c.PrepareData[c.StatementID] = prepare + + fld, err := handler.ComPrepare(c, queries[0]) + + if err != nil { + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. + log.Error("Error writing query error to client %v: %v", c.ConnectionID, werr) + return werr + } + return nil + } + + if err := c.writePrepare(fld, c.PrepareData[c.StatementID]); err != nil { + return err + } + + case ComStmtExecute: + queryStart := time.Now() + stmtID, _, err := c.parseComStmtExecute(c.PrepareData, data) + c.recycleReadPacket() + + if stmtID != uint32(0) { + defer func() { + prepare := c.PrepareData[stmtID] + if prepare.BindVars != nil { + for k := range prepare.BindVars { + prepare.BindVars[k] = nil + } + } + }() + } + + if err != nil { + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. + log.Error("Error writing query error to client %v: %v", c.ConnectionID, werr) + return werr + } + return nil + } + + fieldSent := false + // sendFinished is set if the response should just be an OK packet. 
+ sendFinished := false + prepare := c.PrepareData[stmtID] + err = handler.ComStmtExecute(c, prepare, func(qr *sqltypes.Result) error { + if sendFinished { + // Failsafe: Unreachable if server is well-behaved. + return io.EOF + } + + if !fieldSent { + fieldSent = true + + if len(qr.Fields) == 0 { + sendFinished = true + // We should not send any more packets after this. + return c.writeOKPacket(qr.RowsAffected, qr.InsertID, c.StatusFlags, 0) + } + if err := c.writeFields(qr); err != nil { + return err + } + } + + return c.writeBinaryRows(qr) + }) + + // If no field was sent, we expect an error. + if !fieldSent { + // This is just a failsafe. Should never happen. + if err == nil || err == io.EOF { + err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + } + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. + log.Errorf("Error writing query error to %s: %v", c, werr) + return werr + } + } else { + if err != nil { + // We can't send an error in the middle of a stream. + // All we can do is abort the send, which will cause a 2013. + log.Errorf("Error in the middle of a stream to %s: %v", c, err) + return err + } + + // Send the end packet only sendFinished is false (results were streamed). + // In this case the affectedRows and lastInsertID are always 0 since it + // was a read operation. 
+ if !sendFinished { + if err := c.writeEndResult(false, 0, 0, handler.WarningCount(c)); err != nil { + log.Errorf("Error writing result to %s: %v", c, err) + return err + } + } + } + + timings.Record(queryTimingKey, queryStart) + case ComStmtSendLongData: + stmtID, paramID, chunkData, ok := c.parseComStmtSendLongData(data) + c.recycleReadPacket() + if !ok { + err := fmt.Errorf("error parsing statement send long data from client %v, returning error: %v", c.ConnectionID, data) + log.Error(err.Error()) + return err + } + + prepare, ok := c.PrepareData[stmtID] + if !ok { + err := fmt.Errorf("got wrong statement id from client %v, statement ID(%v) is not found from record", c.ConnectionID, stmtID) + log.Error(err.Error()) + return err + } + + if prepare.BindVars == nil || + prepare.ParamsCount == uint16(0) || + paramID >= prepare.ParamsCount { + err := fmt.Errorf("invalid parameter Number from client %v, statement: %v", c.ConnectionID, prepare.PrepareStmt) + log.Error(err.Error()) + return err + } + + chunk := make([]byte, len(chunkData)) + copy(chunk, chunkData) + + key := fmt.Sprintf("v%d", paramID+1) + if val, ok := prepare.BindVars[key]; ok { + val.Value = append(val.Value, chunk...) 
+ } else { + prepare.BindVars[key] = sqltypes.BytesBindVariable(chunk) + } + case ComStmtClose: + stmtID, ok := c.parseComStmtClose(data) + c.recycleReadPacket() + if ok { + delete(c.PrepareData, stmtID) + } + case ComStmtReset: + stmtID, ok := c.parseComStmtReset(data) + c.recycleReadPacket() + if !ok { + log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) + if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data); err != nil { + log.Error("Error writing error packet to client: %v", err) + return err + } + } + + prepare, ok := c.PrepareData[stmtID] + if !ok { + log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data) + if err := c.writeErrorPacket(CRCommandsOutOfSync, SSUnknownComError, "commands were executed in an improper order: %v", data); err != nil { + log.Error("Error writing error packet to client: %v", err) + return err + } + } + + if prepare.BindVars != nil { + for k := range prepare.BindVars { + prepare.BindVars[k] = nil + } + } + + if err := c.writeOKPacket(0, 0, c.StatusFlags, 0); err != nil { + log.Error("Error writing ComStmtReset OK packet to client %v: %v", c.ConnectionID, err) + return err + } default: log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) c.recycleReadPacket() @@ -987,8 +1224,9 @@ func ParseErrorPacket(data []byte) error { return NewSQLError(int(code), string(sqlState), "%v", msg) } -func (conn *Conn) GetTLSClientCerts() []*x509.Certificate { - if tlsConn, ok := conn.conn.(*tls.Conn); ok { +// GetTLSClientCerts gets TLS certificates. 
+func (c *Conn) GetTLSClientCerts() []*x509.Certificate { + if tlsConn, ok := c.conn.(*tls.Conn); ok { return tlsConn.ConnectionState().PeerCertificates } return nil diff --git a/go/mysql/constants.go b/go/mysql/constants.go index 4d1a530a861..b1d4491f637 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -153,6 +153,24 @@ const ( // ComBinlogDump is COM_BINLOG_DUMP. ComBinlogDump = 0x12 + // ComPrepare is COM_PREPARE. + ComPrepare = 0x16 + + // ComStmtExecute is COM_STMT_EXECUTE. + ComStmtExecute = 0x17 + + // ComStmtSendLongData is COM_STMT_SEND_LONG_DATA + ComStmtSendLongData = 0x18 + + // ComStmtClose is COM_STMT_CLOSE. + ComStmtClose = 0x19 + + // ComStmtReset is COM_STMT_RESET + ComStmtReset = 0x1a + + //ComStmtFetch is COM_STMT_FETCH + ComStmtFetch = 0x1c + // ComSetOption is COM_SET_OPTION ComSetOption = 0x1b diff --git a/go/mysql/encoding_test.go b/go/mysql/encoding_test.go index 75637146f91..7f56f583c86 100644 --- a/go/mysql/encoding_test.go +++ b/go/mysql/encoding_test.go @@ -69,7 +69,7 @@ func TestEncLenInt(t *testing.T) { t.Errorf("unexpected encoded value for %x, got %v expected %v", test.value, data, test.encoded) } - // Check succesful decoding. + // Check successful decoding. got, pos, ok := readLenEncInt(test.encoded, 0) if !ok || got != test.value || pos != len(test.encoded) { t.Errorf("readLenEncInt returned %x/%v/%v but expected %x/%v/%v", got, pos, ok, test.value, len(test.encoded), true) @@ -240,7 +240,7 @@ func TestEncString(t *testing.T) { t.Errorf("unexpected lenEncoded value for %v, got %v expected %v", test.value, data[1:], test.lenEncoded) } - // Check succesful decoding as string. + // Check successful decoding as string. 
got, pos, ok := readLenEncString(test.lenEncoded, 0) if !ok || got != test.value || pos != len(test.lenEncoded) { t.Errorf("readLenEncString returned %v/%v/%v but expected %v/%v/%v", got, pos, ok, test.value, len(test.lenEncoded), true) @@ -258,7 +258,7 @@ func TestEncString(t *testing.T) { t.Errorf("readLenEncString returned ok=true for empty value %v", test.value) } - // Check succesful skipping as string. + // Check successful skipping as string. pos, ok = skipLenEncString(test.lenEncoded, 0) if !ok || pos != len(test.lenEncoded) { t.Errorf("skipLenEncString returned %v/%v but expected %v/%v", pos, ok, len(test.lenEncoded), true) @@ -276,7 +276,7 @@ func TestEncString(t *testing.T) { t.Errorf("skipLenEncString returned ok=true for empty value %v", test.value) } - // Check succesful decoding as bytes. + // Check successful decoding as bytes. gotb, pos, ok := readLenEncStringAsBytes(test.lenEncoded, 0) if !ok || string(gotb) != test.value || pos != len(test.lenEncoded) { t.Errorf("readLenEncString returned %v/%v/%v but expected %v/%v/%v", gotb, pos, ok, test.value, len(test.lenEncoded), true) @@ -306,7 +306,7 @@ func TestEncString(t *testing.T) { t.Errorf("unexpected nullEncoded value for %v, got %v expected %v", test.value, data, test.nullEncoded) } - // Check succesful decoding. + // Check successful decoding. got, pos, ok = readNullString(test.nullEncoded, 0) if !ok || got != test.value || pos != len(test.nullEncoded) { t.Errorf("readNullString returned %v/%v/%v but expected %v/%v/%v", got, pos, ok, test.value, len(test.nullEncoded), true) diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index 8ddfc0b3998..46b5d3a53f9 100644 --- a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -458,7 +458,7 @@ func TestRowReplicationWithRealDatabase(t *testing.T) { } -// TestRowReplicationTypes creates a table wih all +// TestRowReplicationTypes creates a table with all // supported data types. 
Then we insert a row in it. then we re-build // the SQL for the values, re-insert these. Then we select from the // database and make sure both rows are identical. diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index 60dbb0547ee..14d9869095b 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -31,6 +31,8 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + + querypb "vitess.io/vitess/go/vt/proto/query" ) const appendEntry = -1 @@ -432,6 +434,16 @@ func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) { return entry.QueryResult, nil } +// ComPrepare is part of the mysql.Handler interface. +func (db *DB) ComPrepare(c *mysql.Conn, query string) ([]*querypb.Field, error) { + return nil, nil +} + +// ComStmtExecute is part of the mysql.Handler interface. +func (db *DB) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + // // Methods to add expected queries and results. // diff --git a/go/mysql/query.go b/go/mysql/query.go index b92ece87b63..a9be6dd442c 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -17,6 +17,11 @@ limitations under the License. package mysql import ( + "fmt" + "math" + "strconv" + "strings" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -141,7 +146,6 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { if err != nil { return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } - // Decimals is a byte. 
decimals, _, ok := readByte(colDef, pos) if !ok { @@ -510,6 +514,347 @@ func (c *Conn) parseComSetOption(data []byte) (uint16, bool) { return val, ok } +func (c *Conn) parseComPrepare(data []byte) string { + return string(data[1:]) +} + +func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []byte) (uint32, byte, error) { + pos := 0 + payload := data[1:] + bitMap := make([]byte, 0) + + // statement ID + stmtID, pos, ok := readUint32(payload, 0) + if !ok { + return 0, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading statement ID failed") + } + prepare, ok := prepareData[stmtID] + if !ok { + return 0, 0, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "statement ID is not found from record") + } + + // cursor type flags + cursorType, pos, ok := readByte(payload, pos) + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading cursor type flags failed") + } + + // iteration count + iterCount, pos, ok := readUint32(payload, pos) + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading iteration count failed") + } + if iterCount != uint32(1) { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "iteration count is not equal to 1") + } + + if prepare.ParamsCount > 0 { + bitMap, pos, ok = readBytes(payload, pos, int((prepare.ParamsCount+7)/8)) + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading NULL-bitmap failed") + } + } + + newParamsBoundFlag, pos, ok := readByte(payload, pos) + if ok && newParamsBoundFlag == 0x01 { + var mysqlType, flags byte + for i := uint16(0); i < prepare.ParamsCount; i++ { + mysqlType, pos, ok = readByte(payload, pos) + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter type failed") + } + + flags, pos, ok = readByte(payload, pos) + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter flags failed") 
+ } + + // convert MySQL type to internal type. + valType, err := sqltypes.MySQLToType(int64(mysqlType), int64(flags)) + if err != nil { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) + } + + prepare.ParamsType[i] = int32(valType) + } + } + + for i := 0; i < len(prepare.ParamsType); i++ { + var val sqltypes.Value + parameterID := fmt.Sprintf("v%d", i+1) + if v, ok := prepare.BindVars[parameterID]; ok { + if v != nil { + continue + } + } + + if (bitMap[i/8] & (1 << uint(i%8))) > 0 { + val, pos, ok = c.parseStmtArgs(nil, sqltypes.Null, pos) + } else { + val, pos, ok = c.parseStmtArgs(payload, querypb.Type(prepare.ParamsType[i]), pos) + } + if !ok { + return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding parameter value failed: %v", prepare.ParamsType[i]) + } + + prepare.BindVars[parameterID] = sqltypes.ValueBindVariable(val) + } + + return stmtID, cursorType, nil +} + +func (c *Conn) parseStmtArgs(data []byte, typ querypb.Type, pos int) (sqltypes.Value, int, bool) { + switch typ { + case sqltypes.Null: + return sqltypes.NULL, pos, true + case sqltypes.Int8: + val, pos, ok := readByte(data, pos) + return sqltypes.NewInt64(int64(val)), pos, ok + case sqltypes.Uint8: + val, pos, ok := readByte(data, pos) + return sqltypes.NewUint64(uint64(val)), pos, ok + case sqltypes.Uint16: + val, pos, ok := readUint16(data, pos) + return sqltypes.NewUint64(uint64(val)), pos, ok + case sqltypes.Int16, sqltypes.Year: + val, pos, ok := readUint16(data, pos) + return sqltypes.NewInt64(int64(val)), pos, ok + case sqltypes.Uint24, sqltypes.Uint32: + val, pos, ok := readUint32(data, pos) + return sqltypes.NewUint64(uint64(val)), pos, ok + case sqltypes.Int24, sqltypes.Int32: + val, pos, ok := readUint32(data, pos) + return sqltypes.NewInt64(int64(val)), pos, ok + case sqltypes.Float32: + val, pos, ok := readUint32(data, pos) + return 
sqltypes.NewFloat64(float64(math.Float32frombits(uint32(val)))), pos, ok + case sqltypes.Uint64: + val, pos, ok := readUint64(data, pos) + return sqltypes.NewUint64(val), pos, ok + case sqltypes.Int64: + val, pos, ok := readUint64(data, pos) + return sqltypes.NewInt64(int64(val)), pos, ok + case sqltypes.Float64: + val, pos, ok := readUint64(data, pos) + return sqltypes.NewFloat64(math.Float64frombits(val)), pos, ok + case sqltypes.Timestamp, sqltypes.Date, sqltypes.Datetime: + size, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + switch size { + case 0x00: + return sqltypes.NewVarChar(" "), pos, ok + case 0x0b: + year, pos, ok := readUint16(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + month, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + day, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + hour, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + minute, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + second, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + microSecond, pos, ok := readUint32(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + val := strconv.Itoa(int(year)) + "-" + + strconv.Itoa(int(month)) + "-" + + strconv.Itoa(int(day)) + " " + + strconv.Itoa(int(hour)) + ":" + + strconv.Itoa(int(minute)) + ":" + + strconv.Itoa(int(second)) + "." 
+ + strconv.Itoa(int(microSecond)) + + return sqltypes.NewVarChar(val), pos, ok + case 0x07: + year, pos, ok := readUint16(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + month, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + day, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + hour, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + minute, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + second, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + val := strconv.Itoa(int(year)) + "-" + + strconv.Itoa(int(month)) + "-" + + strconv.Itoa(int(day)) + " " + + strconv.Itoa(int(hour)) + ":" + + strconv.Itoa(int(minute)) + ":" + + strconv.Itoa(int(second)) + + return sqltypes.NewVarChar(val), pos, ok + case 0x04: + year, pos, ok := readUint16(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + month, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + day, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + val := strconv.Itoa(int(year)) + "-" + + strconv.Itoa(int(month)) + "-" + + strconv.Itoa(int(day)) + + return sqltypes.NewVarChar(val), pos, ok + default: + return sqltypes.NULL, 0, false + } + case sqltypes.Time: + size, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + switch size { + case 0x00: + return sqltypes.NewVarChar("00:00:00"), pos, ok + case 0x0c: + isNegative, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + days, pos, ok := readUint32(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + hour, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + + hours := uint32(hour) + days*uint32(24) + + minute, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + second, pos, ok := readByte(data, pos) + if !ok { 
+ return sqltypes.NULL, 0, false + } + microSecond, pos, ok := readUint32(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + + val := "" + if isNegative == 0x01 { + val += "-" + } + val += strconv.Itoa(int(hours)) + ":" + + strconv.Itoa(int(minute)) + ":" + + strconv.Itoa(int(second)) + "." + + strconv.Itoa(int(microSecond)) + + return sqltypes.NewVarChar(val), pos, ok + case 0x08: + isNegative, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + days, pos, ok := readUint32(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + hour, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + + hours := uint32(hour) + days*uint32(24) + + minute, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + second, pos, ok := readByte(data, pos) + if !ok { + return sqltypes.NULL, 0, false + } + + val := "" + if isNegative == 0x01 { + val += "-" + } + val += strconv.Itoa(int(hours)) + ":" + + strconv.Itoa(int(minute)) + ":" + + strconv.Itoa(int(second)) + + return sqltypes.NewVarChar(val), pos, ok + default: + return sqltypes.NULL, 0, false + } + case sqltypes.Decimal, sqltypes.Text, sqltypes.Blob, sqltypes.VarChar, sqltypes.VarBinary, sqltypes.Char, + sqltypes.Bit, sqltypes.Enum, sqltypes.Set, sqltypes.Geometry, sqltypes.Binary, sqltypes.TypeJSON: + val, pos, ok := readLenEncStringAsBytes(data, pos) + return sqltypes.MakeTrusted(sqltypes.VarBinary, val), pos, ok + default: + return sqltypes.NULL, pos, false + } +} + +func (c *Conn) parseComStmtSendLongData(data []byte) (uint32, uint16, []byte, bool) { + pos := 1 + statementID, pos, ok := readUint32(data, pos) + if !ok { + return 0, 0, nil, false + } + + paramID, pos, ok := readUint16(data, pos) + if !ok { + return 0, 0, nil, false + } + + return statementID, paramID, data[pos:], true +} + +func (c *Conn) parseComStmtClose(data []byte) (uint32, bool) { + val, _, ok := readUint32(data, 1) + return val, ok +} + +func (c *Conn) 
parseComStmtReset(data []byte) (uint32, bool) { + val, _, ok := readUint32(data, 1) + return val, ok +} + func (c *Conn) parseComInitDB(data []byte) string { return string(data[1:]) } @@ -655,3 +1000,483 @@ func (c *Conn) writeEndResult(more bool, affectedRows, lastInsertID uint64, warn return nil } + +// writePrepare writes a prepare query response to the wire. +func (c *Conn) writePrepare(fld []*querypb.Field, prepare *PrepareData) error { + paramsCount := prepare.ParamsCount + columnCount := 0 + if len(fld) != 0 { + columnCount = len(fld) + } + if columnCount > 0 { + prepare.ColumnNames = make([]string, columnCount) + } + + data := c.startEphemeralPacket(12) + pos := 0 + + pos = writeByte(data, pos, 0x00) + pos = writeUint32(data, pos, uint32(prepare.StatementID)) + pos = writeUint16(data, pos, uint16(columnCount)) + pos = writeUint16(data, pos, uint16(paramsCount)) + pos = writeByte(data, pos, 0x00) + writeUint16(data, pos, 0x0000) + + if err := c.writeEphemeralPacket(); err != nil { + return err + } + + if paramsCount > 0 { + for i := uint16(0); i < paramsCount; i++ { + if err := c.writeColumnDefinition(&querypb.Field{ + Name: "?", + Type: sqltypes.VarBinary, + Charset: 63}); err != nil { + return err + } + } + + // Now send an EOF packet. + if c.Capabilities&CapabilityClientDeprecateEOF == 0 { + // With CapabilityClientDeprecateEOF, we do not send this EOF. + if err := c.writeEOFPacket(c.StatusFlags, 0); err != nil { + return err + } + } + } + + for i, field := range fld { + field.Name = strings.Replace(field.Name, "'?'", "?", -1) + prepare.ColumnNames[i] = field.Name + if err := c.writeColumnDefinition(field); err != nil { + return err + } + } + + if columnCount > 0 { + // Now send an EOF packet. + if c.Capabilities&CapabilityClientDeprecateEOF == 0 { + // With CapabilityClientDeprecateEOF, we do not send this EOF. 
+ if err := c.writeEOFPacket(c.StatusFlags, 0); err != nil { + return err + } + } + } + + return c.flush() +} + +func (c *Conn) writeBinaryRow(fields []*querypb.Field, row []sqltypes.Value) error { + length := 0 + nullBitMapLen := (len(fields) + 7 + 2) / 8 + for _, val := range row { + if !val.IsNull() { + l, err := val2MySQLLen(val) + if err != nil { + return fmt.Errorf("internal value %v get MySQL value length error: %v", val, err) + } + length += l + } + } + + length += nullBitMapLen + 1 + + data := c.startEphemeralPacket(length) + pos := 0 + + pos = writeByte(data, pos, 0x00) + + for i := 0; i < nullBitMapLen; i++ { + pos = writeByte(data, pos, 0x00) + } + + for i, val := range row { + if val.IsNull() { + bytePos := (i+2)/8 + 1 + bitPos := (i + 2) % 8 + data[bytePos] |= 1 << uint(bitPos) + } else { + v, err := val2MySQL(val) + if err != nil { + c.recycleWritePacket() + return fmt.Errorf("internal value %v to MySQL value error: %v", val, err) + } + pos += copy(data[pos:], v) + } + } + + if pos != length { + return fmt.Errorf("internal error packet row: got %v bytes but expected %v", pos, length) + } + + return c.writeEphemeralPacket() +} + +// writeBinaryRows sends the rows of a Result with binary form. 
+func (c *Conn) writeBinaryRows(result *sqltypes.Result) error { + for _, row := range result.Rows { + if err := c.writeBinaryRow(result.Fields, row); err != nil { + return err + } + } + return nil +} + +func val2MySQL(v sqltypes.Value) ([]byte, error) { + var out []byte + pos := 0 + switch v.Type() { + case sqltypes.Null: + // no-op + case sqltypes.Int8: + val, err := strconv.ParseInt(v.ToString(), 10, 8) + if err != nil { + return []byte{}, err + } + out = make([]byte, 1) + writeByte(out, pos, uint8(val)) + case sqltypes.Uint8: + val, err := strconv.ParseUint(v.ToString(), 10, 8) + if err != nil { + return []byte{}, err + } + out = make([]byte, 1) + writeByte(out, pos, uint8(val)) + case sqltypes.Uint16: + val, err := strconv.ParseUint(v.ToString(), 10, 16) + if err != nil { + return []byte{}, err + } + out = make([]byte, 2) + writeUint16(out, pos, uint16(val)) + case sqltypes.Int16, sqltypes.Year: + val, err := strconv.ParseInt(v.ToString(), 10, 16) + if err != nil { + return []byte{}, err + } + out = make([]byte, 2) + writeUint16(out, pos, uint16(val)) + case sqltypes.Uint24, sqltypes.Uint32: + val, err := strconv.ParseUint(v.ToString(), 10, 32) + if err != nil { + return []byte{}, err + } + out = make([]byte, 4) + writeUint32(out, pos, uint32(val)) + case sqltypes.Int24, sqltypes.Int32: + val, err := strconv.ParseInt(v.ToString(), 10, 32) + if err != nil { + return []byte{}, err + } + out = make([]byte, 4) + writeUint32(out, pos, uint32(val)) + case sqltypes.Float32: + val, err := strconv.ParseFloat(v.ToString(), 32) + if err != nil { + return []byte{}, err + } + bits := math.Float32bits(float32(val)) + out = make([]byte, 4) + writeUint32(out, pos, bits) + case sqltypes.Uint64: + val, err := strconv.ParseUint(v.ToString(), 10, 64) + if err != nil { + return []byte{}, err + } + out = make([]byte, 8) + writeUint64(out, pos, uint64(val)) + case sqltypes.Int64: + val, err := strconv.ParseInt(v.ToString(), 10, 64) + if err != nil { + return []byte{}, err + } + out 
= make([]byte, 8) + writeUint64(out, pos, uint64(val)) + case sqltypes.Float64: + val, err := strconv.ParseFloat(v.ToString(), 64) + if err != nil { + return []byte{}, err + } + bits := math.Float64bits(val) + out = make([]byte, 8) + writeUint64(out, pos, bits) + case sqltypes.Timestamp, sqltypes.Date, sqltypes.Datetime: + if len(v.Raw()) > 19 { + out = make([]byte, 1+11) + out[pos] = 0x0b + pos++ + year, err := strconv.ParseUint(string(v.Raw()[0:4]), 10, 16) + if err != nil { + return []byte{}, err + } + month, err := strconv.ParseUint(string(v.Raw()[5:7]), 10, 8) + if err != nil { + return []byte{}, err + } + day, err := strconv.ParseUint(string(v.Raw()[8:10]), 10, 8) + if err != nil { + return []byte{}, err + } + hour, err := strconv.ParseUint(string(v.Raw()[11:13]), 10, 8) + if err != nil { + return []byte{}, err + } + minute, err := strconv.ParseUint(string(v.Raw()[14:16]), 10, 8) + if err != nil { + return []byte{}, err + } + second, err := strconv.ParseUint(string(v.Raw()[17:19]), 10, 8) + if err != nil { + return []byte{}, err + } + val := make([]byte, 6) + count := copy(val, v.Raw()[20:]) + for i := 0; i < (6 - count); i++ { + val[count+i] = 0x30 + } + microSecond, err := strconv.ParseUint(string(val), 10, 32) + if err != nil { + return []byte{}, err + } + pos = writeUint16(out, pos, uint16(year)) + pos = writeByte(out, pos, byte(month)) + pos = writeByte(out, pos, byte(day)) + pos = writeByte(out, pos, byte(hour)) + pos = writeByte(out, pos, byte(minute)) + pos = writeByte(out, pos, byte(second)) + writeUint32(out, pos, uint32(microSecond)) + } else if len(v.Raw()) > 10 { + out = make([]byte, 1+7) + out[pos] = 0x07 + pos++ + year, err := strconv.ParseUint(string(v.Raw()[0:4]), 10, 16) + if err != nil { + return []byte{}, err + } + month, err := strconv.ParseUint(string(v.Raw()[5:7]), 10, 8) + if err != nil { + return []byte{}, err + } + day, err := strconv.ParseUint(string(v.Raw()[8:10]), 10, 8) + if err != nil { + return []byte{}, err + } + hour, err := 
strconv.ParseUint(string(v.Raw()[11:13]), 10, 8) + if err != nil { + return []byte{}, err + } + minute, err := strconv.ParseUint(string(v.Raw()[14:16]), 10, 8) + if err != nil { + return []byte{}, err + } + second, err := strconv.ParseUint(string(v.Raw()[17:]), 10, 8) + if err != nil { + return []byte{}, err + } + pos = writeUint16(out, pos, uint16(year)) + pos = writeByte(out, pos, byte(month)) + pos = writeByte(out, pos, byte(day)) + pos = writeByte(out, pos, byte(hour)) + pos = writeByte(out, pos, byte(minute)) + writeByte(out, pos, byte(second)) + } else if len(v.Raw()) > 0 { + out = make([]byte, 1+4) + out[pos] = 0x04 + pos++ + year, err := strconv.ParseUint(string(v.Raw()[0:4]), 10, 16) + if err != nil { + return []byte{}, err + } + month, err := strconv.ParseUint(string(v.Raw()[5:7]), 10, 8) + if err != nil { + return []byte{}, err + } + day, err := strconv.ParseUint(string(v.Raw()[8:]), 10, 8) + if err != nil { + return []byte{}, err + } + pos = writeUint16(out, pos, uint16(year)) + pos = writeByte(out, pos, byte(month)) + writeByte(out, pos, byte(day)) + } else { + out = make([]byte, 1) + out[pos] = 0x00 + } + case sqltypes.Time: + if string(v.Raw()) == "00:00:00" { + out = make([]byte, 1) + out[pos] = 0x00 + } else if strings.Contains(string(v.Raw()), ".") { + out = make([]byte, 1+12) + out[pos] = 0x0c + pos++ + + sub1 := strings.Split(string(v.Raw()), ":") + if len(sub1) != 3 { + err := fmt.Errorf("incorrect time value, ':' is not found") + return []byte{}, err + } + sub2 := strings.Split(sub1[2], ".") + if len(sub2) != 2 { + err := fmt.Errorf("incorrect time value, '.' 
is not found") + return []byte{}, err + } + + var total []byte + if strings.HasPrefix(sub1[0], "-") { + out[pos] = 0x01 + total = []byte(sub1[0]) + total = total[1:] + } else { + out[pos] = 0x00 + total = []byte(sub1[0]) + } + pos++ + + h, err := strconv.ParseUint(string(total), 10, 32) + if err != nil { + return []byte{}, err + } + + days := uint32(h) / 24 + hours := uint32(h) % 24 + minute := sub1[1] + second := sub2[0] + microSecond := sub2[1] + + minutes, err := strconv.ParseUint(minute, 10, 8) + if err != nil { + return []byte{}, err + } + + seconds, err := strconv.ParseUint(second, 10, 8) + if err != nil { + return []byte{}, err + } + pos = writeUint32(out, pos, uint32(days)) + pos = writeByte(out, pos, byte(hours)) + pos = writeByte(out, pos, byte(minutes)) + pos = writeByte(out, pos, byte(seconds)) + + val := make([]byte, 6) + count := copy(val, microSecond) + for i := 0; i < (6 - count); i++ { + val[count+i] = 0x30 + } + microSeconds, err := strconv.ParseUint(string(val), 10, 32) + if err != nil { + return []byte{}, err + } + writeUint32(out, pos, uint32(microSeconds)) + } else if len(v.Raw()) > 0 { + out = make([]byte, 1+8) + out[pos] = 0x08 + pos++ + + sub1 := strings.Split(string(v.Raw()), ":") + if len(sub1) != 3 { + err := fmt.Errorf("incorrect time value, ':' is not found") + return []byte{}, err + } + + var total []byte + if strings.HasPrefix(sub1[0], "-") { + out[pos] = 0x01 + total = []byte(sub1[0]) + total = total[1:] + } else { + out[pos] = 0x00 + total = []byte(sub1[0]) + } + pos++ + + h, err := strconv.ParseUint(string(total), 10, 32) + if err != nil { + return []byte{}, err + } + + days := uint32(h) / 24 + hours := uint32(h) % 24 + minute := sub1[1] + second := sub1[2] + + minutes, err := strconv.ParseUint(minute, 10, 8) + if err != nil { + return []byte{}, err + } + + seconds, err := strconv.ParseUint(second, 10, 8) + if err != nil { + return []byte{}, err + } + pos = writeUint32(out, pos, uint32(days)) + pos = writeByte(out, pos, 
byte(hours)) + pos = writeByte(out, pos, byte(minutes)) + writeByte(out, pos, byte(seconds)) + } else { + err := fmt.Errorf("incorrect time value") + return []byte{}, err + } + case sqltypes.Decimal, sqltypes.Text, sqltypes.Blob, sqltypes.VarChar, + sqltypes.VarBinary, sqltypes.Char, sqltypes.Bit, sqltypes.Enum, + sqltypes.Set, sqltypes.Geometry, sqltypes.Binary, sqltypes.TypeJSON: + l := len(v.Raw()) + length := lenEncIntSize(uint64(l)) + l + out = make([]byte, length) + pos = writeLenEncInt(out, pos, uint64(l)) + copy(out[pos:], v.Raw()) + default: + out = make([]byte, len(v.Raw())) + copy(out, v.Raw()) + } + return out, nil +} + +func val2MySQLLen(v sqltypes.Value) (int, error) { + var length int + var err error + + switch v.Type() { + case sqltypes.Null: + length = 0 + case sqltypes.Int8, sqltypes.Uint8: + length = 1 + case sqltypes.Uint16, sqltypes.Int16, sqltypes.Year: + length = 2 + case sqltypes.Uint24, sqltypes.Uint32, sqltypes.Int24, sqltypes.Int32, sqltypes.Float32: + length = 4 + case sqltypes.Uint64, sqltypes.Int64, sqltypes.Float64: + length = 8 + case sqltypes.Timestamp, sqltypes.Date, sqltypes.Datetime: + if len(v.Raw()) > 19 { + length = 12 + } else if len(v.Raw()) > 10 { + length = 8 + } else if len(v.Raw()) > 0 { + length = 5 + } else { + length = 1 + } + case sqltypes.Time: + if string(v.Raw()) == "00:00:00" { + length = 1 + } else if strings.Contains(string(v.Raw()), ".") { + length = 13 + } else if len(v.Raw()) > 0 { + length = 9 + } else { + err = fmt.Errorf("incorrect time value") + } + case sqltypes.Decimal, sqltypes.Text, sqltypes.Blob, sqltypes.VarChar, + sqltypes.VarBinary, sqltypes.Char, sqltypes.Bit, sqltypes.Enum, + sqltypes.Set, sqltypes.Geometry, sqltypes.Binary, sqltypes.TypeJSON: + l := len(v.Raw()) + length = lenEncIntSize(uint64(l)) + l + default: + length = len(v.Raw()) + } + if err != nil { + return 0, err + } + return length, nil +} diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go index 14625d1e30b..2d4bf7a13e0 
100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -29,6 +29,48 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) +// Utility function to write sql query as packets to test parseComPrepare +func MockQueryPackets(t *testing.T, query string) []byte { + data := make([]byte, len(query)+1) + // Not sure if it makes a difference + pos := 0 + pos = writeByte(data, pos, ComPrepare) + copy(data[pos:], query) + return data +} + +func MockPrepareData(t *testing.T) (*PrepareData, *sqltypes.Result) { + sql := "select * from test_table where id = ?" + + result := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("1")), + }, + }, + RowsAffected: 1, + } + + prepare := &PrepareData{ + StatementID: 18, + PrepareStmt: sql, + ParamsCount: 1, + ParamsType: []int32{263}, + ColumnNames: []string{"id"}, + BindVars: map[string]*querypb.BindVariable{ + "v1": sqltypes.Int32BindVariable(10), + }, + } + + return prepare, result +} + func TestComInitDB(t *testing.T) { listener, sConn, cConn := createSocketPair(t) defer func() { @@ -76,6 +118,139 @@ func TestComSetOption(t *testing.T) { } } +func TestComStmtPrepare(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + sql := "select * from test_table where id = ?" 
+ mockData := MockQueryPackets(t, sql) + + if err := cConn.writePacket(mockData); err != nil { + t.Fatalf("writePacket failed: %v", err) + } + + data, err := sConn.ReadPacket() + if err != nil { + t.Fatalf("sConn.ReadPacket - ComPrepare failed: %v", err) + } + + parsedQuery := sConn.parseComPrepare(data) + if parsedQuery != sql { + t.Fatalf("Received incorrect query, want: %v, got: %v", sql, parsedQuery) + } + + prepare, result := MockPrepareData(t) + sConn.PrepareData = make(map[uint32]*PrepareData) + sConn.PrepareData[prepare.StatementID] = prepare + + // write the response to the client + if err := sConn.writePrepare(result.Fields, prepare); err != nil { + t.Fatalf("sConn.writePrepare failed: %v", err) + } + + resp, err := cConn.ReadPacket() + if err != nil { + t.Fatalf("cConn.ReadPacket failed: %v", err) + } + if uint32(resp[1]) != prepare.StatementID { + t.Fatalf("Received incorrect Statement ID, want: %v, got: %v", prepare.StatementID, resp[1]) + } +} + +func TestComStmtSendLongData(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + prepare, result := MockPrepareData(t) + cConn.PrepareData = make(map[uint32]*PrepareData) + cConn.PrepareData[prepare.StatementID] = prepare + if err := cConn.writePrepare(result.Fields, prepare); err != nil { + t.Fatalf("writePrepare failed: %v", err) + } + + // Since there's no writeComStmtSendLongData, we'll write a prepareStmt and check if we can read the StatementID + data, err := sConn.ReadPacket() + if err != nil || len(data) == 0 { + t.Fatalf("sConn.ReadPacket - ComStmtSendLongData failed: %v %v", data, err) + } + stmtID, paramID, chunkData, ok := sConn.parseComStmtSendLongData(data) + if !ok { + t.Fatalf("parseComStmtSendLongData failed") + } + if paramID != 1 { + t.Fatalf("Received incorrect ParamID, want %v, got %v", 1, paramID) + } + if stmtID != prepare.StatementID { + t.Fatalf("Received incorrect value, want: %v, got: %v", 
prepare.StatementID, stmtID) + } + // Check length of chunkData, since it is a subset of `data` and compare with it after we subtract the number of bytes that was read from it. + // sizeof(uint32) + sizeof(uint16) + 1 = 7 + if len(chunkData) != len(data)-7 { + t.Fatalf("Received bad chunkData") + } +} + +func TestComStmtExecute(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + prepare, _ := MockPrepareData(t) + cConn.PrepareData = make(map[uint32]*PrepareData) + cConn.PrepareData[prepare.StatementID] = prepare + + // These are the simulated packets for `select * from test_table where id = ?` + data := []byte{23, 18, 0, 0, 0, 128, 1, 0, 0, 0, 0, 1, 1, 128, 1} + + stmtID, _, err := sConn.parseComStmtExecute(cConn.PrepareData, data) + if err != nil { + t.Fatalf("parseComStmtExecute failed: %v", err) + } + if stmtID != 18 { + t.Fatalf("Parsed incorrect values") + } +} + +func TestComStmtClose(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + prepare, result := MockPrepareData(t) + cConn.PrepareData = make(map[uint32]*PrepareData) + cConn.PrepareData[prepare.StatementID] = prepare + if err := cConn.writePrepare(result.Fields, prepare); err != nil { + t.Fatalf("writePrepare failed: %v", err) + } + + // Since there's no writeComStmtClose, we'll write a prepareStmt and check if we can read the StatementID + data, err := sConn.ReadPacket() + if err != nil || len(data) == 0 { + t.Fatalf("sConn.ReadPacket - ComStmtClose failed: %v %v", data, err) + } + stmtID, ok := sConn.parseComStmtClose(data) + if !ok { + t.Fatalf("parseComStmtClose failed") + } + if stmtID != prepare.StatementID { + t.Fatalf("Received incorrect value, want: %v, got: %v", prepare.StatementID, stmtID) + } +} + func TestQueries(t *testing.T) { listener, sConn, cConn := createSocketPair(t) defer func() { diff --git 
a/go/mysql/server.go b/go/mysql/server.go index 64642ea53d3..5de219ff476 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -94,6 +95,14 @@ type Handler interface { // hang on to the byte slice. ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error + // ComPrepare is called when a connection receives a prepared + // statement query. + ComPrepare(c *Conn, query string) ([]*querypb.Field, error) + + // ComStmtExecute is called when a connection receives a statement + // execute query. + ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error + // WarningCount is called at the end of each query to obtain // the value to be returned to the client in the EOF packet. // Note that this will be called either in the context of the diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go index 8895425a7cd..49456128c9f 100644 --- a/go/mysql/server_test.go +++ b/go/mysql/server_test.go @@ -75,6 +75,7 @@ func (th *testHandler) NewConnection(c *Conn) { } func (th *testHandler) ConnectionClosed(c *Conn) { + } func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { @@ -171,6 +172,14 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R return nil } +func (th *testHandler) ComPrepare(c *Conn, query string) ([]*querypb.Field, error) { + return nil, nil +} + +func (th *testHandler) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + func (th *testHandler) WarningCount(c *Conn) uint16 { return th.warnings } diff --git a/go/netutil/conn.go b/go/netutil/conn.go index 40e59afcbca..90d7a5139f2 100644 --- a/go/netutil/conn.go +++ b/go/netutil/conn.go @@ -45,7 
+45,7 @@ func (c ConnWithTimeouts) Read(b []byte) (int, error) { return c.Conn.Read(b) } -// Write sets a write deadline and delagates to conn.Write +// Write sets a write deadline and delegates to conn.Write func (c ConnWithTimeouts) Write(b []byte) (int, error) { if c.writeTimeout == 0 { return c.Conn.Write(b) diff --git a/go/sqltypes/arithmetic.go b/go/sqltypes/arithmetic.go index 516c2c9f220..f5c18dd43a1 100644 --- a/go/sqltypes/arithmetic.go +++ b/go/sqltypes/arithmetic.go @@ -19,6 +19,8 @@ package sqltypes import ( "bytes" "fmt" + "math" + "strconv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,6 +42,25 @@ type numeric struct { var zeroBytes = []byte("0") +// Add adds two values together +// if v1 or v2 is null, then it returns null +func Add(v1, v2 Value) (Value, error) { + if v1.IsNull() || v2.IsNull() { + return NULL, nil + } + + lv1, err := newNumeric(v1) + + lv2, err := newNumeric(v2) + + lresult, err := addNumericWithError(lv1, lv2) + if err != nil { + return NULL, err + } + + return castFromNumeric(lresult, lresult.typ), nil +} + // NullsafeAdd adds two Values in a null-safe manner. A null value // is treated as 0. If both values are null, then a null is returned. // If both values are not null, a numeric value is built @@ -51,7 +72,7 @@ var zeroBytes = []byte("0") // addition, if one of the input types was Decimal, then // a Decimal is built. Otherwise, the final type of the // result is preserved. 
-func NullsafeAdd(v1, v2 Value, resultType querypb.Type) (Value, error) { +func NullsafeAdd(v1, v2 Value, resultType querypb.Type) Value { if v1.IsNull() { v1 = MakeTrusted(resultType, zeroBytes) } @@ -61,16 +82,14 @@ func NullsafeAdd(v1, v2 Value, resultType querypb.Type) (Value, error) { lv1, err := newNumeric(v1) if err != nil { - return NULL, err + return NULL } lv2, err := newNumeric(v2) if err != nil { - return NULL, err - } - lresult, err := addNumeric(lv1, lv2) - if err != nil { - return NULL, err + return NULL } + lresult := addNumeric(lv1, lv2) + return castFromNumeric(lresult, resultType) } @@ -224,10 +243,7 @@ func ToInt64(v Value) (int64, error) { // ToFloat64 converts Value to float64. func ToFloat64(v Value) (float64, error) { - num, err := newNumeric(v) - if err != nil { - return 0, err - } + num, _ := newNumeric(v) switch num.typ { case Int64: return float64(num.ival), nil @@ -292,7 +308,7 @@ func newNumeric(v Value) (numeric, error) { if fval, err := strconv.ParseFloat(str, 64); err == nil { return numeric{fval: fval, typ: Float64}, nil } - return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: '%s'", str) + return numeric{ival: 0, typ: Int64}, nil } // newIntegralNumeric parses a value and produces an Int64 or Uint64. 
@@ -323,22 +339,41 @@ func newIntegralNumeric(v Value) (numeric, error) { return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: '%s'", str) } -func addNumeric(v1, v2 numeric) (numeric, error) { +func addNumeric(v1, v2 numeric) numeric { v1, v2 = prioritize(v1, v2) switch v1.typ { case Int64: - return intPlusInt(v1.ival, v2.ival), nil + return intPlusInt(v1.ival, v2.ival) case Uint64: switch v2.typ { case Int64: return uintPlusInt(v1.uval, v2.ival) case Uint64: - return uintPlusUint(v1.uval, v2.uval), nil + return uintPlusUint(v1.uval, v2.uval) + } + case Float64: + return floatPlusAny(v1.fval, v2) + } + panic("unreachable") +} + +func addNumericWithError(v1, v2 numeric) (numeric, error) { + v1, v2 = prioritize(v1, v2) + switch v1.typ { + case Int64: + return intPlusIntWithError(v1.ival, v2.ival) + case Uint64: + switch v2.typ { + case Int64: + return uintPlusIntWithError(v1.uval, v2.ival) + case Uint64: + return uintPlusUintWithError(v1.uval, v2.uval) } case Float64: return floatPlusAny(v1.fval, v2), nil } panic("unreachable") + } // prioritize reorders the input parameters @@ -353,6 +388,7 @@ func prioritize(v1, v2 numeric) (altv1, altv2 numeric) { if v2.typ == Float64 { return v2, v1 } + } return v1, v2 } @@ -371,21 +407,47 @@ overflow: return numeric{typ: Float64, fval: float64(v1) + float64(v2)} } -func uintPlusInt(v1 uint64, v2 int64) (numeric, error) { - if v2 < 0 { - return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot add a negative number to an unsigned integer: %d, %d", v1, v2) +func intPlusIntWithError(v1, v2 int64) (numeric, error) { + result := v1 + v2 + if (result > v1) != (v2 > 0) { + return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v + %v", v1, v2) } - return uintPlusUint(v1, uint64(v2)), nil + return numeric{typ: Int64, ival: result}, nil +} + +func uintPlusInt(v1 uint64, v2 int64) numeric { + return uintPlusUint(v1, uint64(v2)) +} + +func 
uintPlusIntWithError(v1 uint64, v2 int64) (numeric, error) { + if v2 >= math.MaxInt64 && v1 > 0 { + return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v + %v", v1, v2) + } + + //convert to int -> uint is because for numeric operators (such as + or -) + //where one of the operands is an unsigned integer, the result is unsigned by default. + return uintPlusUintWithError(v1, uint64(v2)) } func uintPlusUint(v1, v2 uint64) numeric { result := v1 + v2 if result < v2 { return numeric{typ: Float64, fval: float64(v1) + float64(v2)} + } return numeric{typ: Uint64, uval: result} } +func uintPlusUintWithError(v1, v2 uint64) (numeric, error) { + result := v1 + v2 + + if result < v2 { + return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) + } + + return numeric{typ: Uint64, uval: result}, nil +} + func floatPlusAny(v1 float64, v2 numeric) numeric { switch v2.typ { case Int64: @@ -396,37 +458,43 @@ func floatPlusAny(v1 float64, v2 numeric) numeric { return numeric{typ: Float64, fval: v1 + v2.fval} } -func castFromNumeric(v numeric, resultType querypb.Type) (Value, error) { +func castFromNumeric(v numeric, resultType querypb.Type) Value { switch { case IsSigned(resultType): switch v.typ { case Int64: - return MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)), nil - case Uint64, Float64: - return NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: %v to %v", v.typ, resultType) + return MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) + case Uint64: + return MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.uval), 10)) + case Float64: + return MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.fval), 10)) + } case IsUnsigned(resultType): switch v.typ { case Uint64: - return MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)), nil - case Int64, Float64: - return NULL, 
vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: %v to %v", v.typ, resultType) + return MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) + case Int64: + return MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.ival), 10)) + case Float64: + return MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.fval), 10)) + } case IsFloat(resultType) || resultType == Decimal: switch v.typ { case Int64: - return MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)), nil + return MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) case Uint64: - return MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)), nil + return MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) case Float64: format := byte('g') if resultType == Decimal { format = 'f' } - return MakeTrusted(resultType, strconv.AppendFloat(nil, v.fval, format, -1, 64)), nil + return MakeTrusted(resultType, strconv.AppendFloat(nil, v.fval, format, -1, 64)) } } - return NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion to non-numeric: %v", resultType) + return NULL } func compareNumeric(v1, v2 numeric) int { diff --git a/go/sqltypes/arithmetic_test.go b/go/sqltypes/arithmetic_test.go index 458a5944dd9..81aeba0cf30 100644 --- a/go/sqltypes/arithmetic_test.go +++ b/go/sqltypes/arithmetic_test.go @@ -19,6 +19,7 @@ package sqltypes import ( "encoding/binary" "fmt" + "math" "reflect" "strconv" "testing" @@ -29,6 +30,111 @@ import ( ) func TestAdd(t *testing.T) { + tcases := []struct { + v1, v2 Value + out Value + err error + }{{ + + //All Nulls + v1: NULL, + v2: NULL, + out: NULL, + }, { + // First value null. + v1: NewInt32(1), + v2: NULL, + out: NULL, + }, { + // Second value null. 
+ v1: NULL, + v2: NewInt32(1), + out: NULL, + }, { + + // case with negatives + v1: NewInt64(-1), + v2: NewInt64(-2), + out: NewInt64(-3), + }, { + + // testing for overflow int64 + v1: NewInt64(math.MaxInt64), + v2: NewUint64(2), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in 2 + 9223372036854775807"), + }, { + + v1: NewInt64(-2), + v2: NewUint64(1), + out: NewUint64(math.MaxUint64), + }, { + + v1: NewInt64(math.MaxInt64), + v2: NewInt64(-2), + out: NewInt64(9223372036854775805), + }, { + // Normal case + v1: NewUint64(1), + v2: NewUint64(2), + out: NewUint64(3), + }, { + // testing for overflow uint64 + v1: NewUint64(math.MaxUint64), + v2: NewUint64(2), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in 18446744073709551615 + 2"), + }, { + + // int64 underflow + v1: NewInt64(math.MinInt64), + v2: NewInt64(-2), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in -9223372036854775808 + -2"), + }, { + + // checking int64 max value can be returned + v1: NewInt64(math.MaxInt64), + v2: NewUint64(0), + out: NewUint64(9223372036854775807), + }, { + + // testing whether uint64 max value can be returned + v1: NewUint64(math.MaxUint64), + v2: NewInt64(0), + out: NewUint64(math.MaxUint64), + }, { + + v1: NewUint64(math.MaxInt64), + v2: NewInt64(1), + out: NewUint64(9223372036854775808), + }, { + + v1: NewUint64(1), + v2: TestValue(VarChar, "c"), + out: NewUint64(1), + }, { + v1: NewUint64(1), + v2: TestValue(VarChar, "1.2"), + out: NewFloat64(2.2), + }} + + for _, tcase := range tcases { + + got, err := Add(tcase.v1, tcase.v2) + + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Add(%v, %v) error: %v, want %v", printValue(tcase.v1), printValue(tcase.v2), vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("Addition(%v, %v): %v, want %v", printValue(tcase.v1), 
printValue(tcase.v2), printValue(got), printValue(tcase.out)) + } + } + +} + +func TestNullsafeAdd(t *testing.T) { tcases := []struct { v1, v2 Value out Value @@ -67,21 +173,15 @@ func TestAdd(t *testing.T) { // Make sure underlying error is returned while adding. v1: NewInt64(-1), v2: NewUint64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "cannot add a negative number to an unsigned integer: 2, -1"), + out: NewInt64(-9223372036854775808), }, { // Make sure underlying error is returned while converting. v1: NewFloat64(1), v2: NewFloat64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: FLOAT64 to INT64"), + out: NewInt64(3), }} for _, tcase := range tcases { - got, err := NullsafeAdd(tcase.v1, tcase.v2, Int64) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Add(%v, %v) error: %v, want %v", printValue(tcase.v1), printValue(tcase.v2), vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } + got := NullsafeAdd(tcase.v1, tcase.v2, Int64) if !reflect.DeepEqual(got, tcase.out) { t.Errorf("Add(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), printValue(got), printValue(tcase.out)) @@ -346,7 +446,7 @@ func TestToFloat64(t *testing.T) { err error }{{ v: TestValue(VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), + out: 0, }, { v: NewInt64(1), out: 1, @@ -515,7 +615,7 @@ func TestNewNumeric(t *testing.T) { err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseFloat: parsing \"abcd\": invalid syntax"), }, { v: TestValue(VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), + out: numeric{typ: Float64, fval: 0}, }} for _, tcase := range tcases { got, err := newNumeric(tcase.v) @@ -623,7 +723,7 @@ func TestAddNumeric(t *testing.T) { }, { v1: numeric{typ: Int64, ival: -1}, v2: numeric{typ: Uint64, uval: 2}, - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "cannot add 
a negative number to an unsigned integer: 2, -1"), + out: numeric{typ: Float64, fval: 18446744073709551617}, }, { // Uint64 overflow. v1: numeric{typ: Uint64, uval: 18446744073709551615}, @@ -631,13 +731,7 @@ func TestAddNumeric(t *testing.T) { out: numeric{typ: Float64, fval: 18446744073709551617}, }} for _, tcase := range tcases { - got, err := addNumeric(tcase.v1, tcase.v2) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("addNumeric(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } + got := addNumeric(tcase.v1, tcase.v2) if got != tcase.out { t.Errorf("addNumeric(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) @@ -705,15 +799,15 @@ func TestCastFromNumeric(t *testing.T) { }, { typ: Int64, v: numeric{typ: Uint64, uval: 1}, - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: UINT64 to INT64"), + out: NewInt64(1), }, { typ: Int64, v: numeric{typ: Float64, fval: 1.2e-16}, - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: FLOAT64 to INT64"), + out: NewInt64(0), }, { typ: Uint64, v: numeric{typ: Int64, ival: 1}, - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: INT64 to UINT64"), + out: NewUint64(1), }, { typ: Uint64, v: numeric{typ: Uint64, uval: 1}, @@ -721,7 +815,7 @@ func TestCastFromNumeric(t *testing.T) { }, { typ: Uint64, v: numeric{typ: Float64, fval: 1.2e-16}, - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion: FLOAT64 to UINT64"), + out: NewUint64(0), }, { typ: Float64, v: numeric{typ: Int64, ival: 1}, @@ -753,13 +847,7 @@ func TestCastFromNumeric(t *testing.T) { err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected type conversion to non-numeric: VARBINARY"), }} for _, tcase := range tcases { - got, err := castFromNumeric(tcase.v, tcase.typ) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("castFromNumeric(%v, %v) error: %v, want 
%v", tcase.v, tcase.typ, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } + got := castFromNumeric(tcase.v, tcase.typ) if !reflect.DeepEqual(got, tcase.out) { t.Errorf("castFromNumeric(%v, %v): %v, want %v", tcase.v, tcase.typ, printValue(got), printValue(tcase.out)) @@ -1021,7 +1109,7 @@ func BenchmarkAddActual(b *testing.B) { v1 := MakeTrusted(Int64, []byte("1")) v2 := MakeTrusted(Int64, []byte("12")) for i := 0; i < b.N; i++ { - v1, _ = NullsafeAdd(v1, v2, Int64) + v1 = NullsafeAdd(v1, v2, Int64) } } diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index cf1bed67a42..b123e882d72 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -167,6 +167,7 @@ var mysqlToType = map[int64]querypb.Type{ 11: Time, 12: Datetime, 13: Year, + 15: VarChar, 16: Bit, 245: TypeJSON, 246: Decimal, diff --git a/go/sqltypes/type_test.go b/go/sqltypes/type_test.go index 08aed75c81b..a4d5ed6f9b0 100644 --- a/go/sqltypes/type_test.go +++ b/go/sqltypes/type_test.go @@ -406,8 +406,8 @@ func TestMySQLToType(t *testing.T) { } func TestTypeError(t *testing.T) { - _, err := MySQLToType(15, 0) - want := "unsupported type: 15" + _, err := MySQLToType(17, 0) + want := "unsupported type: 17" if err == nil || err.Error() != want { t.Errorf("MySQLToType: %v, want %s", err, want) } diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 39c91287e38..e53419154b5 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -89,9 +89,11 @@ func NewValue(typ querypb.Type, val []byte) (v Value, err error) { // comments. Other packages can also use the function to create // VarBinary or VarChar values. 
func MakeTrusted(typ querypb.Type, val []byte) Value { + if typ == Null { return NULL } + return Value{typ: typ, val: val} } diff --git a/go/stats/opentsdb/opentsdb.go b/go/stats/opentsdb/opentsdb.go index 9b9c1b8fdc4..0a75af9d701 100644 --- a/go/stats/opentsdb/opentsdb.go +++ b/go/stats/opentsdb/opentsdb.go @@ -118,7 +118,7 @@ func (backend *openTSDBBackend) getDataPoints() []dataPoint { return dataCollector.dataPoints } -// combineMetricName joins parts of a hierachical name with a "." +// combineMetricName joins parts of a hierarchical name with a "." func combineMetricName(parts ...string) string { return strings.Join(parts, ".") } diff --git a/go/vt/automation/scheduler.go b/go/vt/automation/scheduler.go index cfbb5fc0520..ddaa1fc4c69 100644 --- a/go/vt/automation/scheduler.go +++ b/go/vt/automation/scheduler.go @@ -55,7 +55,7 @@ type Scheduler struct { // Guarded by "mu". state schedulerState - // Guarded by "taskCreatorMu". May be overriden by testing code. + // Guarded by "taskCreatorMu". May be overridden by testing code. taskCreator taskCreator taskCreatorMu sync.Mutex diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index b628486fb93..b62522a1ce9 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -174,7 +174,7 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables // and processes the events. It returns nil if the provided context // was canceled, or if we reached the stopping point. // If an error is encountered, it updates the vreplication state to "Error". -// If a stop position was specifed, and reached, the state is updated to "Stopped". +// If a stop position was specified, and reached, the state is updated to "Stopped". 
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { if err := SetVReplicationState(blp.dbClient, blp.uid, BlpRunning, ""); err != nil { log.Errorf("Error writing Running state: %v", err) diff --git a/go/vt/binlog/keyrange_filter.go b/go/vt/binlog/keyrange_filter.go index 1a0debed59e..6d4ae50ff14 100644 --- a/go/vt/binlog/keyrange_filter.go +++ b/go/vt/binlog/keyrange_filter.go @@ -177,7 +177,7 @@ func logExtractKeySpaceIDError(err error) { case sqlannotation.ExtractKeySpaceIDReplicationUnfriendlyError: log.Errorf( "Found replication unfriendly statement. (%s). "+ - "Filtered replication should abort, but we're currenty just skipping the statement.", + "Filtered replication should abort, but we're currently just skipping the statement.", extractErr.Message) updateStreamErrors.Add("ExtractKeySpaceIDReplicationUnfriendlyError", 1) default: diff --git a/go/vt/binlog/slave_connection.go b/go/vt/binlog/slave_connection.go index 2c8db9bdf85..c44d6e87f3b 100644 --- a/go/vt/binlog/slave_connection.go +++ b/go/vt/binlog/slave_connection.go @@ -171,7 +171,7 @@ func (sc *SlaveConnection) streamEvents(ctx context.Context) chan mysql.BinlogEv // The startup phase will list all the binary logs, and find the one // that has events starting strictly before the provided timestamp. It // will then start from there, and stream all events. It is the -// responsability of the calling site to filter the events more. +// responsibility of the calling site to filter the events more. // // MySQL 5.6+ note: we need to do it that way because of the way the // GTIDSet works. 
In the previous two streaming functions, we pass in diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index f180f9dc88a..c15ad1e6415 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -91,7 +91,7 @@ func RegisterFlags(userKeys ...string) { } func registerBaseFlags() { - flag.StringVar(&baseConfig.UnixSocket, "db_socket", "", "The unix socket to connect on. If this is specifed, host and port will not be used.") + flag.StringVar(&baseConfig.UnixSocket, "db_socket", "", "The unix socket to connect on. If this is specified, host and port will not be used.") flag.StringVar(&baseConfig.Host, "db_host", "", "The host name for the tcp connection.") flag.IntVar(&baseConfig.Port, "db_port", 0, "tcp port") flag.StringVar(&baseConfig.Charset, "db_charset", "", "Character set. Only utf8 or latin1 based character sets are supported.") diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index cebc27851a2..1e8237c8dca 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -53,7 +53,7 @@ func IsReplicationLagVeryHigh(tabletStats *TabletStats) bool { // lags of (30m, 35m, 40m, 45m) return all. // // One thing to know about this code: vttablet also has a couple flags that impact the logic here: -// * unhealthy_threshold: if replication lag is higher than this, a tablet will be reported as unhealhty. +// * unhealthy_threshold: if replication lag is higher than this, a tablet will be reported as unhealthy. // The default for this is 2h, same as the discovery_high_replication_lag_minimum_serving here. // * degraded_threshold: this is only used by vttablet for display. It should match // discovery_low_replication_lag here, so the vttablet status display matches what vtgate will do of it. 
diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 7cba5430a6e..12a7651146c 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -42,8 +42,8 @@ const ( backupInnodbLogGroupHomeDir = "InnoDBLog" backupData = "Data" - // the manifest file name - backupManifest = "MANIFEST" + // backupManifestFileName is the MANIFEST file name within a backup. + backupManifestFileName = "MANIFEST" // RestoreState is the name of the sentinel file used to detect whether a previous restore // terminated abnormally RestoreState = "restore_in_progress" @@ -286,11 +286,16 @@ func Restore( return mysql.Position{}, ErrNoBackup } - be, err := GetBackupEngine() + bh, err := FindBackupToRestore(ctx, cnf, mysqld, logger, dir, bhs) + if err != nil { + return rval, err + } + + re, err := GetRestoreEngine(ctx, bh) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "Failed to find backup engine") + return mysql.Position{}, vterrors.Wrap(err, "Failed to find restore engine") } - if rval, err = be.ExecuteRestore(ctx, cnf, mysqld, logger, dir, bhs, restoreConcurrency, hookExtraEnv); err != nil { + if rval, err = re.ExecuteRestore(ctx, cnf, mysqld, logger, dir, bh, restoreConcurrency, hookExtraEnv); err != nil { return rval, err } diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index 2eb59cf9e11..50f07739dab 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -34,47 +34,123 @@ import ( var ( // BackupEngineImplementation is the implementation to use for BackupEngine - backupEngineImplementation = flag.String("backup_engine_implementation", builtin, "which implementation to use for the backup method, builtin or xtrabackup") + backupEngineImplementation = flag.String("backup_engine_implementation", builtinBackupEngineName, "Specifies which implementation to use for creating new backups (builtin or xtrabackup). 
Restores will always be done with whichever engine created a given backup.") ) -// BackupEngine is the interface to the backup engine +// BackupEngine is the interface to take a backup with a given engine. type BackupEngine interface { ExecuteBackup(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) (bool, error) - ExecuteRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, dir string, bhs []backupstorage.BackupHandle, restoreConcurrency int, hookExtraEnv map[string]string) (mysql.Position, error) + ShouldDrainForBackup() bool } -// BackupEngineMap contains the registered implementations for BackupEngine -var BackupEngineMap = make(map[string]BackupEngine) +// RestoreEngine is the interface to restore a backup with a given engine. +type RestoreEngine interface { + ExecuteRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, dir string, bh backupstorage.BackupHandle, restoreConcurrency int, hookExtraEnv map[string]string) (mysql.Position, error) +} + +// BackupRestoreEngine is a combination of BackupEngine and RestoreEngine. +type BackupRestoreEngine interface { + BackupEngine + RestoreEngine +} -// GetBackupEngine returns the current BackupEngine implementation. -// Should be called after flags have been initialized. +// BackupRestoreEngineMap contains the registered implementations for +// BackupEngine and RestoreEngine. +var BackupRestoreEngineMap = make(map[string]BackupRestoreEngine) + +// GetBackupEngine returns the BackupEngine implementation that should be used +// to create new backups. +// +// To restore a backup, you should instead get the appropriate RestoreEngine for +// a particular backup by calling GetRestoreEngine(). +// +// This must only be called after flags have been parsed. 
func GetBackupEngine() (BackupEngine, error) { - be, ok := BackupEngineMap[*backupEngineImplementation] + name := *backupEngineImplementation + be, ok := BackupRestoreEngineMap[name] if !ok { - return nil, vterrors.New(vtrpc.Code_NOT_FOUND, "no registered implementation of BackupEngine") + return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "unknown BackupEngine implementation %q", name) } return be, nil } -func findBackupToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, dir string, bhs []backupstorage.BackupHandle, bm interface{}) (backupstorage.BackupHandle, error) { +// GetRestoreEngine returns the RestoreEngine implementation to restore a given backup. +// It reads the MANIFEST file from the backup to check which engine was used to create it. +func GetRestoreEngine(ctx context.Context, backup backupstorage.BackupHandle) (RestoreEngine, error) { + manifest, err := GetBackupManifest(ctx, backup) + if err != nil { + return nil, vterrors.Wrap(err, "can't get backup MANIFEST") + } + engine := manifest.BackupMethod + if engine == "" { + // The builtin engine is the only one that ever left BackupMethod unset. + engine = builtinBackupEngineName + } + re, ok := BackupRestoreEngineMap[engine] + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "can't restore backup created with %q engine; no such BackupEngine implementation is registered", manifest.BackupMethod) + } + return re, nil +} + +// GetBackupManifest returns the common fields of the MANIFEST file for a given backup. +func GetBackupManifest(ctx context.Context, backup backupstorage.BackupHandle) (*BackupManifest, error) { + manifest := &BackupManifest{} + if err := getBackupManifestInto(ctx, backup, manifest); err != nil { + return nil, err + } + return manifest, nil +} + +// getBackupManifestInto fetches and decodes a MANIFEST file into the specified object. 
+func getBackupManifestInto(ctx context.Context, backup backupstorage.BackupHandle, outManifest interface{}) error { + file, err := backup.ReadFile(ctx, backupManifestFileName) + if err != nil { + return vterrors.Wrap(err, "can't read MANIFEST") + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(outManifest); err != nil { + return vterrors.Wrap(err, "can't decode MANIFEST") + } + return nil +} + +// BackupManifest defines the common fields in the MANIFEST file. +// All backup engines must include at least these fields. They are free to add +// their own custom fields by embedding this struct anonymously into their own +// custom struct, as long as their custom fields don't have conflicting names. +type BackupManifest struct { + // BackupMethod is the name of the backup engine that created this backup. + // If this is empty, the backup engine is assumed to be "builtin" since that + // was the only engine that ever left this field empty. All new backup + // engines are required to set this field to the backup engine name. + BackupMethod string + + // Position is the replication position at which the backup was taken. + Position mysql.Position + + // FinishedTime is the time (in RFC 3339 format, UTC) at which the backup finished, if known. + // Some backups may not set this field if they were created before the field was added. + FinishedTime string +} + +// FindBackupToRestore returns a selected candidate backup to be restored. +// It returns the most recent backup that is complete, meaning it has a valid +// MANIFEST file. +func FindBackupToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, dir string, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, error) { var bh backupstorage.BackupHandle var index int for index = len(bhs) - 1; index >= 0; index-- { bh = bhs[index] - rc, err := bh.ReadFile(ctx, backupManifest) + // Check that the backup MANIFEST exists and can be successfully decoded. 
+ _, err := GetBackupManifest(ctx, bh) if err != nil { log.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), dir, err) continue } - err = json.NewDecoder(rc).Decode(&bm) - rc.Close() - if err != nil { - log.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage (cannot JSON decode MANIFEST: %v)", bh.Name(), dir, err) - continue - } - logger.Infof("Restore: found backup %v %v to restore", bh.Directory(), bh.Name()) break } diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index 92728a39c05..eba829a16bc 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -27,6 +27,7 @@ import ( "path" "strings" "sync" + "time" "github.com/klauspost/pgzip" "vitess.io/vitess/go/mysql" @@ -41,9 +42,9 @@ import ( ) const ( - builtin = "builtin" - writerBufferSize = 2 * 1024 * 1024 - dataDictionaryFile = "mysql.ibd" + builtinBackupEngineName = "builtin" + writerBufferSize = 2 * 1024 * 1024 + dataDictionaryFile = "mysql.ibd" ) // BuiltinBackupEngine encapsulates the logic of the builtin engine @@ -57,19 +58,19 @@ type BuiltinBackupEngine struct { // Position that the backup was taken at, and the transform hook used, // if any. type builtinBackupManifest struct { + // BackupManifest is an anonymous embedding of the base manifest struct. + BackupManifest + // FileEntries contains all the files in the backup FileEntries []FileEntry - // Position is the position at which the backup was taken - Position mysql.Position - // TransformHook that was used on the files, if any. TransformHook string - // SkipCompress can be set if the backup files were not run - // through gzip. It is the negative of the flag, so old - // backups that don't have this flag are assumed to be - // compressed. + // SkipCompress is true if the backup files were NOT run through gzip. 
+ // The field is expressed as a negative because it will come through as + // false for backups that were created before the field existed, and those + // backups all had compression enabled. SkipCompress bool } @@ -339,7 +340,7 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, cnf *Mycnf, my } // backupFiles finds the list of files to backup, and creates the backup. -func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, replicationPosition mysql.Position, backupConcurrency int, hookExtraEnv map[string]string) (err error) { +func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, replicationPosition mysql.Position, backupConcurrency int, hookExtraEnv map[string]string) (finalErr error) { // Get the files to backup. fes, err := findFilesToBackup(cnf) if err != nil { @@ -376,39 +377,45 @@ func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysq } // open the MANIFEST - wc, err := bh.AddFile(ctx, backupManifest, 0) + wc, err := bh.AddFile(ctx, backupManifestFileName, 0) if err != nil { - return vterrors.Wrapf(err, "cannot add %v to backup", backupManifest) + return vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } defer func() { - if closeErr := wc.Close(); err == nil { - err = closeErr + if closeErr := wc.Close(); finalErr == nil { + finalErr = closeErr } }() // JSON-encode and write the MANIFEST bm := &builtinBackupManifest{ + // Common base fields + BackupManifest: BackupManifest{ + BackupMethod: builtinBackupEngineName, + Position: replicationPosition, + FinishedTime: time.Now().UTC().Format(time.RFC3339), + }, + + // Builtin-specific fields FileEntries: fes, - Position: replicationPosition, TransformHook: *backupStorageHook, SkipCompress: !*backupStorageCompress, } data, err := json.MarshalIndent(bm, "", " ") 
if err != nil { - return vterrors.Wrapf(err, "cannot JSON encode %v", backupManifest) + return vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) } if _, err := wc.Write([]byte(data)); err != nil { - return vterrors.Wrapf(err, "cannot write %v", backupManifest) + return vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) } return nil } // backupFile backs up an individual file. -func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fe *FileEntry, name string, hookExtraEnv map[string]string) (err error) { +func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fe *FileEntry, name string, hookExtraEnv map[string]string) (finalErr error) { // Open the source file for reading. - var source *os.File - source, err = fe.open(cnf, true) + source, err := fe.open(cnf, true) if err != nil { return err } @@ -427,11 +434,11 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql } defer func(name, fileName string) { if rerr := wc.Close(); rerr != nil { - if err != nil { + if finalErr != nil { // We already have an error, just log this one. 
logger.Errorf2(rerr, "failed to close file %v,%v", name, fe.Name) } else { - err = rerr + finalErr = rerr } } }(name, fe.Name) @@ -512,24 +519,23 @@ func (be *BuiltinBackupEngine) ExecuteRestore( mysqld MysqlDaemon, logger logutil.Logger, dir string, - bhs []backupstorage.BackupHandle, + bh backupstorage.BackupHandle, restoreConcurrency int, hookExtraEnv map[string]string) (mysql.Position, error) { zeroPosition := mysql.Position{} var bm builtinBackupManifest - bh, err := findBackupToRestore(ctx, cnf, mysqld, logger, dir, bhs, &bm) - if err != nil { + if err := getBackupManifestInto(ctx, bh, &bm); err != nil { return zeroPosition, err } // mark restore as in progress - if err = createStateFile(cnf); err != nil { + if err := createStateFile(cnf); err != nil { return zeroPosition, err } - if err = prepareToRestore(ctx, cnf, mysqld, logger); err != nil { + if err := prepareToRestore(ctx, cnf, mysqld, logger); err != nil { return zeroPosition, err } @@ -537,7 +543,7 @@ func (be *BuiltinBackupEngine) ExecuteRestore( if err := be.restoreFiles(context.Background(), cnf, bh, bm.FileEntries, bm.TransformHook, !bm.SkipCompress, restoreConcurrency, hookExtraEnv, logger); err != nil { // don't delete the file here because that is how we detect an interrupted restore - return zeroPosition, err + return zeroPosition, vterrors.Wrap(err, "failed to restore files") } logger.Infof("Restore: returning replication position %v", bm.Position) @@ -566,7 +572,10 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, cnf *Mycnf, bh // And restore the file. 
name := fmt.Sprintf("%v", i) logger.Infof("Copying file %v: %v", name, fes[i].Name) - rec.RecordError(be.restoreFile(ctx, cnf, bh, &fes[i], transformHook, compress, name, hookExtraEnv)) + err := be.restoreFile(ctx, cnf, bh, &fes[i], transformHook, compress, name, hookExtraEnv) + if err != nil { + rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fes[i].Name)) + } }(i) } wg.Wait() @@ -574,27 +583,26 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, cnf *Mycnf, bh } // restoreFile restores an individual file. -func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, fe *FileEntry, transformHook string, compress bool, name string, hookExtraEnv map[string]string) (err error) { +func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, fe *FileEntry, transformHook string, compress bool, name string, hookExtraEnv map[string]string) (finalErr error) { // Open the source file for reading. - var source io.ReadCloser - source, err = bh.ReadFile(ctx, name) + source, err := bh.ReadFile(ctx, name) if err != nil { - return err + return vterrors.Wrap(err, "can't open source file for reading") } defer source.Close() // Open the destination file for writing. dstFile, err := fe.open(cnf, false) if err != nil { - return err + return vterrors.Wrap(err, "can't open destination file for writing") } defer func() { if cerr := dstFile.Close(); cerr != nil { - if err != nil { + if finalErr != nil { // We already have an error, just log this one. 
log.Errorf("failed to close file %v: %v", name, cerr) } else { - err = cerr + finalErr = vterrors.Wrap(cerr, "failed to close destination file") } } }() @@ -624,15 +632,15 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh b if compress { gz, err := pgzip.NewReader(reader) if err != nil { - return err + return vterrors.Wrap(err, "can't open gzip decompressor") } defer func() { if cerr := gz.Close(); cerr != nil { - if err != nil { + if finalErr != nil { // We already have an error, just log this one. - log.Errorf("failed to close gunziper %v: %v", name, cerr) + log.Errorf("failed to close gzip decompressor %v: %v", name, cerr) } else { - err = cerr + finalErr = vterrors.Wrap(cerr, "failed to close gzip decompressor") } } }() @@ -641,7 +649,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh b // Copy the data. Will also write to the hasher. if _, err = io.Copy(dst, reader); err != nil { - return err + return vterrors.Wrap(err, "failed to copy file contents") } // Close the Pipe. @@ -662,9 +670,19 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh b } // Flush the buffer. 
- return dst.Flush() + if err := dst.Flush(); err != nil { + return vterrors.Wrap(err, "failed to flush destination buffer") + } + + return nil +} + +// ShouldDrainForBackup satisfies the BackupEngine interface +// backup requires query service to be stopped, hence true +func (be *BuiltinBackupEngine) ShouldDrainForBackup() bool { + return true } func init() { - BackupEngineMap["builtin"] = &BuiltinBackupEngine{} + BackupRestoreEngineMap["builtin"] = &BuiltinBackupEngine{} } diff --git a/go/vt/mysqlctl/capabilityset.go b/go/vt/mysqlctl/capabilityset.go new file mode 100644 index 00000000000..68319450f53 --- /dev/null +++ b/go/vt/mysqlctl/capabilityset.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Vitess Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + Detect server flavors and capabilities +*/ + +package mysqlctl + +type mysqlFlavor string + +const ( + flavorMySQL mysqlFlavor = "mysql" + flavorPercona mysqlFlavor = "percona" + flavorMariaDB mysqlFlavor = "mariadb" +) + +// capabilitySet pairs a detected mysqld flavor with its server version so callers can query which features the running server supports. 
+type capabilitySet struct { + flavor mysqlFlavor + version serverVersion +} + +func newCapabilitySet(f mysqlFlavor, v serverVersion) (c capabilitySet) { + c.flavor = f + c.version = v + return +} + +func (c *capabilitySet) hasMySQLUpgradeInServer() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 8, Minor: 0, Patch: 16}) +} +func (c *capabilitySet) hasInitializeInServer() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 5, Minor: 7, Patch: 0}) +} +func (c *capabilitySet) hasMySQLxEnabledByDefault() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 8, Minor: 0, Patch: 11}) +} +func (c *capabilitySet) hasPersistConfig() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 8, Minor: 0, Patch: 0}) +} +func (c *capabilitySet) hasShutdownCommand() bool { + return (c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 5, Minor: 7, Patch: 9})) || (c.isMariaDB() && c.version.atLeast(serverVersion{Major: 10, Minor: 0, Patch: 4})) +} +func (c *capabilitySet) hasBackupLocks() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 8, Minor: 0, Patch: 0}) +} +func (c *capabilitySet) hasDefaultUft8mb4() bool { + return c.isMySQLLike() && c.version.atLeast(serverVersion{Major: 8, Minor: 0, Patch: 0}) +} +func (c *capabilitySet) hasSemiSyncEnabledByDefault() bool { + return c.isMariaDB() && c.version.atLeast(serverVersion{Major: 10, Minor: 3, Patch: 3}) +} + +// IsMySQLLike tests if the server is either MySQL +// or Percona Server. At least currently, Vitess doesn't +// make use of any specific Percona Server features. +func (c *capabilitySet) isMySQLLike() bool { + return c.flavor == flavorMySQL || c.flavor == flavorPercona +} + +// IsMariaDB tests if the server is MariaDB. 
+// IsMySQLLike() and IsMariaDB() are mutually exclusive +func (c *capabilitySet) isMariaDB() bool { + return c.flavor == flavorMariaDB +} diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go index 166ed5a01c0..dcc76a6d945 100644 --- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go @@ -54,7 +54,7 @@ type FakeMysqlDaemon struct { // Replicating is updated when calling StartSlave / StopSlave // (it is not used at all when calling SlaveStatus, it is the - // test owner responsability to have these two match) + // test owner responsibility to have these two match) Replicating bool // CurrentMasterPosition is returned by MasterPosition @@ -117,7 +117,7 @@ type FakeMysqlDaemon struct { // ExecuteSuperQueryList to be called with. If it doesn't // match, ExecuteSuperQueryList will return an error. // Note each string is just a substring if it begins with SUB, - // so we support partial queries (usefull when queries contain + // so we support partial queries (useful when queries contain // data fields like timestamps) ExpectedExecuteSuperQueryList []string diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 82e90c60213..e99f63ec33d 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -25,6 +25,7 @@ package mysqlctl import ( "bufio" + "bytes" "errors" "flag" "fmt" @@ -34,12 +35,12 @@ import ( "os/exec" "path" "path/filepath" + "regexp" + "strconv" "strings" "sync" "time" - "bytes" - "golang.org/x/net/context" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/stats" @@ -78,6 +79,8 @@ var ( dbaMysqlStats = stats.NewTimings("MysqlDba", "MySQL DBA stats", "operation") allprivsMysqlStats = stats.NewTimings("MysqlAllPrivs", "MySQl Stats for all privs", "operation") appMysqlStats = stats.NewTimings("MysqlApp", "MySQL app stats", "operation") + + versionRegex = regexp.MustCompile(`Ver ([0-9]+)\.([0-9]+)\.([0-9]+)`) ) // Mysqld 
is the object that represents a mysqld daemon running on this server. @@ -86,6 +89,8 @@ type Mysqld struct { dbaPool *dbconnpool.ConnectionPool appPool *dbconnpool.ConnectionPool + capabilities capabilitySet + // mutex protects the fields below. mutex sync.Mutex onTermFuncs []func() @@ -107,9 +112,95 @@ func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld { result.appPool = dbconnpool.NewConnectionPool("AppConnPool", *appPoolSize, *appIdleTimeout, *poolDynamicHostnameResolution) result.appPool.Open(dbcfgs.AppWithDB(), appMysqlStats) + version, getErr := getVersionString() + f, v, err := parseVersionString(version) + + // Fallback if required + if getErr != nil || err != nil { + f, v, err = getVersionFromEnv() + if err != nil { + panic("could not detect version from mysqld --version or MYSQL_FLAVOR") + } + + } + + log.Infof("Using flavor: %v, version: %v", f, v) + result.capabilities = newCapabilitySet(f, v) return result } +/* +getVersionFromEnv returns the flavor and an assumed version based on the legacy +MYSQL_FLAVOR environment variable. + +The assumed version may not be accurate since the legacy variable only specifies +broad families of compatible versions. However, the differences between those +versions should only matter if Vitess is managing the lifecycle of mysqld, in which +case we should have a local copy of the mysqld binary from which we can fetch +the accurate version instead of falling back to this function (see getVersionString). 
+*/ +func getVersionFromEnv() (flavor mysqlFlavor, ver serverVersion, err error) { + env := os.Getenv("MYSQL_FLAVOR") + switch env { + case "MariaDB": + return flavorMariaDB, serverVersion{10, 0, 10}, nil + case "MariaDB103": + return flavorMariaDB, serverVersion{10, 3, 7}, nil + case "MySQL80": + return flavorMySQL, serverVersion{8, 0, 11}, nil + case "MySQL56": + return flavorMySQL, serverVersion{5, 7, 10}, nil + } + return flavor, ver, fmt.Errorf("could not determine version from MYSQL_FLAVOR: %s", env) +} + +func getVersionString() (string, error) { + mysqlRoot, err := vtenv.VtMysqlRoot() + if err != nil { + return "", err + } + mysqldPath, err := binaryPath(mysqlRoot, "mysqld") + if err != nil { + return "", err + } + _, version, err := execCmd(mysqldPath, []string{"--version"}, nil, mysqlRoot, nil) + if err != nil { + return "", err + } + return version, nil +} + +// parse the output of mysqld --version into a flavor and version +func parseVersionString(version string) (flavor mysqlFlavor, ver serverVersion, err error) { + if strings.Contains(version, "Percona") { + flavor = flavorPercona + } else if strings.Contains(version, "MariaDB") { + flavor = flavorMariaDB + } else { + // OS distributed MySQL releases have a version string like: + // mysqld Ver 5.7.27-0ubuntu0.19.04.1 for Linux on x86_64 ((Ubuntu)) + flavor = flavorMySQL + } + v := versionRegex.FindStringSubmatch(version) + if len(v) != 4 { + return flavor, ver, fmt.Errorf("could not parse server version from: %s", version) + } + ver.Major, err = strconv.Atoi(string(v[1])) + if err != nil { + return flavor, ver, fmt.Errorf("could not parse server version from: %s", version) + } + ver.Minor, err = strconv.Atoi(string(v[2])) + if err != nil { + return flavor, ver, fmt.Errorf("could not parse server version from: %s", version) + } + ver.Patch, err = strconv.Atoi(string(v[3])) + if err != nil { + return flavor, ver, fmt.Errorf("could not parse server version from: %s", version) + } + + return +} + // 
RunMysqlUpgrade will run the mysql_upgrade program on the current // install. Will be called only when mysqld is running with no // network and no grant tables. @@ -125,6 +216,11 @@ func (mysqld *Mysqld) RunMysqlUpgrade() error { return client.RunMysqlUpgrade(context.TODO()) } + if mysqld.capabilities.hasMySQLUpgradeInServer() { + log.Warningf("MySQL version has built-in upgrade, skipping RunMySQLUpgrade") + return nil + } + // Find mysql_upgrade. If not there, we do nothing. dir, err := vtenv.VtMysqlRoot() if err != nil { @@ -540,13 +636,6 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string return nil } -// MySQL 5.7 GA and up have deprecated mysql_install_db. -// Instead, initialization is built into mysqld. -func useMysqldInitialize(version string) bool { - return strings.Contains(version, "Ver 5.7.") || - strings.Contains(version, "Ver 8.0.") -} - func (mysqld *Mysqld) installDataDir(cnf *Mycnf) error { mysqlRoot, err := vtenv.VtMysqlRoot() if err != nil { @@ -561,16 +650,8 @@ func (mysqld *Mysqld) installDataDir(cnf *Mycnf) error { if err != nil { return err } - - // Check mysqld version. 
- _, version, err := execCmd(mysqldPath, []string{"--version"}, nil, mysqlRoot, nil) - if err != nil { - return err - } - - if useMysqldInitialize(version) { + if mysqld.capabilities.hasInitializeInServer() { log.Infof("Installing data dir with mysqld --initialize-insecure") - args := []string{ "--defaults-file=" + cnf.path, "--basedir=" + mysqlBaseDir, @@ -612,7 +693,7 @@ func (mysqld *Mysqld) initConfig(root string, cnf *Mycnf, outFile string) error switch hr := hook.NewHookWithEnv("make_mycnf", nil, env).Execute(); hr.ExitStatus { case hook.HOOK_DOES_NOT_EXIST: log.Infof("make_mycnf hook doesn't exist, reading template files") - configData, err = cnf.makeMycnf(getMycnfTemplates(root)) + configData, err = cnf.makeMycnf(mysqld.getMycnfTemplates(root)) case hook.HOOK_SUCCESS: configData, err = cnf.fillMycnfTemplate(hr.Stdout) default: @@ -634,7 +715,7 @@ func contains(haystack []string, needle string) bool { return false } -func getMycnfTemplates(root string) []string { +func (mysqld *Mysqld) getMycnfTemplates(root string) []string { if *mycnfTemplateFile != "" { return []string{*mycnfTemplateFile} } @@ -650,28 +731,26 @@ func getMycnfTemplates(root string) []string { cnfTemplatePaths = append(cnfTemplatePaths, parts...) 
} - switch mysqlFlavor := os.Getenv("MYSQL_FLAVOR"); mysqlFlavor { - case "MariaDB": - path := path.Join(root, "config/mycnf/master_mariadb.cnf") - if !contains(cnfTemplatePaths, path) { - cnfTemplatePaths = append(cnfTemplatePaths, path) - } - case "MariaDB103": - path := path.Join(root, "config/mycnf/master_mariadb103.cnf") - if !contains(cnfTemplatePaths, path) { - cnfTemplatePaths = append(cnfTemplatePaths, path) - } - case "MySQL80": - path := path.Join(root, "config/mycnf/master_mysql80.cnf") - if !contains(cnfTemplatePaths, path) { - cnfTemplatePaths = append(cnfTemplatePaths, path) - } - default: - path := path.Join(root, "config/mycnf/master_mysql56.cnf") - // By default we assume Mysql56 compatable - if !contains(cnfTemplatePaths, path) { - cnfTemplatePaths = append(cnfTemplatePaths, path) - } + // Only include these files if they exist. + // master_{flavor}.cnf + // Percona Server == MySQL in this context + + f := flavorMariaDB + if mysqld.capabilities.isMySQLLike() { + f = flavorMySQL + } + + p := path.Join(root, fmt.Sprintf("config/mycnf/master_%s.cnf", f)) + _, err := os.Stat(p) + if err == nil && !contains(cnfTemplatePaths, p) { + cnfTemplatePaths = append(cnfTemplatePaths, p) + } + + // master_{flavor}{major}{minor}.cnf + p = path.Join(root, fmt.Sprintf("config/mycnf/master_%s%d%d.cnf", f, mysqld.capabilities.version.Major, mysqld.capabilities.version.Minor)) + _, err = os.Stat(p) + if err == nil && !contains(cnfTemplatePaths, p) { + cnfTemplatePaths = append(cnfTemplatePaths, p) } return cnfTemplatePaths diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go new file mode 100644 index 00000000000..f3e45b87e18 --- /dev/null +++ b/go/vt/mysqlctl/mysqld_test.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Vitess Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctl + +import ( + "os" + "testing" +) + +type testcase struct { + versionString string + version serverVersion + flavor mysqlFlavor +} + +func TestParseVersionString(t *testing.T) { + + var testcases = []testcase{ + + { + versionString: "mysqld Ver 5.7.27-0ubuntu0.19.04.1 for Linux on x86_64 ((Ubuntu))", + version: serverVersion{5, 7, 27}, + flavor: flavorMySQL, + }, + { + versionString: "mysqld Ver 5.6.43 for linux-glibc2.12 on x86_64 (MySQL Community Server (GPL))", + version: serverVersion{5, 6, 43}, + flavor: flavorMySQL, + }, + { + versionString: "mysqld Ver 5.7.26 for linux-glibc2.12 on x86_64 (MySQL Community Server (GPL))", + version: serverVersion{5, 7, 26}, + flavor: flavorMySQL, + }, + { + versionString: "mysqld Ver 8.0.16 for linux-glibc2.12 on x86_64 (MySQL Community Server - GPL)", + version: serverVersion{8, 0, 16}, + flavor: flavorMySQL, + }, + { + versionString: "mysqld Ver 5.7.26-29 for Linux on x86_64 (Percona Server (GPL), Release 29, Revision 11ad961)", + version: serverVersion{5, 7, 26}, + flavor: flavorPercona, + }, + { + versionString: "mysqld Ver 10.0.38-MariaDB for Linux on x86_64 (MariaDB Server)", + version: serverVersion{10, 0, 38}, + flavor: flavorMariaDB, + }, + { + versionString: "mysqld Ver 10.1.40-MariaDB for Linux on x86_64 (MariaDB Server)", + version: serverVersion{10, 1, 40}, + flavor: flavorMariaDB, + }, + { + versionString: "mysqld Ver 10.2.25-MariaDB for Linux on x86_64 (MariaDB Server)", + version: serverVersion{10, 2, 25}, + flavor: flavorMariaDB, + }, + { + versionString: "mysqld Ver 10.3.16-MariaDB for Linux 
on x86_64 (MariaDB Server)", + version: serverVersion{10, 3, 16}, + flavor: flavorMariaDB, + }, + { + versionString: "mysqld Ver 10.4.6-MariaDB for Linux on x86_64 (MariaDB Server)", + version: serverVersion{10, 4, 6}, + flavor: flavorMariaDB, + }, + { + versionString: "mysqld Ver 5.6.42 for linux-glibc2.12 on x86_64 (MySQL Community Server (GPL))", + version: serverVersion{5, 6, 42}, + flavor: flavorMySQL, + }, + { + versionString: "mysqld Ver 5.6.44-86.0 for Linux on x86_64 (Percona Server (GPL), Release 86.0, Revision eba1b3f)", + version: serverVersion{5, 6, 44}, + flavor: flavorPercona, + }, + { + versionString: "mysqld Ver 8.0.15-6 for Linux on x86_64 (Percona Server (GPL), Release 6, Revision 63abd08)", + version: serverVersion{8, 0, 15}, + flavor: flavorPercona, + }, + } + + for _, testcase := range testcases { + f, v, err := parseVersionString(testcase.versionString) + if v != testcase.version || f != testcase.flavor || err != nil { + t.Errorf("parseVersionString failed for: %#v, Got: %#v, %#v Expected: %#v, %#v", testcase.versionString, v, f, testcase.version, testcase.flavor) + } + } + +} + +func TestAssumeVersionString(t *testing.T) { + + // In these cases, the versionstring is nonsensical or unspecified. + // MYSQL_FLAVOR is used instead. + + var testcases = []testcase{ + { + versionString: "MySQL80", + version: serverVersion{8, 0, 11}, + flavor: flavorMySQL, + }, + { + versionString: "MySQL56", + version: serverVersion{5, 7, 10}, // Yes, this has to lie! 
+ flavor: flavorMySQL, // There was no MySQL57 option + }, + { + versionString: "MariaDB", + version: serverVersion{10, 0, 10}, + flavor: flavorMariaDB, + }, + { + versionString: "MariaDB103", + version: serverVersion{10, 3, 7}, + flavor: flavorMariaDB, + }, + } + + for _, testcase := range testcases { + os.Setenv("MYSQL_FLAVOR", testcase.versionString) + f, v, err := getVersionFromEnv() + if v != testcase.version || f != testcase.flavor || err != nil { + t.Errorf("getVersionFromEnv() failed for: %#v, Got: %#v, %#v Expected: %#v, %#v", testcase.versionString, v, f, testcase.version, testcase.flavor) + } + } + +} diff --git a/go/vt/mysqlctl/version.go b/go/vt/mysqlctl/version.go new file mode 100644 index 00000000000..05bb18c20ef --- /dev/null +++ b/go/vt/mysqlctl/version.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Vitess Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* + Detect server flavors and capabilities +*/ + +package mysqlctl + +type serverVersion struct { + Major, Minor, Patch int +} + +func (v *serverVersion) atLeast(compare serverVersion) bool { + if v.Major > compare.Major { + return true + } + if v.Major == compare.Major && v.Minor > compare.Minor { + return true + } + if v.Major == compare.Major && v.Minor == compare.Minor && v.Patch >= compare.Patch { + return true + } + return false +} diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index aa43bd471e8..31158dba7d9 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -23,11 +23,12 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "os/exec" "path" + "regexp" "strings" + "sync" "time" "github.com/klauspost/pgzip" @@ -55,13 +56,16 @@ var ( // streaming mode xtrabackupStreamMode = flag.String("xtrabackup_stream_mode", "tar", "which mode to use if streaming, valid values are tar and xbstream") xtrabackupUser = flag.String("xtrabackup_user", "", "User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.") + // striping mode + xtrabackupStripes = flag.Uint("xtrabackup_stripes", 0, "If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression") + xtrabackupStripeBlockSize = flag.Uint("xtrabackup_stripe_block_size", 102400, "Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe") ) const ( - streamModeTar = "tar" - xtrabackupBinaryName = "xtrabackup" - xtrabackupBackupMethod = "xtrabackup" - xbstream = "xbstream" + streamModeTar = "tar" + xtrabackupBinaryName = "xtrabackup" + xtrabackupEngineName = "xtrabackup" + xbstream = "xbstream" ) // xtraBackupManifest represents a backup. 
@@ -69,17 +73,25 @@ const ( // whether the backup is compressed using gzip, and any extra // command line parameters used while invoking it. type xtraBackupManifest struct { + // BackupManifest is an anonymous embedding of the base manifest struct. + BackupManifest + // Name of the backup file FileName string - // BackupMethod, set to xtrabackup - BackupMethod string - // Position at which the backup was taken - Position mysql.Position - // SkipCompress can be set if the backup files were not run - // through gzip. - SkipCompress bool // Params are the parameters that backup was run with Params string `json:"ExtraCommandLineParams"` + // StreamMode is the stream mode used to create this backup. + StreamMode string + // NumStripes is the number of stripes the file is split across, if any. + NumStripes int32 + // StripeBlockSize is the size in bytes of each stripe block. + StripeBlockSize int32 + + // SkipCompress is true if the backup files were NOT run through gzip. + // The field is expressed as a negative because it will come through as + // false for backups that were created before the field existed, and those + // backups all had compression enabled. + SkipCompress bool } func (be *XtrabackupEngine) backupFileName() string { @@ -96,8 +108,7 @@ func (be *XtrabackupEngine) backupFileName() string { // ExecuteBackup returns a boolean that indicates if the backup is usable, // and an overall error. 
-func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) (bool, error) { - +func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) (complete bool, finalErr error) { if *xtrabackupUser == "" { return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.") } @@ -135,97 +146,161 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, cnf *Mycnf, mysql } backupFileName := be.backupFileName() + numStripes := int(*xtrabackupStripes) - wc, err := bh.AddFile(ctx, backupFileName, 0) + destFiles, err := addStripeFiles(ctx, bh, backupFileName, numStripes, logger) if err != nil { return false, vterrors.Wrapf(err, "cannot create backup file %v", backupFileName) } closeFile := func(wc io.WriteCloser, fileName string) { - if closeErr := wc.Close(); err == nil { - err = closeErr + if closeErr := wc.Close(); finalErr == nil { + finalErr = closeErr } else if closeErr != nil { // since we already have an error just log this - logger.Errorf("error closing file %v: %v", fileName, err) + logger.Errorf("error closing file %v: %v", fileName, closeErr) } } - defer closeFile(wc, backupFileName) + defer func() { + for _, file := range destFiles { + closeFile(file, backupFileName) + } + }() backupCmd := exec.CommandContext(ctx, backupProgram, flagsToExec...) - backupOut, _ := backupCmd.StdoutPipe() - backupErr, _ := backupCmd.StderrPipe() - dst := bufio.NewWriterSize(wc, writerBufferSize) - writer := io.MultiWriter(dst) - - // Create the gzip compression pipe, if necessary. 
- var gzip *pgzip.Writer - if *backupStorageCompress { - gzip, err = pgzip.NewWriterLevel(writer, pgzip.BestSpeed) - if err != nil { - return false, vterrors.Wrap(err, "cannot create gziper") + backupOut, err := backupCmd.StdoutPipe() + if err != nil { + return false, vterrors.Wrap(err, "cannot create stdout pipe") + } + backupErr, err := backupCmd.StderrPipe() + if err != nil { + return false, vterrors.Wrap(err, "cannot create stderr pipe") + } + + destWriters := []io.Writer{} + destBuffers := []*bufio.Writer{} + destCompressors := []*pgzip.Writer{} + for _, file := range destFiles { + buffer := bufio.NewWriterSize(file, writerBufferSize) + destBuffers = append(destBuffers, buffer) + writer := io.Writer(buffer) + + // Create the gzip compression pipe, if necessary. + if *backupStorageCompress { + compressor, err := pgzip.NewWriterLevel(writer, pgzip.BestSpeed) + if err != nil { + return false, vterrors.Wrap(err, "cannot create gzip compressor") + } + compressor.SetConcurrency(*backupCompressBlockSize, *backupCompressBlocks) + writer = compressor + destCompressors = append(destCompressors, compressor) } - gzip.SetConcurrency(*backupCompressBlockSize, *backupCompressBlocks) - writer = gzip + + destWriters = append(destWriters, writer) } if err = backupCmd.Start(); err != nil { return false, vterrors.Wrap(err, "unable to start backup") } + // Read stderr in the background, so we can log progress as xtrabackup runs. + // Also save important lines of the output so we can parse it later to find + // the replication position. Note that if we don't read stderr as we go, the + // xtrabackup process gets blocked when the write buffer fills up. + stderrBuilder := &strings.Builder{} + stderrDone := make(chan struct{}) + go func() { + defer close(stderrDone) + + scanner := bufio.NewScanner(backupErr) + capture := false + for scanner.Scan() { + line := scanner.Text() + logger.Infof("xtrabackup stderr: %s", line) + + // Wait until we see the first line of the binlog position. 
+ // Then capture all subsequent lines. We need multiple lines since + // the value we're looking for has newlines in it. + if !capture { + if !strings.Contains(line, "MySQL binlog position") { + continue + } + capture = true + } + fmt.Fprintln(stderrBuilder, line) + } + if err := scanner.Err(); err != nil { + logger.Errorf("error reading from xtrabackup stderr: %v", err) + } + }() + // Copy from the stream output to destination file (optional gzip) - _, err = io.Copy(writer, backupOut) - if err != nil { + blockSize := int64(*xtrabackupStripeBlockSize) + if blockSize < 1024 { + // Enforce minimum block size. + blockSize = 1024 + } + if _, err := copyToStripes(destWriters, backupOut, blockSize); err != nil { return false, vterrors.Wrap(err, "cannot copy output from xtrabackup command") } - // Close gzip to flush it, after that all data is sent to writer. - if gzip != nil { - if err = gzip.Close(); err != nil { - return false, vterrors.Wrap(err, "cannot close gzip") + // Close compressor to flush it. After that all data is sent to the buffer. + for _, compressor := range destCompressors { + if err := compressor.Close(); err != nil { + return false, vterrors.Wrap(err, "cannot close gzip compressor") } } // Flush the buffer to finish writing on destination. - if err = dst.Flush(); err != nil { - return false, vterrors.Wrapf(err, "cannot flush destination: %v", backupFileName) + for _, buffer := range destBuffers { + if err = buffer.Flush(); err != nil { + return false, vterrors.Wrapf(err, "cannot flush destination: %v", backupFileName) + } } - stderrOutput, err := ioutil.ReadAll(backupErr) - if err != nil { - return false, vterrors.Wrap(err, "backup failed while reading command output") - } - err = backupCmd.Wait() - output := string(stderrOutput) - logger.Infof("Xtrabackup backup command output: %v", output) - if err != nil { + // Wait for stderr scanner to stop. + <-stderrDone + // Get the final (filtered) stderr output. 
+ stderrOutput := stderrBuilder.String() + + if err := backupCmd.Wait(); err != nil { + return false, vterrors.Wrap(err, "xtrabackup failed with error") } - replicationPosition, rerr := findReplicationPosition(output, flavor, logger) + replicationPosition, rerr := findReplicationPosition(stderrOutput, flavor, logger) if rerr != nil { return false, vterrors.Wrap(rerr, "backup failed trying to find replication position") } // open the MANIFEST - mwc, err := bh.AddFile(ctx, backupManifest, 0) + mwc, err := bh.AddFile(ctx, backupManifestFileName, 0) if err != nil { - return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifest) + return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } - defer closeFile(mwc, backupManifest) + defer closeFile(mwc, backupManifestFileName) // JSON-encode and write the MANIFEST bm := &xtraBackupManifest{ - FileName: backupFileName, - BackupMethod: xtrabackupBackupMethod, - Position: replicationPosition, - SkipCompress: !*backupStorageCompress, - Params: *xtrabackupBackupFlags, + // Common base fields + BackupManifest: BackupManifest{ + BackupMethod: xtrabackupEngineName, + Position: replicationPosition, + FinishedTime: time.Now().UTC().Format(time.RFC3339), + }, + + // XtraBackup-specific fields + FileName: backupFileName, + StreamMode: *xtrabackupStreamMode, + SkipCompress: !*backupStorageCompress, + Params: *xtrabackupBackupFlags, + NumStripes: int32(numStripes), + StripeBlockSize: int32(*xtrabackupStripeBlockSize), } data, err := json.MarshalIndent(bm, "", " ") if err != nil { - return false, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifest) + return false, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) } if _, err := mwc.Write([]byte(data)); err != nil { - return false, vterrors.Wrapf(err, "cannot write %v", backupManifest) + return false, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) } return true, nil @@ -238,31 +313,30 @@ func (be 
*XtrabackupEngine) ExecuteRestore( mysqld MysqlDaemon, logger logutil.Logger, dir string, - bhs []backupstorage.BackupHandle, + bh backupstorage.BackupHandle, restoreConcurrency int, hookExtraEnv map[string]string) (mysql.Position, error) { zeroPosition := mysql.Position{} var bm xtraBackupManifest - bh, err := findBackupToRestore(ctx, cnf, mysqld, logger, dir, bhs, &bm) - if err != nil { + if err := getBackupManifestInto(ctx, bh, &bm); err != nil { return zeroPosition, err } // mark restore as in progress - if err = createStateFile(cnf); err != nil { + if err := createStateFile(cnf); err != nil { return zeroPosition, err } - if err = prepareToRestore(ctx, cnf, mysqld, logger); err != nil { + if err := prepareToRestore(ctx, cnf, mysqld, logger); err != nil { return zeroPosition, err } // copy / extract files logger.Infof("Restore: Extracting files from %v", bm.FileName) - if err = be.restoreFromBackup(ctx, cnf, bh, bm, logger); err != nil { + if err := be.restoreFromBackup(ctx, cnf, bh, bm, logger); err != nil { // don't delete the file here because that is how we detect an interrupted restore return zeroPosition, err } @@ -275,19 +349,19 @@ func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, b // first download the file into a tmp dir // and extract all the files - tempDir := fmt.Sprintf("%v/%v", cnf.TmpDir, time.Now().UTC().Format("2006-01-02.150405")) + tempDir := fmt.Sprintf("%v/%v", cnf.TmpDir, time.Now().UTC().Format("xtrabackup-2006-01-02.150405")) // create tempDir if err := os.MkdirAll(tempDir, os.ModePerm); err != nil { return err } - if err := be.extractFiles(ctx, logger, bh, !bm.SkipCompress, be.backupFileName(), tempDir); err != nil { - logger.Errorf("error restoring backup file %v:%v", be.backupFileName(), err) + if err := be.extractFiles(ctx, logger, bh, bm, tempDir); err != nil { + logger.Errorf("error extracting backup files: %v", err) return err } // copy / extract files - logger.Infof("Restore: Preparing the files") + 
logger.Infof("Restore: Preparing the extracted files") // prepare the backup restoreProgram := path.Join(*xtrabackupEnginePath, xtrabackupBinaryName) flagsToExec := []string{"--defaults-file=" + cnf.path, @@ -295,121 +369,148 @@ func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, b "--target-dir=" + tempDir, } prepareCmd := exec.CommandContext(ctx, restoreProgram, flagsToExec...) - prepareOut, _ := prepareCmd.StdoutPipe() - prepareErr, _ := prepareCmd.StderrPipe() - if err := prepareCmd.Start(); err != nil { - return vterrors.Wrap(err, "unable to start prepare") + prepareOut, err := prepareCmd.StdoutPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stdout pipe") } - - errOutput, _ := ioutil.ReadAll(prepareErr) - stdOutput, _ := ioutil.ReadAll(prepareOut) - err := prepareCmd.Wait() - if string(stdOutput) != "" { - logger.Infof("Prepare stdout %v", string(stdOutput)) + prepareErr, err := prepareCmd.StderrPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stderr pipe") } - output := string(errOutput) - if output != "" { - logger.Infof("Prepare stderr %v", output) + if err := prepareCmd.Start(); err != nil { + return vterrors.Wrap(err, "can't start prepare step") } - if err != nil { + // Read stdout/stderr in the background and send each line to the logger. + prepareWg := &sync.WaitGroup{} + prepareWg.Add(2) + go scanLinesToLogger("prepare stdout", prepareOut, logger, prepareWg.Done) + go scanLinesToLogger("prepare stderr", prepareErr, logger, prepareWg.Done) + prepareWg.Wait() + + // Get exit status. 
+ if err := prepareCmd.Wait(); err != nil { return vterrors.Wrap(err, "prepare step failed") } - // then copy-back - logger.Infof("Restore: Copying the files") + // then move-back + logger.Infof("Restore: Move extracted and prepared files to final locations") flagsToExec = []string{"--defaults-file=" + cnf.path, - "--copy-back", + "--move-back", "--target-dir=" + tempDir, } - copybackCmd := exec.CommandContext(ctx, restoreProgram, flagsToExec...) - copybackErr, _ := copybackCmd.StderrPipe() - copybackOut, _ := copybackCmd.StdoutPipe() - - if err = copybackCmd.Start(); err != nil { - return vterrors.Wrap(err, "unable to start copy-back") + movebackCmd := exec.CommandContext(ctx, restoreProgram, flagsToExec...) + movebackOut, err := movebackCmd.StdoutPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stdout pipe") } - - errOutput, _ = ioutil.ReadAll(copybackErr) - stdOutput, _ = ioutil.ReadAll(copybackOut) - err = copybackCmd.Wait() - output = string(errOutput) - if output != "" { - logger.Infof("Copy-back stderr %v", string(output)) + movebackErr, err := movebackCmd.StderrPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stderr pipe") } - if string(stdOutput) != "" { - logger.Infof("Copy-back stdout %v", string(stdOutput)) + if err := movebackCmd.Start(); err != nil { + return vterrors.Wrap(err, "can't start move-back step") } - if err != nil { - return vterrors.Wrap(err, "copy-back step failed") + // Read stdout/stderr in the background and send each line to the logger. + movebackWg := &sync.WaitGroup{} + movebackWg.Add(2) + go scanLinesToLogger("move-back stdout", movebackOut, logger, movebackWg.Done) + go scanLinesToLogger("move-back stderr", movebackErr, logger, movebackWg.Done) + movebackWg.Wait() + + // Get exit status. 
+ if err := movebackCmd.Wait(); err != nil { + return vterrors.Wrap(err, "move-back step failed") } + return nil } // restoreFile extracts all the files from the backup archive -func (be *XtrabackupEngine) extractFiles( - ctx context.Context, - logger logutil.Logger, - bh backupstorage.BackupHandle, - compress bool, - name string, - tempDir string) (err error) { - - streamMode := *xtrabackupStreamMode - // Open the source file for reading. - var source io.ReadCloser - source, err = bh.ReadFile(ctx, name) +func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Logger, bh backupstorage.BackupHandle, bm xtraBackupManifest, tempDir string) error { + // Pull details from the MANIFEST where available, so we can still restore + // backups taken with different flags. Some fields were not always present, + // so if necessary we default to the flag values. + compressed := !bm.SkipCompress + streamMode := bm.StreamMode + if streamMode == "" { + streamMode = *xtrabackupStreamMode + } + baseFileName := bm.FileName + if baseFileName == "" { + baseFileName = be.backupFileName() + } + + // Open the source files for reading. + srcFiles, err := readStripeFiles(ctx, bh, baseFileName, int(bm.NumStripes), logger) if err != nil { - return err + return vterrors.Wrapf(err, "cannot open backup file %v", baseFileName) } - defer source.Close() - - reader := io.MultiReader(source) - - // Create the uncompresser if needed. - if compress { - gz, err := pgzip.NewReader(reader) - if err != nil { - return err + defer func() { + for _, file := range srcFiles { + file.Close() } - defer func() { - if cerr := gz.Close(); cerr != nil { - if err != nil { - // We already have an error, just log this one. - logger.Errorf("failed to close gunziper %v: %v", name, cerr) - } else { - err = cerr - } + }() + + srcReaders := []io.Reader{} + srcDecompressors := []*pgzip.Reader{} + for _, file := range srcFiles { + reader := io.Reader(file) + + // Create the decompresser if needed. 
+ if compressed { + decompressor, err := pgzip.NewReader(reader) + if err != nil { + return vterrors.Wrap(err, "can't create gzip decompressor") } - }() - reader = gz + srcDecompressors = append(srcDecompressors, decompressor) + reader = decompressor + } + + srcReaders = append(srcReaders, reader) } + defer func() { + for _, decompressor := range srcDecompressors { + if cerr := decompressor.Close(); cerr != nil { + logger.Errorf("failed to close gzip decompressor: %v", cerr) + } + } + }() + + reader := stripeReader(srcReaders, int64(bm.StripeBlockSize)) switch streamMode { case streamModeTar: // now extract the files by running tar // error if we can't find tar - flagsToExec := []string{"-C", tempDir, "-xi"} + flagsToExec := []string{"-C", tempDir, "-xiv"} tarCmd := exec.CommandContext(ctx, "tar", flagsToExec...) logger.Infof("Executing tar cmd with flags %v", flagsToExec) tarCmd.Stdin = reader - tarOut, _ := tarCmd.StdoutPipe() - tarErr, _ := tarCmd.StderrPipe() - tarCmd.Start() - output, _ := ioutil.ReadAll(tarOut) - errOutput, _ := ioutil.ReadAll(tarErr) - err := tarCmd.Wait() - - if string(output) != "" { - logger.Infof("output from tar: %v ", string(output)) - } - if string(errOutput) != "" { - logger.Infof("error from tar: %v ", string(errOutput)) + tarOut, err := tarCmd.StdoutPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stdout pipe") } + tarErr, err := tarCmd.StderrPipe() if err != nil { - return vterrors.Wrap(err, "error from tar") + return vterrors.Wrap(err, "cannot create stderr pipe") + } + if err := tarCmd.Start(); err != nil { + return vterrors.Wrap(err, "can't start tar") + } + + // Read stdout/stderr in the background and send each line to the logger. + tarWg := &sync.WaitGroup{} + tarWg.Add(2) + go scanLinesToLogger("tar stdout", tarOut, logger, tarWg.Done) + go scanLinesToLogger("tar stderr", tarErr, logger, tarWg.Done) + tarWg.Wait() + + // Get exit status. 
+ if err := tarCmd.Wait(); err != nil { + return vterrors.Wrap(err, "tar failed") } case xbstream: @@ -419,25 +520,32 @@ func (be *XtrabackupEngine) extractFiles( if *xbstreamRestoreFlags != "" { flagsToExec = append(flagsToExec, strings.Fields(*xbstreamRestoreFlags)...) } - flagsToExec = append(flagsToExec, "-C", tempDir, "-x") + flagsToExec = append(flagsToExec, "-C", tempDir, "-xv") xbstreamCmd := exec.CommandContext(ctx, xbstreamProgram, flagsToExec...) logger.Infof("Executing xbstream cmd: %v %v", xbstreamProgram, flagsToExec) xbstreamCmd.Stdin = reader - xbstreamOut, _ := xbstreamCmd.StdoutPipe() - xbstreamErr, _ := xbstreamCmd.StderrPipe() - xbstreamCmd.Start() - output, _ := ioutil.ReadAll(xbstreamOut) - errOutput, _ := ioutil.ReadAll(xbstreamErr) - err := xbstreamCmd.Wait() - - if string(output) != "" { - logger.Infof("Output from xbstream: %v ", string(output)) - } - if string(errOutput) != "" { - logger.Infof("error from xbstream: %v", string(errOutput)) + xbstreamOut, err := xbstreamCmd.StdoutPipe() + if err != nil { + return vterrors.Wrap(err, "cannot create stdout pipe") } + xbstreamErr, err := xbstreamCmd.StderrPipe() if err != nil { - return vterrors.Wrap(err, "error from xbstream") + return vterrors.Wrap(err, "cannot create stderr pipe") + } + if err := xbstreamCmd.Start(); err != nil { + return vterrors.Wrap(err, "can't start xbstream") + } + + // Read stdout/stderr in the background and send each line to the logger. + xbstreamWg := &sync.WaitGroup{} + xbstreamWg.Add(2) + go scanLinesToLogger("xbstream stdout", xbstreamOut, logger, xbstreamWg.Done) + go scanLinesToLogger("xbstream stderr", xbstreamErr, logger, xbstreamWg.Done) + xbstreamWg.Wait() + + // Get exit status. 
+ if err := xbstreamCmd.Wait(); err != nil { + return vterrors.Wrap(err, "xbstream failed") } default: return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "%v is not a valid value for xtrabackup_stream_mode, supported modes are tar and xbstream", streamMode) @@ -445,32 +553,197 @@ func (be *XtrabackupEngine) extractFiles( return nil } +var xtrabackupReplicationPositionRegexp = regexp.MustCompile(`GTID of the last change '([^']*)'`) + func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql.Position, error) { - substrs := strings.Split(input, "'") - index := -1 - for i, str := range substrs { - if strings.Contains(str, "GTID of the last change") { - index = i + 1 - break - } - } - position := "" - // asserts that xtrabackup output comes with GTIDs in the format we expect - if index != -1 && index < len(substrs) { - // since we are extracting this from the log, it contains newlines - // replace them with a single space to match the SET GLOBAL gtid_purged command in xtrabackup_slave_info - position = strings.Replace(substrs[index], "\n", " ", -1) - } + match := xtrabackupReplicationPositionRegexp.FindStringSubmatch(input) + if match == nil || len(match) != 2 { + return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output") + } + position := match[1] + // Remove all spaces, tabs, and newlines. 
+ position = strings.Replace(position, " ", "", -1) + position = strings.Replace(position, "\t", "", -1) + position = strings.Replace(position, "\n", "", -1) logger.Infof("Found position: %v", position) + if position == "" { + return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup") + } // flavor is required to parse a string into a mysql.Position replicationPosition, err := mysql.ParsePosition(flavor, position) if err != nil { - return mysql.Position{}, err + return mysql.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position) } return replicationPosition, nil } +// scanLinesToLogger scans full lines from the given Reader and sends them to +// the given Logger until EOF. +func scanLinesToLogger(prefix string, reader io.Reader, logger logutil.Logger, doneFunc func()) { + defer doneFunc() + + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + logger.Infof("%s: %s", prefix, line) + } + if err := scanner.Err(); err != nil { + // This is usually run in a background goroutine, so there's no point + // returning an error. Just log it. + logger.Warningf("error scanning lines from %s: %v", prefix, err) + } +} + +func stripeFileName(baseFileName string, index int) string { + return fmt.Sprintf("%s-%03d", baseFileName, index) +} + +func addStripeFiles(ctx context.Context, backupHandle backupstorage.BackupHandle, baseFileName string, numStripes int, logger logutil.Logger) ([]io.WriteCloser, error) { + if numStripes <= 1 { + // No striping. + file, err := backupHandle.AddFile(ctx, baseFileName, 0) + return []io.WriteCloser{file}, err + } + + files := []io.WriteCloser{} + for i := 0; i < numStripes; i++ { + file, err := backupHandle.AddFile(ctx, stripeFileName(baseFileName, i), 0) + if err != nil { + // Close any files we already opened and clear them from the result. 
+ for _, file := range files { + if err := file.Close(); err != nil { + logger.Warningf("error closing backup stripe file: %v", err) + } + } + return nil, err + } + files = append(files, file) + } + + return files, nil +} + +func readStripeFiles(ctx context.Context, backupHandle backupstorage.BackupHandle, baseFileName string, numStripes int, logger logutil.Logger) ([]io.ReadCloser, error) { + if numStripes <= 1 { + // No striping. + file, err := backupHandle.ReadFile(ctx, baseFileName) + return []io.ReadCloser{file}, err + } + + files := []io.ReadCloser{} + for i := 0; i < numStripes; i++ { + file, err := backupHandle.ReadFile(ctx, stripeFileName(baseFileName, i)) + if err != nil { + // Close any files we already opened and clear them from the result. + for _, file := range files { + if err := file.Close(); err != nil { + logger.Warningf("error closing backup stripe file: %v", err) + } + } + return nil, err + } + files = append(files, file) + } + + return files, nil +} + +func copyToStripes(writers []io.Writer, reader io.Reader, blockSize int64) (written int64, err error) { + if len(writers) == 1 { + // Not striped. + return io.Copy(writers[0], reader) + } + + // Read blocks from source and round-robin them to destination writers. + // Since we put a buffer in front of the destination file, and pgzip has its + // own buffer as well, we are writing into a buffer either way (whether a + // compressor is in the chain or not). That means these writes should not + // block often, so we shouldn't need separate goroutines here. + destIndex := 0 + for { + // Copy blockSize bytes to this writer before rotating to the next one. + // The only acceptable reason for copying less than blockSize bytes is EOF. + n, err := io.CopyN(writers[destIndex], reader, blockSize) + written += n + if err == io.EOF { + // We're done. + return written, nil + } + if err != nil { + // If we failed to copy exactly blockSize bytes for any reason other + // than EOF, we must abort. 
+ return written, err + } + + // Rotate to the next writer. + destIndex++ + if destIndex == len(writers) { + destIndex = 0 + } + } +} + +func stripeReader(readers []io.Reader, blockSize int64) io.Reader { + if len(readers) == 1 { + // No striping. + return readers[0] + } + + // Make a pipe to convert our overall Writer into a Reader. + // We will launch a goroutine to write to the write half of the pipe, + // and return the read half to the caller. + reader, writer := io.Pipe() + + go func() { + // Read blocks from each source in round-robin and send them to the pipe. + // When using pgzip, there is already a read-ahead goroutine for every + // source, so we don't need to launch one for each source. + // TODO: See if we need to add read-ahead goroutines for the case when + // compression is not enabled in order to get any benefit to restore + // parallelism from data striping. + srcIndex := 0 + for { + // Copy blockSize bytes from this reader before rotating to the next one. + // The only acceptable reason for copying less than blockSize bytes is EOF. + n, err := io.CopyN(writer, readers[srcIndex], blockSize) + if err != nil { + // If we failed to copy exactly blockSize bytes for any + // reason other than EOF, we must abort. + if err != io.EOF { + writer.CloseWithError(err) + return + } + + // If we hit EOF after copying less than the blockSize from + // this reader, we must be done. + if n < blockSize { + // Close the write half so the read half gets EOF. + writer.Close() + return + } + // If we hit EOF after copying exactly blockSize bytes, then we + // need to keep checking the rest of the stripes until one of + // them returns EOF with n < blockSize. + } + + // Rotate to the next writer. 
+ srcIndex++ + if srcIndex == len(readers) { + srcIndex = 0 + } + } + }() + + return reader +} + +// ShouldDrainForBackup satisfies the BackupEngine interface +// xtrabackup can run while tablet is serving, hence false +func (be *XtrabackupEngine) ShouldDrainForBackup() bool { + return false +} + func init() { - BackupEngineMap[xtrabackupBackupMethod] = &XtrabackupEngine{} + BackupRestoreEngineMap[xtrabackupEngineName] = &XtrabackupEngine{} } diff --git a/go/vt/mysqlctl/xtrabackupengine_test.go b/go/vt/mysqlctl/xtrabackupengine_test.go new file mode 100644 index 00000000000..d8587176164 --- /dev/null +++ b/go/vt/mysqlctl/xtrabackupengine_test.go @@ -0,0 +1,101 @@ +package mysqlctl + +import ( + "bytes" + "io" + "math/rand" + "testing" + + "vitess.io/vitess/go/vt/logutil" +) + +func TestFindReplicationPosition(t *testing.T) { + input := `MySQL binlog position: filename 'vt-0476396352-bin.000005', position '310088991', GTID of the last change '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, + 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, + 47b59de1-b368-11e9-b48b-624401d35560:1-152981, + 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, + 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, + b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262' + MySQL slave binlog position: master host '10.128.0.43', purge list '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, 47b59de1-b368-11e9-b48b-624401d35560:1-152981, 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262', channel name: '' + + 190809 00:15:44 [00] Streaming + 190809 00:15:44 [00] ...done + 190809 00:15:44 [00] Streaming + 190809 00:15:44 [00] ...done + xtrabackup: Transaction log of lsn (405344842034) to (406364859653) was copied. 
+ 190809 00:16:14 completed OK!` + want := "145e508e-ae54-11e9-8ce6-46824dd1815e:1-3,1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3,47b59de1-b368-11e9-b48b-624401d35560:1-152981,557def0a-b368-11e9-84ed-f6fffd91cc57:1-3,599ef589-ae55-11e9-9688-ca1f44501925:1-14857169,b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262" + + pos, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) + if err != nil { + t.Fatalf("findReplicationPosition error: %v", err) + } + if got := pos.String(); got != want { + t.Errorf("findReplicationPosition() = %v; want %v", got, want) + } +} + +func TestFindReplicationPositionNoMatch(t *testing.T) { + // Make sure failure to find a match triggers an error. + input := `nothing` + + _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) + if err == nil { + t.Fatalf("expected error from findReplicationPosition but got nil") + } +} + +func TestFindReplicationPositionEmptyMatch(t *testing.T) { + // Make sure failure to find a match triggers an error. + input := `GTID of the last change ' + + '` + + _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) + if err == nil { + t.Fatalf("expected error from findReplicationPosition but got nil") + } +} + +func TestStripeRoundTrip(t *testing.T) { + // Generate some deterministic input data. + dataSize := int64(1000000) + input := make([]byte, dataSize) + rng := rand.New(rand.NewSource(1)) + rng.Read(input) + + test := func(blockSize int64, stripes int) { + // Write it out striped across some buffers. + buffers := make([]bytes.Buffer, stripes) + readers := []io.Reader{} + writers := []io.Writer{} + for i := range buffers { + readers = append(readers, &buffers[i]) + writers = append(writers, &buffers[i]) + } + copyToStripes(writers, bytes.NewReader(input), blockSize) + + // Read it back and merge. 
+ outBuf := &bytes.Buffer{} + written, err := io.Copy(outBuf, stripeReader(readers, blockSize)) + if err != nil { + t.Errorf("dataSize=%d, blockSize=%d, stripes=%d; copy error: %v", dataSize, blockSize, stripes, err) + } + if written != dataSize { + t.Errorf("dataSize=%d, blockSize=%d, stripes=%d; copy error: wrote %d total bytes instead of dataSize", dataSize, blockSize, stripes, written) + } + output := outBuf.Bytes() + if !bytes.Equal(input, output) { + t.Errorf("output bytes are not the same as input") + } + } + + // Test block size that evenly divides data size. + test(1000, 10) + // Test block size that doesn't evenly divide data size. + test(3000, 10) + // Test stripe count that doesn't evenly divide data size. + test(1000, 30) + // Test block size and stripe count that don't evenly divide data size. + test(6000, 7) +} diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index a0b4c05757a..24983945643 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -48,7 +48,7 @@ func (x OnDDLAction) String() string { return proto.EnumName(OnDDLAction_name, int32(x)) } func (OnDDLAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } // VEventType enumerates the event types. 
@@ -73,6 +73,7 @@ const ( VEventType_FIELD VEventType = 13 VEventType_HEARTBEAT VEventType = 14 VEventType_VGTID VEventType = 15 + VEventType_JOURNAL VEventType = 16 ) var VEventType_name = map[int32]string{ @@ -92,6 +93,7 @@ var VEventType_name = map[int32]string{ 13: "FIELD", 14: "HEARTBEAT", 15: "VGTID", + 16: "JOURNAL", } var VEventType_value = map[string]int32{ "UNKNOWN": 0, @@ -110,13 +112,38 @@ var VEventType_value = map[string]int32{ "FIELD": 13, "HEARTBEAT": 14, "VGTID": 15, + "JOURNAL": 16, } func (x VEventType) String() string { return proto.EnumName(VEventType_name, int32(x)) } func (VEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} +} + +// MigrationType specifies the type of migration for the Journal. +type MigrationType int32 + +const ( + MigrationType_TABLES MigrationType = 0 + MigrationType_SHARDS MigrationType = 1 +) + +var MigrationType_name = map[int32]string{ + 0: "TABLES", + 1: "SHARDS", +} +var MigrationType_value = map[string]int32{ + "TABLES": 0, + "SHARDS": 1, +} + +func (x MigrationType) String() string { + return proto.EnumName(MigrationType_name, int32(x)) +} +func (MigrationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } type BinlogTransaction_Statement_Category int32 @@ -164,7 +191,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. 
@@ -184,7 +211,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -241,7 +268,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -291,7 +318,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -349,7 +376,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{2} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) 
error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -402,7 +429,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{3} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -446,7 +473,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{4} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -499,7 +526,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{5} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -544,7 +571,7 @@ func (m *Rule) Reset() { *m = Rule{} } func (m *Rule) String() string { return proto.CompactTextString(m) } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{6} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{6} } 
func (m *Rule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rule.Unmarshal(m, b) @@ -591,7 +618,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{7} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{7} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -646,7 +673,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{8} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -728,7 +755,7 @@ func (m *RowChange) Reset() { *m = RowChange{} } func (m *RowChange) String() string { return proto.CompactTextString(m) } func (*RowChange) ProtoMessage() {} func (*RowChange) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{9} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{9} } func (m *RowChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowChange.Unmarshal(m, b) @@ -775,7 +802,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} } func (m *RowEvent) String() string { return proto.CompactTextString(m) } func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{10} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{10} } func (m *RowEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowEvent.Unmarshal(m, b) @@ -821,7 +848,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} } func (m *FieldEvent) 
String() string { return proto.CompactTextString(m) } func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{11} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{11} } func (m *FieldEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldEvent.Unmarshal(m, b) @@ -868,7 +895,7 @@ func (m *ShardGtid) Reset() { *m = ShardGtid{} } func (m *ShardGtid) String() string { return proto.CompactTextString(m) } func (*ShardGtid) ProtoMessage() {} func (*ShardGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{12} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{12} } func (m *ShardGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardGtid.Unmarshal(m, b) @@ -920,7 +947,7 @@ func (m *VGtid) Reset() { *m = VGtid{} } func (m *VGtid) String() string { return proto.CompactTextString(m) } func (*VGtid) ProtoMessage() {} func (*VGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{13} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{13} } func (m *VGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VGtid.Unmarshal(m, b) @@ -947,6 +974,138 @@ func (m *VGtid) GetShardGtids() []*ShardGtid { return nil } +type KeyspaceShard struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} } +func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) } +func (*KeyspaceShard) ProtoMessage() {} +func (*KeyspaceShard) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{14} +} +func (m *KeyspaceShard) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) +} +func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) +} +func (dst *KeyspaceShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyspaceShard.Merge(dst, src) +} +func (m *KeyspaceShard) XXX_Size() int { + return xxx_messageInfo_KeyspaceShard.Size(m) +} +func (m *KeyspaceShard) XXX_DiscardUnknown() { + xxx_messageInfo_KeyspaceShard.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyspaceShard proto.InternalMessageInfo + +func (m *KeyspaceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *KeyspaceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +type Journal struct { + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + MigrationType MigrationType `protobuf:"varint,2,opt,name=migration_type,json=migrationType,proto3,enum=binlogdata.MigrationType" json:"migration_type,omitempty"` + Tables []string `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty"` + LocalPosition string `protobuf:"bytes,4,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` + ShardGtids []*ShardGtid `protobuf:"bytes,5,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + Participants []*KeyspaceShard `protobuf:"bytes,6,rep,name=participants,proto3" json:"participants,omitempty"` + ReversedIds []int64 `protobuf:"varint,7,rep,packed,name=reversed_ids,json=reversedIds,proto3" json:"reversed_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Journal) Reset() { *m = Journal{} } +func (m *Journal) String() string { return proto.CompactTextString(m) } +func (*Journal) ProtoMessage() {} +func (*Journal) Descriptor() ([]byte, []int) { + return 
fileDescriptor_binlogdata_db2d20dd0016de21, []int{15} +} +func (m *Journal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Journal.Unmarshal(m, b) +} +func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Journal.Marshal(b, m, deterministic) +} +func (dst *Journal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Journal.Merge(dst, src) +} +func (m *Journal) XXX_Size() int { + return xxx_messageInfo_Journal.Size(m) +} +func (m *Journal) XXX_DiscardUnknown() { + xxx_messageInfo_Journal.DiscardUnknown(m) +} + +var xxx_messageInfo_Journal proto.InternalMessageInfo + +func (m *Journal) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Journal) GetMigrationType() MigrationType { + if m != nil { + return m.MigrationType + } + return MigrationType_TABLES +} + +func (m *Journal) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *Journal) GetLocalPosition() string { + if m != nil { + return m.LocalPosition + } + return "" +} + +func (m *Journal) GetShardGtids() []*ShardGtid { + if m != nil { + return m.ShardGtids + } + return nil +} + +func (m *Journal) GetParticipants() []*KeyspaceShard { + if m != nil { + return m.Participants + } + return nil +} + +func (m *Journal) GetReversedIds() []int64 { + if m != nil { + return m.ReversedIds + } + return nil +} + // VEvent represents a vstream event type VEvent struct { Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` @@ -956,6 +1115,7 @@ type VEvent struct { RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"` FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"` Vgtid *VGtid `protobuf:"bytes,7,opt,name=vgtid,proto3" json:"vgtid,omitempty"` + Journal *Journal `protobuf:"bytes,8,opt,name=journal,proto3" json:"journal,omitempty"` // 
current_time specifies the current time to handle clock skew. CurrentTime int64 `protobuf:"varint,20,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -967,7 +1127,7 @@ func (m *VEvent) Reset() { *m = VEvent{} } func (m *VEvent) String() string { return proto.CompactTextString(m) } func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{14} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{16} } func (m *VEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VEvent.Unmarshal(m, b) @@ -1036,6 +1196,13 @@ func (m *VEvent) GetVgtid() *VGtid { return nil } +func (m *VEvent) GetJournal() *Journal { + if m != nil { + return m.Journal + } + return nil +} + func (m *VEvent) GetCurrentTime() int64 { if m != nil { return m.CurrentTime @@ -1059,7 +1226,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{15} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{17} } func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) @@ -1126,7 +1293,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{16} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{18} } func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) @@ -1169,7 +1336,7 @@ func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } func (m 
*VStreamRowsRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRowsRequest) ProtoMessage() {} func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{17} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{19} } func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) @@ -1240,7 +1407,7 @@ func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } func (*VStreamRowsResponse) ProtoMessage() {} func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{18} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{20} } func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) @@ -1311,6 +1478,8 @@ func init() { proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent") proto.RegisterType((*ShardGtid)(nil), "binlogdata.ShardGtid") proto.RegisterType((*VGtid)(nil), "binlogdata.VGtid") + proto.RegisterType((*KeyspaceShard)(nil), "binlogdata.KeyspaceShard") + proto.RegisterType((*Journal)(nil), "binlogdata.Journal") proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") @@ -1318,97 +1487,110 @@ func init() { proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse") proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) + proto.RegisterEnum("binlogdata.MigrationType", MigrationType_name, MigrationType_value) proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, 
BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_e3df2e837eaa5305) } - -var fileDescriptor_binlogdata_e3df2e837eaa5305 = []byte{ - // 1372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xdd, 0x72, 0xdb, 0x54, - 0x10, 0xae, 0x6d, 0xf9, 0x6f, 0x95, 0x26, 0xca, 0xc9, 0x0f, 0x9e, 0x0c, 0x65, 0x82, 0x06, 0x68, - 0xc8, 0x0c, 0x4e, 0x31, 0x50, 0xae, 0xa0, 0xe3, 0x1f, 0xd5, 0x75, 0xab, 0xd8, 0xe9, 0xb1, 0x9a, - 0x32, 0xbd, 0xd1, 0x28, 0xd2, 0x71, 0xa2, 0x89, 0x2c, 0x39, 0xd2, 0xb1, 0x83, 0x1f, 0x80, 0xe1, - 0x01, 0xb8, 0xe5, 0x05, 0xb8, 0xe3, 0x05, 0xb8, 0x63, 0x78, 0x13, 0xde, 0x83, 0x39, 0x3f, 0x92, - 0xed, 0xb4, 0xb4, 0x81, 0x19, 0x2e, 0xb8, 0xc9, 0xec, 0xff, 0xd9, 0xfd, 0x76, 0xbd, 0xda, 0x80, - 0x76, 0xe6, 0x87, 0x41, 0x74, 0xee, 0x39, 0xd4, 0xa9, 0x4f, 0xe2, 0x88, 0x46, 0x08, 0x16, 0x92, - 0x3d, 0x75, 0x46, 0xe3, 0x89, 0x2b, 0x14, 0x7b, 0xea, 0xd5, 0x94, 0xc4, 0x73, 0xc9, 0xac, 0xd3, - 0x68, 0x12, 0x2d, 0xbc, 0xf4, 0x63, 0x28, 0xb7, 0x2f, 0x9c, 0x38, 0x21, 0x14, 0xed, 0x42, 0xc9, - 0x0d, 0x7c, 0x12, 0xd2, 0x5a, 0x6e, 0x3f, 0x77, 0x50, 0xc4, 0x92, 0x43, 0x08, 0x14, 0x37, 0x0a, - 0xc3, 0x5a, 0x9e, 0x4b, 0x39, 0xcd, 0x6c, 0x13, 0x12, 0xcf, 0x48, 0x5c, 0x2b, 0x08, 0x5b, 0xc1, - 0xe9, 0x7f, 0x16, 0x60, 0xb3, 0xc5, 0xf3, 0xb0, 0x62, 0x27, 0x4c, 0x1c, 0x97, 0xfa, 0x51, 0x88, - 0xba, 0x00, 0x09, 0x75, 0x28, 0x19, 0x93, 0x90, 0x26, 0xb5, 0xdc, 0x7e, 0xe1, 0x40, 0x6d, 0xdc, - 0xaf, 0x2f, 0x55, 0xf0, 0x9a, 0x4b, 0x7d, 0x98, 0xda, 0xe3, 0x25, 0x57, 0xd4, 0x00, 0x95, 0xcc, - 0x48, 0x48, 0x6d, 0x1a, 0x5d, 0x92, 0xb0, 0xa6, 0xec, 0xe7, 0x0e, 0xd4, 0xc6, 0x66, 0x5d, 0x14, - 0x68, 0x30, 0x8d, 0xc5, 0x14, 0x18, 0x48, 0x46, 0xef, 0xfd, 0x91, 0x87, 0x6a, 0x16, 0x0d, 0x99, - 0x50, 0x71, 0x1d, 0x4a, 0xce, 0xa3, 0x78, 0xce, 0xcb, 0x5c, 0x6f, 0x3c, 0xb8, 0x65, 0x22, 0xf5, - 0xb6, 0xf4, 0xc3, 0x59, 0x04, 0xf4, 0x19, 0x94, 0x5d, 0x81, 0x1e, 0x47, 
0x47, 0x6d, 0x6c, 0x2d, - 0x07, 0x93, 0xc0, 0xe2, 0xd4, 0x06, 0x69, 0x50, 0x48, 0xae, 0x02, 0x0e, 0xd9, 0x1a, 0x66, 0xa4, - 0xfe, 0x4b, 0x0e, 0x2a, 0x69, 0x5c, 0xb4, 0x05, 0x1b, 0x2d, 0xd3, 0x7e, 0xd1, 0xc7, 0x46, 0x7b, - 0xd0, 0xed, 0xf7, 0x5e, 0x19, 0x1d, 0xed, 0x0e, 0x5a, 0x83, 0x4a, 0xcb, 0xb4, 0x5b, 0x46, 0xb7, - 0xd7, 0xd7, 0x72, 0xe8, 0x2e, 0x54, 0x5b, 0xa6, 0xdd, 0x1e, 0x1c, 0x1f, 0xf7, 0x2c, 0x2d, 0x8f, - 0x36, 0x40, 0x6d, 0x99, 0x36, 0x1e, 0x98, 0x66, 0xab, 0xd9, 0x7e, 0xa6, 0x15, 0xd0, 0x0e, 0x6c, - 0xb6, 0x4c, 0xbb, 0x73, 0x6c, 0xda, 0x1d, 0xe3, 0x04, 0x1b, 0xed, 0xa6, 0x65, 0x74, 0x34, 0x05, - 0x01, 0x94, 0x98, 0xb8, 0x63, 0x6a, 0x45, 0x49, 0x0f, 0x0d, 0x4b, 0x2b, 0xc9, 0x70, 0xbd, 0xfe, - 0xd0, 0xc0, 0x96, 0x56, 0x96, 0xec, 0x8b, 0x93, 0x4e, 0xd3, 0x32, 0xb4, 0x8a, 0x64, 0x3b, 0x86, - 0x69, 0x58, 0x86, 0x56, 0x7d, 0xaa, 0x54, 0xf2, 0x5a, 0xe1, 0xa9, 0x52, 0x29, 0x68, 0x8a, 0xfe, - 0x53, 0x0e, 0x76, 0x86, 0x34, 0x26, 0xce, 0xf8, 0x19, 0x99, 0x63, 0x27, 0x3c, 0x27, 0x98, 0x5c, - 0x4d, 0x49, 0x42, 0xd1, 0x1e, 0x54, 0x26, 0x51, 0xe2, 0x33, 0xec, 0x38, 0xc0, 0x55, 0x9c, 0xf1, - 0xe8, 0x08, 0xaa, 0x97, 0x64, 0x6e, 0xc7, 0xcc, 0x5e, 0x02, 0x86, 0xea, 0xd9, 0x40, 0x66, 0x91, - 0x2a, 0x97, 0x92, 0x5a, 0xc6, 0xb7, 0xf0, 0x6e, 0x7c, 0xf5, 0x11, 0xec, 0xde, 0x4c, 0x2a, 0x99, - 0x44, 0x61, 0x42, 0x90, 0x09, 0x48, 0x38, 0xda, 0x74, 0xd1, 0x5b, 0x9e, 0x9f, 0xda, 0xb8, 0xf7, - 0xd6, 0x01, 0xc0, 0x9b, 0x67, 0x37, 0x45, 0xfa, 0xf7, 0xb0, 0x25, 0xde, 0xb1, 0x9c, 0xb3, 0x80, - 0x24, 0xb7, 0x29, 0x7d, 0x17, 0x4a, 0x94, 0x1b, 0xd7, 0xf2, 0xfb, 0x85, 0x83, 0x2a, 0x96, 0xdc, - 0x3f, 0xad, 0xd0, 0x83, 0xed, 0xd5, 0x97, 0xff, 0x93, 0xfa, 0xbe, 0x04, 0x05, 0x4f, 0x03, 0x82, - 0xb6, 0xa1, 0x38, 0x76, 0xa8, 0x7b, 0x21, 0xab, 0x11, 0x0c, 0x2b, 0x65, 0xe4, 0x07, 0x94, 0xc4, - 0xbc, 0x85, 0x55, 0x2c, 0x39, 0xfd, 0x01, 0x94, 0x1e, 0x73, 0x0a, 0x7d, 0x02, 0xc5, 0x78, 0xca, - 0x6a, 0x15, 0x3f, 0x75, 0x6d, 0x39, 0x01, 0x16, 0x18, 0x0b, 0xb5, 0xfe, 0x73, 0x1e, 0xd6, 0x44, - 0x42, 0xc3, 
0x68, 0x1a, 0xbb, 0x84, 0x21, 0x78, 0x49, 0xe6, 0xc9, 0xc4, 0x71, 0x49, 0x8a, 0x60, - 0xca, 0xb3, 0x64, 0x92, 0x0b, 0x27, 0xf6, 0xe4, 0xab, 0x82, 0x41, 0x5f, 0x81, 0xca, 0x91, 0xa4, - 0x36, 0x9d, 0x4f, 0x08, 0xc7, 0x70, 0xbd, 0xb1, 0xbd, 0x18, 0x2a, 0x8e, 0x13, 0xb5, 0xe6, 0x13, - 0x82, 0x81, 0x66, 0xf4, 0xea, 0x24, 0x2a, 0xb7, 0x98, 0xc4, 0x45, 0xff, 0x8a, 0x2b, 0xfd, 0x3b, - 0xcc, 0xc0, 0x28, 0xc9, 0x28, 0x4b, 0xb5, 0x0a, 0x38, 0x52, 0x80, 0x50, 0x1d, 0x4a, 0x51, 0x68, - 0x7b, 0x5e, 0x50, 0x2b, 0xf3, 0x34, 0xdf, 0x5b, 0xb6, 0x1d, 0x84, 0x9d, 0x8e, 0xd9, 0x14, 0x2d, - 0x29, 0x46, 0x61, 0xc7, 0x0b, 0xf4, 0xe7, 0x50, 0xc5, 0xd1, 0x75, 0xfb, 0x82, 0x27, 0xa0, 0x43, - 0xe9, 0x8c, 0x8c, 0xa2, 0x98, 0xc8, 0xae, 0x82, 0xdc, 0x7a, 0x38, 0xba, 0xc6, 0x52, 0x83, 0xf6, - 0xa1, 0xe8, 0x8c, 0xd2, 0xc6, 0xac, 0x9a, 0x08, 0x85, 0xee, 0x40, 0x05, 0x47, 0xd7, 0x7c, 0x53, - 0xa2, 0x7b, 0x20, 0x10, 0xb1, 0x43, 0x67, 0x9c, 0xc2, 0x5d, 0xe5, 0x92, 0xbe, 0x33, 0x26, 0xe8, - 0x21, 0xa8, 0x71, 0x74, 0x6d, 0xbb, 0xfc, 0x79, 0x31, 0xb6, 0x6a, 0x63, 0x67, 0xa5, 0x95, 0x69, - 0x72, 0x18, 0xe2, 0x94, 0x4c, 0xf4, 0xe7, 0x00, 0x8f, 0x7d, 0x12, 0x78, 0xb7, 0x7a, 0xe4, 0x23, - 0x06, 0x1f, 0x09, 0xbc, 0x34, 0xfe, 0x9a, 0x4c, 0x99, 0x47, 0xc0, 0x52, 0xc7, 0x80, 0x18, 0xb2, - 0x6e, 0x77, 0xa9, 0xef, 0xfd, 0x8b, 0x19, 0x41, 0xa0, 0x9c, 0x53, 0xdf, 0xe3, 0xc3, 0x51, 0xc5, - 0x9c, 0xd6, 0x1f, 0x41, 0xf1, 0x94, 0x87, 0x7b, 0x08, 0x2a, 0xb7, 0xb2, 0x99, 0x38, 0x9d, 0xd8, - 0x95, 0x32, 0xb3, 0xa7, 0x31, 0x24, 0x29, 0x99, 0xe8, 0xbf, 0xe6, 0xa1, 0x74, 0x2a, 0x6a, 0x3c, - 0x04, 0x85, 0x0f, 0x9f, 0xf8, 0x9e, 0xec, 0x2e, 0xfb, 0x0a, 0x0b, 0x3e, 0x7e, 0xdc, 0x06, 0xbd, - 0x0f, 0x55, 0xea, 0x8f, 0x49, 0x42, 0x9d, 0xf1, 0x84, 0x67, 0x59, 0xc0, 0x0b, 0xc1, 0x9b, 0x32, - 0x65, 0x1f, 0x0d, 0x36, 0x32, 0x0a, 0x17, 0x31, 0x12, 0x7d, 0x0e, 0x55, 0xd6, 0x19, 0xfe, 0x8d, - 0xab, 0x15, 0x79, 0xab, 0xb7, 0x6f, 0xf4, 0x85, 0x3f, 0x8b, 0x2b, 0x71, 0xda, 0xeb, 0xaf, 0x41, - 0xe5, 0x58, 0x4a, 0x27, 0x31, 0xab, 0xbb, 0xab, 
0xb3, 0x9a, 0xf6, 0x0c, 0xc3, 0x68, 0xd1, 0xbf, - 0xfb, 0x50, 0x9c, 0xf1, 0x94, 0xca, 0xf2, 0x5b, 0xbb, 0x5c, 0x1c, 0x07, 0x45, 0xe8, 0xd1, 0x87, - 0xb0, 0xe6, 0x4e, 0xe3, 0x98, 0x7f, 0x9c, 0xfd, 0x31, 0xa9, 0x6d, 0xf3, 0xda, 0x54, 0x29, 0xb3, - 0xfc, 0x31, 0xd1, 0x7f, 0xcc, 0xc3, 0xfa, 0xa9, 0x58, 0x5f, 0xe9, 0xca, 0x7c, 0x04, 0x5b, 0x64, - 0x34, 0x22, 0x2e, 0xf5, 0x67, 0xc4, 0x76, 0x9d, 0x20, 0x20, 0xb1, 0xed, 0x7b, 0x72, 0xc4, 0x37, - 0xea, 0xe2, 0x8c, 0x69, 0x73, 0x79, 0xaf, 0x83, 0x37, 0x33, 0x5b, 0x29, 0xf2, 0x90, 0x01, 0x5b, - 0xfe, 0x78, 0x4c, 0x3c, 0xdf, 0xa1, 0xcb, 0x01, 0xc4, 0x0f, 0x60, 0x47, 0x4e, 0xd3, 0xa9, 0xd5, - 0x75, 0x28, 0x59, 0x84, 0xc9, 0x3c, 0xb2, 0x30, 0x1f, 0xb3, 0x9f, 0x77, 0x7c, 0x9e, 0x6d, 0xe1, - 0xbb, 0xd2, 0xd3, 0xe2, 0x42, 0x2c, 0x95, 0x2b, 0x1b, 0x5e, 0xb9, 0xb1, 0xe1, 0x17, 0x9b, 0xa0, - 0xf8, 0xae, 0x4d, 0xa0, 0x7f, 0x03, 0x1b, 0x19, 0x10, 0x72, 0x83, 0x1f, 0x42, 0x89, 0xf7, 0x26, - 0x1d, 0x41, 0xf4, 0xfa, 0x18, 0x61, 0x69, 0xa1, 0xff, 0x90, 0x07, 0x94, 0xfa, 0x47, 0xd7, 0xc9, - 0xff, 0x14, 0xcc, 0x6d, 0x28, 0x72, 0xb9, 0x44, 0x52, 0x30, 0x0c, 0x87, 0xc0, 0x49, 0xe8, 0xe4, - 0x32, 0x83, 0x51, 0x38, 0x3f, 0x67, 0x7f, 0x31, 0x49, 0xa6, 0x01, 0xc5, 0xd2, 0x42, 0xff, 0x2d, - 0x07, 0x5b, 0x2b, 0x38, 0x48, 0x2c, 0x17, 0x5b, 0x25, 0xf7, 0xf7, 0x5b, 0x05, 0x1d, 0x40, 0x65, - 0x72, 0xf9, 0x96, 0xed, 0x93, 0x69, 0xdf, 0xf8, 0xb3, 0xfc, 0x00, 0x94, 0x38, 0xba, 0x4e, 0x6a, - 0x0a, 0xf7, 0x5c, 0x5e, 0xb5, 0x5c, 0xce, 0xf6, 0xf5, 0x4a, 0x1d, 0x2b, 0xfb, 0x5a, 0x68, 0x0e, - 0xbf, 0x05, 0x75, 0x69, 0xed, 0xb3, 0xcb, 0xac, 0xd7, 0xed, 0x0f, 0xb0, 0xa1, 0xdd, 0x41, 0x15, - 0x50, 0x86, 0xd6, 0xe0, 0x44, 0xcb, 0x31, 0xca, 0xf8, 0xce, 0x68, 0x8b, 0x6b, 0x8f, 0x51, 0xb6, - 0x34, 0x2a, 0x1c, 0xfe, 0x9e, 0x03, 0x58, 0x6c, 0x18, 0xa4, 0x42, 0xf9, 0x45, 0xff, 0x59, 0x7f, - 0xf0, 0xb2, 0x2f, 0x02, 0x74, 0xad, 0x5e, 0x47, 0xcb, 0xa1, 0x2a, 0x14, 0xc5, 0xf9, 0x98, 0x67, - 0x2f, 0xc8, 0xdb, 0xb1, 0xc0, 0x0e, 0xcb, 0xec, 0x70, 0x54, 0x50, 0x19, 0x0a, 0xd9, 
0x79, 0x28, - 0xef, 0xc1, 0x12, 0x0b, 0x88, 0x8d, 0x13, 0xb3, 0xd9, 0x36, 0xb4, 0x32, 0x53, 0x64, 0x97, 0x21, - 0x40, 0x29, 0x3d, 0x0b, 0x99, 0x27, 0x3b, 0x26, 0x81, 0xbd, 0x33, 0xb0, 0x9e, 0x18, 0x58, 0x53, - 0x99, 0x0c, 0x0f, 0x5e, 0x6a, 0x6b, 0x4c, 0xf6, 0xb8, 0x67, 0x98, 0x1d, 0xed, 0x2e, 0xbb, 0x26, - 0x9f, 0x18, 0x4d, 0x6c, 0xb5, 0x8c, 0xa6, 0xa5, 0xad, 0x33, 0xcd, 0x29, 0x4f, 0x70, 0xa3, 0xf5, - 0xe9, 0xab, 0xfb, 0x33, 0x9f, 0x92, 0x24, 0xa9, 0xfb, 0xd1, 0x91, 0xa0, 0x8e, 0xce, 0xa3, 0xa3, - 0x19, 0x3d, 0xe2, 0xff, 0xa3, 0x1c, 0x2d, 0x7e, 0x08, 0x67, 0x25, 0x2e, 0xf9, 0xe2, 0xaf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x59, 0xa0, 0xff, 0x30, 0xff, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_db2d20dd0016de21) } + +var fileDescriptor_binlogdata_db2d20dd0016de21 = []byte{ + // 1558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xdb, 0xca, + 0x11, 0x35, 0x09, 0xf0, 0xd5, 0x90, 0x28, 0x68, 0xf4, 0x08, 0xa3, 0x8a, 0x53, 0x0a, 0x2a, 0x8e, + 0x14, 0x55, 0x85, 0x72, 0x98, 0xc4, 0x59, 0x39, 0x0e, 0x1f, 0xb0, 0x4c, 0x09, 0x22, 0xe5, 0x21, + 0x24, 0xa7, 0xbc, 0x41, 0x41, 0xc4, 0x48, 0x42, 0x04, 0x02, 0x34, 0x30, 0xa4, 0xa2, 0x0f, 0x48, + 0xe5, 0x03, 0xb2, 0xcd, 0x0f, 0x64, 0x9f, 0x6d, 0xb6, 0xd9, 0xe7, 0x0b, 0xb2, 0xca, 0x7f, 0xdc, + 0x9a, 0x07, 0x40, 0x42, 0xf6, 0xb5, 0xe5, 0x5b, 0x75, 0x17, 0x77, 0xc3, 0xea, 0xe9, 0xe9, 0xe7, + 0x41, 0x4f, 0x77, 0x13, 0xf4, 0x4b, 0x3f, 0x0c, 0xa2, 0x6b, 0xcf, 0xa5, 0x6e, 0x73, 0x1a, 0x47, + 0x34, 0x42, 0xb0, 0xe0, 0xec, 0x68, 0x73, 0x1a, 0x4f, 0xc7, 0xe2, 0x62, 0x47, 0xfb, 0x30, 0x23, + 0xf1, 0xbd, 0x3c, 0xd4, 0x69, 0x34, 0x8d, 0x16, 0x5a, 0xc6, 0x29, 0x54, 0xba, 0x37, 0x6e, 0x9c, + 0x10, 0x8a, 0xb6, 0xa1, 0x3c, 0x0e, 0x7c, 0x12, 0xd2, 0x46, 0x61, 0xb7, 0xb0, 0x5f, 0xc2, 0xf2, + 0x84, 0x10, 0xa8, 0xe3, 0x28, 0x0c, 0x1b, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x48, 0x3c, 0x27, + 0x71, 0x43, 0x11, 0xb2, 0xe2, 0x64, 
0xfc, 0x5f, 0x81, 0xf5, 0x0e, 0x8f, 0xc3, 0x8e, 0xdd, 0x30, + 0x71, 0xc7, 0xd4, 0x8f, 0x42, 0x74, 0x04, 0x90, 0x50, 0x97, 0x92, 0x09, 0x09, 0x69, 0xd2, 0x28, + 0xec, 0x2a, 0xfb, 0x5a, 0x6b, 0xaf, 0xb9, 0x94, 0xc1, 0x47, 0x2a, 0xcd, 0x51, 0x2a, 0x8f, 0x97, + 0x54, 0x51, 0x0b, 0x34, 0x32, 0x27, 0x21, 0x75, 0x68, 0x74, 0x4b, 0xc2, 0x86, 0xba, 0x5b, 0xd8, + 0xd7, 0x5a, 0xeb, 0x4d, 0x91, 0xa0, 0xc9, 0x6e, 0x6c, 0x76, 0x81, 0x81, 0x64, 0xf4, 0xce, 0x7f, + 0x8a, 0x50, 0xcb, 0xac, 0x21, 0x0b, 0xaa, 0x63, 0x97, 0x92, 0xeb, 0x28, 0xbe, 0xe7, 0x69, 0xd6, + 0x5b, 0xcf, 0x1f, 0x19, 0x48, 0xb3, 0x2b, 0xf5, 0x70, 0x66, 0x01, 0xfd, 0x0a, 0x2a, 0x63, 0x81, + 0x1e, 0x47, 0x47, 0x6b, 0x6d, 0x2c, 0x1b, 0x93, 0xc0, 0xe2, 0x54, 0x06, 0xe9, 0xa0, 0x24, 0x1f, + 0x02, 0x0e, 0xd9, 0x0a, 0x66, 0xa4, 0xf1, 0xcf, 0x02, 0x54, 0x53, 0xbb, 0x68, 0x03, 0xd6, 0x3a, + 0x96, 0x73, 0x3e, 0xc0, 0x66, 0x77, 0x78, 0x34, 0xe8, 0xbf, 0x37, 0x7b, 0xfa, 0x13, 0xb4, 0x02, + 0xd5, 0x8e, 0xe5, 0x74, 0xcc, 0xa3, 0xfe, 0x40, 0x2f, 0xa0, 0x55, 0xa8, 0x75, 0x2c, 0xa7, 0x3b, + 0x3c, 0x3d, 0xed, 0xdb, 0x7a, 0x11, 0xad, 0x81, 0xd6, 0xb1, 0x1c, 0x3c, 0xb4, 0xac, 0x4e, 0xbb, + 0x7b, 0xa2, 0x2b, 0x68, 0x0b, 0xd6, 0x3b, 0x96, 0xd3, 0x3b, 0xb5, 0x9c, 0x9e, 0x79, 0x86, 0xcd, + 0x6e, 0xdb, 0x36, 0x7b, 0xba, 0x8a, 0x00, 0xca, 0x8c, 0xdd, 0xb3, 0xf4, 0x92, 0xa4, 0x47, 0xa6, + 0xad, 0x97, 0xa5, 0xb9, 0xfe, 0x60, 0x64, 0x62, 0x5b, 0xaf, 0xc8, 0xe3, 0xf9, 0x59, 0xaf, 0x6d, + 0x9b, 0x7a, 0x55, 0x1e, 0x7b, 0xa6, 0x65, 0xda, 0xa6, 0x5e, 0x3b, 0x56, 0xab, 0x45, 0x5d, 0x39, + 0x56, 0xab, 0x8a, 0xae, 0x1a, 0x7f, 0x2f, 0xc0, 0xd6, 0x88, 0xc6, 0xc4, 0x9d, 0x9c, 0x90, 0x7b, + 0xec, 0x86, 0xd7, 0x04, 0x93, 0x0f, 0x33, 0x92, 0x50, 0xb4, 0x03, 0xd5, 0x69, 0x94, 0xf8, 0x0c, + 0x3b, 0x0e, 0x70, 0x0d, 0x67, 0x67, 0x74, 0x08, 0xb5, 0x5b, 0x72, 0xef, 0xc4, 0x4c, 0x5e, 0x02, + 0x86, 0x9a, 0x59, 0x41, 0x66, 0x96, 0xaa, 0xb7, 0x92, 0x5a, 0xc6, 0x57, 0xf9, 0x32, 0xbe, 0xc6, + 0x15, 0x6c, 0x3f, 0x0c, 0x2a, 0x99, 0x46, 0x61, 0x42, 0x90, 0x05, 0x48, 
0x28, 0x3a, 0x74, 0xf1, + 0x6d, 0x79, 0x7c, 0x5a, 0xeb, 0xe9, 0x67, 0x0b, 0x00, 0xaf, 0x5f, 0x3e, 0x64, 0x19, 0x7f, 0x81, + 0x0d, 0xe1, 0xc7, 0x76, 0x2f, 0x03, 0x92, 0x3c, 0x26, 0xf5, 0x6d, 0x28, 0x53, 0x2e, 0xdc, 0x28, + 0xee, 0x2a, 0xfb, 0x35, 0x2c, 0x4f, 0x5f, 0x9b, 0xa1, 0x07, 0x9b, 0x79, 0xcf, 0xdf, 0x4b, 0x7e, + 0xbf, 0x05, 0x15, 0xcf, 0x02, 0x82, 0x36, 0xa1, 0x34, 0x71, 0xe9, 0xf8, 0x46, 0x66, 0x23, 0x0e, + 0x2c, 0x95, 0x2b, 0x3f, 0xa0, 0x24, 0xe6, 0x9f, 0xb0, 0x86, 0xe5, 0xc9, 0x78, 0x0e, 0xe5, 0xd7, + 0x9c, 0x42, 0xbf, 0x80, 0x52, 0x3c, 0x63, 0xb9, 0x8a, 0xa7, 0xae, 0x2f, 0x07, 0xc0, 0x0c, 0x63, + 0x71, 0x6d, 0xfc, 0xa3, 0x08, 0x2b, 0x22, 0xa0, 0x51, 0x34, 0x8b, 0xc7, 0x84, 0x21, 0x78, 0x4b, + 0xee, 0x93, 0xa9, 0x3b, 0x26, 0x29, 0x82, 0xe9, 0x99, 0x05, 0x93, 0xdc, 0xb8, 0xb1, 0x27, 0xbd, + 0x8a, 0x03, 0xfa, 0x1d, 0x68, 0x1c, 0x49, 0xea, 0xd0, 0xfb, 0x29, 0xe1, 0x18, 0xd6, 0x5b, 0x9b, + 0x8b, 0xa2, 0xe2, 0x38, 0x51, 0xfb, 0x7e, 0x4a, 0x30, 0xd0, 0x8c, 0xce, 0x57, 0xa2, 0xfa, 0x88, + 0x4a, 0x5c, 0x7c, 0xbf, 0x52, 0xee, 0xfb, 0x1d, 0x64, 0x60, 0x94, 0xa5, 0x95, 0xa5, 0x5c, 0x05, + 0x1c, 0x29, 0x40, 0xa8, 0x09, 0xe5, 0x28, 0x74, 0x3c, 0x2f, 0x68, 0x54, 0x78, 0x98, 0x3f, 0x5a, + 0x96, 0x1d, 0x86, 0xbd, 0x9e, 0xd5, 0x16, 0x9f, 0xa4, 0x14, 0x85, 0x3d, 0x2f, 0x30, 0xde, 0x42, + 0x0d, 0x47, 0x77, 0xdd, 0x1b, 0x1e, 0x80, 0x01, 0xe5, 0x4b, 0x72, 0x15, 0xc5, 0x44, 0x7e, 0x55, + 0x90, 0x5d, 0x0f, 0x47, 0x77, 0x58, 0xde, 0xa0, 0x5d, 0x28, 0xb9, 0x57, 0xe9, 0x87, 0xc9, 0x8b, + 0x88, 0x0b, 0xc3, 0x85, 0x2a, 0x8e, 0xee, 0x78, 0xa7, 0x44, 0x4f, 0x41, 0x20, 0xe2, 0x84, 0xee, + 0x24, 0x85, 0xbb, 0xc6, 0x39, 0x03, 0x77, 0x42, 0xd0, 0x0b, 0xd0, 0xe2, 0xe8, 0xce, 0x19, 0x73, + 0xf7, 0xa2, 0x6c, 0xb5, 0xd6, 0x56, 0xee, 0x53, 0xa6, 0xc1, 0x61, 0x88, 0x53, 0x32, 0x31, 0xde, + 0x02, 0xbc, 0xf6, 0x49, 0xe0, 0x3d, 0xca, 0xc9, 0xcf, 0x19, 0x7c, 0x24, 0xf0, 0x52, 0xfb, 0x2b, + 0x32, 0x64, 0x6e, 0x01, 0xcb, 0x3b, 0x06, 0xc4, 0x88, 0x7d, 0xed, 0x23, 0xea, 0x7b, 0xdf, 0xa1, + 0x46, 0x10, 
0xa8, 0xd7, 0xd4, 0xf7, 0x78, 0x71, 0xd4, 0x30, 0xa7, 0x8d, 0x57, 0x50, 0xba, 0xe0, + 0xe6, 0x5e, 0x80, 0xc6, 0xa5, 0x1c, 0xc6, 0x4e, 0x2b, 0x36, 0x97, 0x66, 0xe6, 0x1a, 0x43, 0x92, + 0x92, 0x89, 0xd1, 0x86, 0xd5, 0x13, 0xe9, 0x96, 0x0b, 0x7c, 0x7d, 0x5c, 0xc6, 0xbf, 0x8a, 0x50, + 0x39, 0x8e, 0x66, 0x71, 0xe8, 0x06, 0xa8, 0x0e, 0x45, 0xdf, 0xe3, 0x7a, 0x0a, 0x2e, 0xfa, 0x1e, + 0xfa, 0x23, 0xd4, 0x27, 0xfe, 0x75, 0xec, 0xb2, 0x7a, 0x10, 0xa5, 0x5d, 0xe4, 0x35, 0xf3, 0xe3, + 0xe5, 0xc8, 0x4e, 0x53, 0x09, 0x5e, 0xdf, 0xab, 0x93, 0xe5, 0xe3, 0x52, 0xc5, 0x2a, 0xb9, 0x8a, + 0x7d, 0x06, 0xf5, 0x20, 0x1a, 0xbb, 0x81, 0x93, 0xf5, 0x2a, 0x95, 0x07, 0xb5, 0xca, 0xb9, 0x67, + 0x69, 0xc3, 0x7a, 0x80, 0x4b, 0xe9, 0x91, 0xb8, 0xa0, 0x97, 0xb0, 0x32, 0x75, 0x63, 0xea, 0x8f, + 0xfd, 0xa9, 0xcb, 0xa6, 0x7d, 0x99, 0x2b, 0xe6, 0xc2, 0xce, 0xe1, 0x86, 0x73, 0xe2, 0xe8, 0x67, + 0xb0, 0x12, 0x93, 0x39, 0x89, 0x13, 0xe2, 0x39, 0xcc, 0x6f, 0x65, 0x57, 0xd9, 0x57, 0xb0, 0x96, + 0xf2, 0xfa, 0x5e, 0x62, 0xfc, 0xaf, 0x08, 0xe5, 0x0b, 0x51, 0x5d, 0x07, 0xa0, 0x72, 0x6c, 0xc4, + 0x24, 0xdf, 0x5e, 0x76, 0x22, 0x24, 0x38, 0x30, 0x5c, 0x06, 0xfd, 0x04, 0x6a, 0xd4, 0x9f, 0x90, + 0x84, 0xba, 0x93, 0x29, 0x07, 0x53, 0xc1, 0x0b, 0xc6, 0xa7, 0x6a, 0x84, 0x8d, 0x6b, 0xf6, 0x58, + 0x05, 0x3c, 0x8c, 0x44, 0xbf, 0x86, 0x1a, 0x7b, 0x13, 0x7c, 0xbb, 0x68, 0x94, 0xf8, 0x23, 0xdb, + 0x7c, 0xf0, 0x22, 0xb8, 0x5b, 0x5c, 0x8d, 0xd3, 0x57, 0xf6, 0x7b, 0xd0, 0x78, 0x15, 0x4b, 0x25, + 0xd1, 0x25, 0xb6, 0xf3, 0x5d, 0x22, 0x7d, 0x2d, 0x18, 0xae, 0x16, 0x2f, 0x67, 0x0f, 0x4a, 0x73, + 0x1e, 0x52, 0x45, 0x6e, 0x39, 0xcb, 0xc9, 0x71, 0xd8, 0xc5, 0x3d, 0x1b, 0x21, 0x7f, 0x16, 0x55, + 0xd4, 0xa8, 0x7e, 0x3c, 0x42, 0x64, 0x81, 0xe1, 0x54, 0x86, 0x21, 0x3c, 0x9e, 0xc5, 0x31, 0xdf, + 0xa2, 0xfc, 0x09, 0x69, 0x6c, 0x72, 0x28, 0x34, 0xc9, 0xb3, 0xfd, 0x09, 0x31, 0xfe, 0x56, 0x84, + 0xfa, 0x85, 0x98, 0x33, 0xe9, 0x6c, 0x7b, 0x05, 0x1b, 0xe4, 0xea, 0x8a, 0x8c, 0xa9, 0x3f, 0x27, + 0xce, 0xd8, 0x0d, 0x02, 0x12, 0x3b, 0xb2, 0x60, 
0xb5, 0xd6, 0x5a, 0x53, 0xec, 0x9b, 0x5d, 0xce, + 0xef, 0xf7, 0xf0, 0x7a, 0x26, 0x2b, 0x59, 0x1e, 0x32, 0x61, 0xc3, 0x9f, 0x4c, 0x88, 0xe7, 0xbb, + 0x74, 0xd9, 0x80, 0xe8, 0x54, 0x5b, 0xf2, 0xd9, 0x5f, 0xd8, 0x47, 0x2e, 0x25, 0x0b, 0x33, 0x99, + 0x46, 0x66, 0xe6, 0x19, 0xab, 0xea, 0xf8, 0x3a, 0x1b, 0x97, 0xab, 0x52, 0xd3, 0xe6, 0x4c, 0x2c, + 0x2f, 0x73, 0xa3, 0x58, 0x7d, 0x30, 0x8a, 0x17, 0x2d, 0xbb, 0xf4, 0xa5, 0x96, 0x6d, 0xbc, 0x84, + 0xb5, 0x0c, 0x08, 0x39, 0x6a, 0x0f, 0xa0, 0xcc, 0x3f, 0x65, 0xda, 0x2b, 0xd0, 0xc7, 0x55, 0x87, + 0xa5, 0x84, 0xf1, 0xd7, 0x22, 0xa0, 0x54, 0x3f, 0xba, 0x4b, 0x7e, 0xa0, 0x60, 0x6e, 0x42, 0x89, + 0xf3, 0x25, 0x92, 0xe2, 0xc0, 0x70, 0x08, 0xdc, 0x84, 0x4e, 0x6f, 0x33, 0x18, 0x85, 0xf2, 0x5b, + 0xf6, 0x8b, 0x49, 0x32, 0x0b, 0x28, 0x96, 0x12, 0xc6, 0xbf, 0x0b, 0xb0, 0x91, 0xc3, 0x41, 0x62, + 0xb9, 0x68, 0xff, 0x85, 0x6f, 0x6f, 0xff, 0x68, 0x1f, 0xaa, 0xd3, 0xdb, 0xcf, 0x8c, 0x89, 0xec, + 0xf6, 0x93, 0xaf, 0xf8, 0xa7, 0xa0, 0xc6, 0xd1, 0x5d, 0xd2, 0x50, 0xb9, 0xe6, 0xf2, 0x4c, 0xe4, + 0x7c, 0x36, 0x58, 0x73, 0x79, 0xe4, 0x06, 0xab, 0xb8, 0x39, 0xf8, 0x03, 0x68, 0x4b, 0xf3, 0x99, + 0xad, 0xd0, 0xfd, 0xa3, 0xc1, 0x10, 0x9b, 0xfa, 0x13, 0x54, 0x05, 0x75, 0x64, 0x0f, 0xcf, 0xf4, + 0x02, 0xa3, 0xcc, 0x3f, 0x99, 0x5d, 0xb1, 0x96, 0x33, 0xca, 0x91, 0x42, 0xca, 0xc1, 0x7f, 0x0b, + 0x00, 0x8b, 0x86, 0x84, 0x34, 0xa8, 0x9c, 0x0f, 0x4e, 0x06, 0xc3, 0x77, 0x03, 0x61, 0xe0, 0xc8, + 0xee, 0xf7, 0xf4, 0x02, 0xaa, 0x41, 0x49, 0xec, 0xf9, 0x45, 0xe6, 0x41, 0x2e, 0xf9, 0x0a, 0xfb, + 0x07, 0x90, 0x6d, 0xf8, 0x2a, 0xaa, 0x80, 0x92, 0xed, 0xf1, 0x72, 0x71, 0x2f, 0x33, 0x83, 0xd8, + 0x3c, 0xb3, 0xda, 0x5d, 0x53, 0xaf, 0xb0, 0x8b, 0x6c, 0x85, 0x07, 0x28, 0xa7, 0xfb, 0x3b, 0xd3, + 0x64, 0x5b, 0x3f, 0x30, 0x3f, 0x43, 0xfb, 0x8d, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xf0, 0x9d, 0xbe, + 0xc2, 0x78, 0xaf, 0xfb, 0xa6, 0xd5, 0xd3, 0x57, 0xd9, 0xda, 0xff, 0xc6, 0x6c, 0x63, 0xbb, 0x63, + 0xb6, 0x6d, 0xbd, 0xce, 0x6e, 0x2e, 0x78, 0x80, 0x6b, 0xcc, 0xcd, 0xf1, 0xf0, 0x1c, 
0x0f, 0xda, + 0x96, 0xae, 0x1f, 0xec, 0xc1, 0x6a, 0x6e, 0xfe, 0x30, 0x5f, 0x76, 0xbb, 0x63, 0x99, 0x23, 0xfd, + 0x09, 0xa3, 0x47, 0x6f, 0xda, 0xb8, 0x37, 0xd2, 0x0b, 0x9d, 0x5f, 0xbe, 0xdf, 0x9b, 0xfb, 0x94, + 0x24, 0x49, 0xd3, 0x8f, 0x0e, 0x05, 0x75, 0x78, 0x1d, 0x1d, 0xce, 0xe9, 0x21, 0xff, 0x0b, 0x7a, + 0xb8, 0x78, 0x3e, 0x97, 0x65, 0xce, 0xf9, 0xcd, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xb4, + 0x72, 0xde, 0xde, 0x0e, 0x00, 0x00, } diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index 88ddfd485ca..ec8b4f71ab7 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -964,7 +964,7 @@ type Field struct { ColumnLength uint32 `protobuf:"varint,7,opt,name=column_length,json=columnLength,proto3" json:"column_length,omitempty"` // charset is actually a uint16. Only the lower 16 bits are used. Charset uint32 `protobuf:"varint,8,opt,name=charset,proto3" json:"charset,omitempty"` - // decimals is actualy a uint8. Only the lower 8 bits are used. + // decimals is actually a uint8. Only the lower 8 bits are used. Decimals uint32 `protobuf:"varint,9,opt,name=decimals,proto3" json:"decimals,omitempty"` // flags is actually a uint16. Only the lower 16 bits are used. 
Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index acb13b630d7..686c293a382 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -113,7 +113,7 @@ type TabletManagerClient interface { // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitSlave tells the tablet to reparent to the master unconditionnally + // InitSlave tells the tablet to reparent to the master unconditionally InitSlave(ctx context.Context, in *tabletmanagerdata.InitSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitSlaveResponse, error) // DemoteMaster tells the soon-to-be-former master it's gonna change DemoteMaster(ctx context.Context, in *tabletmanagerdata.DemoteMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemoteMasterResponse, error) @@ -667,7 +667,7 @@ type TabletManagerServer interface { // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(context.Context, *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitSlave tells the tablet to reparent to the master unconditionnally + // InitSlave tells the tablet to reparent to the master unconditionally InitSlave(context.Context, *tabletmanagerdata.InitSlaveRequest) (*tabletmanagerdata.InitSlaveResponse, error) // DemoteMaster tells the soon-to-be-former master it's gonna change DemoteMaster(context.Context, *tabletmanagerdata.DemoteMasterRequest) (*tabletmanagerdata.DemoteMasterResponse, error) diff --git 
a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 9e22bf51309..9d59160d863 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -505,7 +505,7 @@ func (m *Shard_ServedType) GetCells() []string { } // SourceShard represents a data source for filtered replication -// accross shards. When this is used in a destination shard, the master +// across shards. When this is used in a destination shard, the master // of that shard will run filtered replication. type Shard_SourceShard struct { // Uid is the unique ID for this SourceShard object. diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index a9b53a78efa..6881f6ed3ae 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -174,7 +174,7 @@ func (Code) EnumDescriptor() ([]byte, []int) { // LegacyErrorCode is the enum values for Errors. This type is deprecated. // Use Code instead. Background: In the initial design, we thought // that we may end up with a different list of canonical error codes -// than the ones defined by grpc. In hindisght, we realize that +// than the ones defined by grpc. In hindsight, we realize that // the grpc error codes are fairly generic and mostly sufficient. // In order to avoid confusion, this type will be deprecated in // favor of the new Code that matches exactly what grpc defines. diff --git a/go/vt/sqlannotation/sqlannotation.go b/go/vt/sqlannotation/sqlannotation.go index c8e20328720..5cebc2dd728 100644 --- a/go/vt/sqlannotation/sqlannotation.go +++ b/go/vt/sqlannotation/sqlannotation.go @@ -19,7 +19,7 @@ limitations under the License. // comments and parsing them. These annotations // are used during filtered-replication to route // the DML statement to the correct shard. -// TOOD(erez): Move the code for the "_stream" annotations +// TODO(erez): Move the code for the "_stream" annotations // from vttablet to here. 
package sqlannotation @@ -77,7 +77,7 @@ func AddKeyspaceIDs(sql string, keyspaceIDs [][]byte, marginComments string) str // to extract the keyspace id. // If a keyspace-id comment exists 'keyspaceID' is set to the parsed keyspace id // and err is set to nil; otherwise, if a filtered-replication-unfriendly comment exists -// or some other parsing error occured, keyspaceID is set to nil and err is set to a non-nil +// or some other parsing error occurred, keyspaceID is set to nil and err is set to a non-nil // error value. func ExtractKeyspaceIDS(sql string) (keyspaceIDs [][]byte, err error) { _, comments := sqlparser.SplitMarginComments(sql) diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 44d47cfcdcb..2ea7762ac31 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -739,6 +739,9 @@ type DDL struct { // VindexCols is set for AddColVindexStr. VindexCols []ColIdent + + // AutoIncSpec is set for AddAutoIncStr. + AutoIncSpec *AutoIncSpec } // DDL strings. @@ -755,6 +758,8 @@ const ( DropVschemaTableStr = "drop vschema table" AddColVindexStr = "on table add vindex" DropColVindexStr = "on table drop vindex" + AddSequenceStr = "add sequence" + AddAutoIncStr = "add auto_increment" // Vindex DDL param to specify the owner of a vindex VindexOwnerStr = "owner" @@ -813,6 +818,10 @@ func (node *DDL) Format(buf *TrackedBuffer) { } case DropColVindexStr: buf.Myprintf("alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) + case AddSequenceStr: + buf.Myprintf("alter vschema add sequence %v", node.Table) + case AddAutoIncStr: + buf.Myprintf("alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec) default: buf.Myprintf("%s table %v", node.Action, node.Table) } @@ -1352,6 +1361,23 @@ type VindexSpec struct { Params []VindexParam } +// AutoIncSpec defines and autoincrement value for a ADD AUTO_INCREMENT statement +type AutoIncSpec struct { + Column ColIdent + Sequence TableName +} + +// Format formats the node. 
+func (node *AutoIncSpec) Format(buf *TrackedBuffer) { + buf.Myprintf("%v ", node.Column) + buf.Myprintf("using %v", node.Sequence) +} + +func (node *AutoIncSpec) walkSubtree(visit Visit) error { + err := Walk(visit, node.Sequence, node.Column) + return err +} + // ParseParams parses the vindex parameter list, pulling out the special-case // "owner" parameter func (node *VindexSpec) ParseParams() (string, map[string]string) { diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index 3be2d623163..d321b2bc932 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -979,6 +979,10 @@ var ( input: "alter vschema drop vindex hash_vdx", }, { input: "alter vschema add table a", + }, { + input: "alter vschema add sequence a_seq", + }, { + input: "alter vschema on a add auto_increment id using a_seq", }, { input: "alter vschema drop table a", }, { diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index d3a3a84a603..94b2965fc5b 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -248,113 +248,108 @@ const VINDEXES = 57483 const STATUS = 57484 const VARIABLES = 57485 const WARNINGS = 57486 -const BEGIN = 57487 -const START = 57488 -const TRANSACTION = 57489 -const COMMIT = 57490 -const ROLLBACK = 57491 -const BIT = 57492 -const TINYINT = 57493 -const SMALLINT = 57494 -const MEDIUMINT = 57495 -const INT = 57496 -const INTEGER = 57497 -const BIGINT = 57498 -const INTNUM = 57499 -const REAL = 57500 -const DOUBLE = 57501 -const FLOAT_TYPE = 57502 -const DECIMAL = 57503 -const NUMERIC = 57504 -const TIME = 57505 -const TIMESTAMP = 57506 -const DATETIME = 57507 -const YEAR = 57508 -const CHAR = 57509 -const VARCHAR = 57510 -const BOOL = 57511 -const CHARACTER = 57512 -const VARBINARY = 57513 -const NCHAR = 57514 -const TEXT = 57515 -const TINYTEXT = 57516 -const MEDIUMTEXT = 57517 -const LONGTEXT = 57518 -const BLOB = 57519 -const TINYBLOB = 57520 -const MEDIUMBLOB = 57521 -const LONGBLOB = 57522 -const JSON 
= 57523 -const ENUM = 57524 -const GEOMETRY = 57525 -const POINT = 57526 -const LINESTRING = 57527 -const POLYGON = 57528 -const GEOMETRYCOLLECTION = 57529 -const MULTIPOINT = 57530 -const MULTILINESTRING = 57531 -const MULTIPOLYGON = 57532 -const NULLX = 57533 -const AUTO_INCREMENT = 57534 -const APPROXNUM = 57535 -const SIGNED = 57536 -const UNSIGNED = 57537 -const ZEROFILL = 57538 -const COLLATION = 57539 -const DATABASES = 57540 -const SCHEMAS = 57541 +const SEQUENCE = 57487 +const BEGIN = 57488 +const START = 57489 +const TRANSACTION = 57490 +const COMMIT = 57491 +const ROLLBACK = 57492 +const BIT = 57493 +const TINYINT = 57494 +const SMALLINT = 57495 +const MEDIUMINT = 57496 +const INT = 57497 +const INTEGER = 57498 +const BIGINT = 57499 +const INTNUM = 57500 +const REAL = 57501 +const DOUBLE = 57502 +const FLOAT_TYPE = 57503 +const DECIMAL = 57504 +const NUMERIC = 57505 +const TIME = 57506 +const TIMESTAMP = 57507 +const DATETIME = 57508 +const YEAR = 57509 +const CHAR = 57510 +const VARCHAR = 57511 +const BOOL = 57512 +const CHARACTER = 57513 +const VARBINARY = 57514 +const NCHAR = 57515 +const TEXT = 57516 +const TINYTEXT = 57517 +const MEDIUMTEXT = 57518 +const LONGTEXT = 57519 +const BLOB = 57520 +const TINYBLOB = 57521 +const MEDIUMBLOB = 57522 +const LONGBLOB = 57523 +const JSON = 57524 +const ENUM = 57525 +const GEOMETRY = 57526 +const POINT = 57527 +const LINESTRING = 57528 +const POLYGON = 57529 +const GEOMETRYCOLLECTION = 57530 +const MULTIPOINT = 57531 +const MULTILINESTRING = 57532 +const MULTIPOLYGON = 57533 +const NULLX = 57534 +const AUTO_INCREMENT = 57535 +const APPROXNUM = 57536 +const SIGNED = 57537 +const UNSIGNED = 57538 +const ZEROFILL = 57539 +const COLLATION = 57540 +const DATABASES = 57541 const TABLES = 57542 -const VITESS_KEYSPACES = 57543 -const VITESS_SHARDS = 57544 -const VITESS_TABLETS = 57545 -const VSCHEMA = 57546 -const VSCHEMA_TABLES = 57547 -const VITESS_TARGET = 57548 -const FULL = 57549 -const PROCESSLIST = 57550 -const 
COLUMNS = 57551 -const FIELDS = 57552 -const ENGINES = 57553 -const PLUGINS = 57554 -const NAMES = 57555 -const CHARSET = 57556 -const GLOBAL = 57557 -const SESSION = 57558 -const ISOLATION = 57559 -const LEVEL = 57560 -const READ = 57561 -const WRITE = 57562 -const ONLY = 57563 -const REPEATABLE = 57564 -const COMMITTED = 57565 -const UNCOMMITTED = 57566 -const SERIALIZABLE = 57567 -const CURRENT_TIMESTAMP = 57568 -const DATABASE = 57569 -const CURRENT_DATE = 57570 -const CURRENT_TIME = 57571 -const LOCALTIME = 57572 -const LOCALTIMESTAMP = 57573 -const UTC_DATE = 57574 -const UTC_TIME = 57575 -const UTC_TIMESTAMP = 57576 -const REPLACE = 57577 -const CONVERT = 57578 -const CAST = 57579 -const SUBSTR = 57580 -const SUBSTRING = 57581 -const GROUP_CONCAT = 57582 -const SEPARATOR = 57583 -const TIMESTAMPADD = 57584 -const TIMESTAMPDIFF = 57585 -const MATCH = 57586 -const AGAINST = 57587 -const BOOLEAN = 57588 -const LANGUAGE = 57589 -const WITH = 57590 -const QUERY = 57591 -const EXPANSION = 57592 -const UNUSED = 57593 +const VSCHEMA = 57543 +const FULL = 57544 +const PROCESSLIST = 57545 +const COLUMNS = 57546 +const FIELDS = 57547 +const ENGINES = 57548 +const PLUGINS = 57549 +const NAMES = 57550 +const CHARSET = 57551 +const GLOBAL = 57552 +const SESSION = 57553 +const ISOLATION = 57554 +const LEVEL = 57555 +const READ = 57556 +const WRITE = 57557 +const ONLY = 57558 +const REPEATABLE = 57559 +const COMMITTED = 57560 +const UNCOMMITTED = 57561 +const SERIALIZABLE = 57562 +const CURRENT_TIMESTAMP = 57563 +const DATABASE = 57564 +const CURRENT_DATE = 57565 +const CURRENT_TIME = 57566 +const LOCALTIME = 57567 +const LOCALTIMESTAMP = 57568 +const UTC_DATE = 57569 +const UTC_TIME = 57570 +const UTC_TIMESTAMP = 57571 +const REPLACE = 57572 +const CONVERT = 57573 +const CAST = 57574 +const SUBSTR = 57575 +const SUBSTRING = 57576 +const GROUP_CONCAT = 57577 +const SEPARATOR = 57578 +const TIMESTAMPADD = 57579 +const TIMESTAMPDIFF = 57580 +const MATCH = 57581 +const AGAINST 
= 57582 +const BOOLEAN = 57583 +const LANGUAGE = 57584 +const WITH = 57585 +const QUERY = 57586 +const EXPANSION = 57587 +const UNUSED = 57588 var yyToknames = [...]string{ "$end", @@ -518,6 +513,7 @@ var yyToknames = [...]string{ "STATUS", "VARIABLES", "WARNINGS", + "SEQUENCE", "BEGIN", "START", "TRANSACTION", @@ -572,14 +568,8 @@ var yyToknames = [...]string{ "ZEROFILL", "COLLATION", "DATABASES", - "SCHEMAS", "TABLES", - "VITESS_KEYSPACES", - "VITESS_SHARDS", - "VITESS_TABLETS", "VSCHEMA", - "VSCHEMA_TABLES", - "VITESS_TARGET", "FULL", "PROCESSLIST", "COLUMNS", @@ -642,1363 +632,1359 @@ var yyExca = [...]int{ 5, 29, -2, 4, -1, 37, - 159, 299, - 160, 299, - -2, 289, - -1, 270, - 112, 639, - -2, 635, - -1, 271, - 112, 640, - -2, 636, - -1, 339, - 82, 814, + 159, 296, + 160, 296, + -2, 286, + -1, 260, + 112, 636, + -2, 632, + -1, 261, + 112, 637, + -2, 633, + -1, 329, + 82, 811, -2, 63, - -1, 340, - 82, 770, + -1, 330, + 82, 767, -2, 64, - -1, 345, - 82, 749, - -2, 601, - -1, 347, - 82, 791, - -2, 603, - -1, 617, - 1, 351, - 5, 351, - 12, 351, - 13, 351, - 14, 351, - 15, 351, - 17, 351, - 19, 351, - 30, 351, - 31, 351, - 42, 351, - 43, 351, - 44, 351, - 45, 351, - 46, 351, - 48, 351, - 49, 351, - 52, 351, - 53, 351, - 55, 351, - 56, 351, - 269, 351, - -2, 369, - -1, 620, + -1, 335, + 82, 746, + -2, 598, + -1, 337, + 82, 788, + -2, 600, + -1, 606, + 1, 348, + 5, 348, + 12, 348, + 13, 348, + 14, 348, + 15, 348, + 17, 348, + 19, 348, + 30, 348, + 31, 348, + 42, 348, + 43, 348, + 44, 348, + 45, 348, + 46, 348, + 48, 348, + 49, 348, + 52, 348, + 53, 348, + 55, 348, + 56, 348, + 264, 348, + -2, 366, + -1, 609, 53, 44, 55, 44, -2, 48, - -1, 765, - 112, 642, - -2, 638, - -1, 992, + -1, 755, + 112, 639, + -2, 635, + -1, 983, 5, 30, - -2, 436, - -1, 1022, + -2, 433, + -1, 1013, 5, 29, - -2, 575, - -1, 1264, + -2, 572, + -1, 1257, 5, 30, - -2, 576, - -1, 1316, + -2, 573, + -1, 1310, 5, 29, - -2, 578, - -1, 1393, + -2, 575, + -1, 1388, 5, 30, - -2, 579, + -2, 576, } const 
yyPrivate = 57344 -const yyLast = 12703 +const yyLast = 12651 var yyAct = [...]int{ - 271, 1427, 1417, 1227, 1381, 1116, 878, 1284, 573, 1025, - 1297, 275, 1328, 288, 1043, 1201, 1167, 851, 301, 907, - 1164, 572, 3, 249, 1026, 1168, 877, 921, 887, 1174, - 1070, 57, 81, 874, 790, 1180, 210, 716, 344, 210, - 1139, 957, 800, 1096, 984, 838, 730, 849, 1087, 633, - 613, 474, 853, 818, 767, 511, 1049, 505, 445, 891, - 632, 797, 333, 799, 831, 338, 622, 210, 81, 917, - 273, 258, 210, 248, 210, 517, 525, 56, 588, 1420, - 1404, 313, 330, 319, 320, 317, 318, 316, 315, 314, - 1415, 614, 1391, 335, 940, 1412, 1228, 321, 322, 1403, - 302, 51, 901, 240, 1156, 1256, 262, 450, 939, 1390, - 478, 61, 205, 201, 202, 203, 1195, 587, 1356, 538, - 537, 547, 548, 540, 541, 542, 543, 544, 545, 546, - 539, 1196, 1197, 549, 869, 870, 944, 63, 64, 65, - 66, 67, 634, 499, 635, 938, 868, 246, 241, 242, - 243, 244, 51, 1058, 247, 495, 1057, 245, 1078, 1059, - 254, 900, 1287, 496, 493, 494, 197, 908, 199, 1303, - 1247, 1245, 239, 463, 488, 489, 480, 1119, 482, 823, - 538, 537, 547, 548, 540, 541, 542, 543, 544, 545, - 546, 539, 1118, 705, 549, 935, 932, 933, 210, 931, - 703, 210, 498, 1414, 1411, 1382, 1115, 210, 479, 481, - 832, 1140, 1374, 210, 892, 1329, 81, 1435, 81, 81, - 464, 81, 81, 1112, 81, 1431, 81, 704, 1331, 1114, - 942, 945, 452, 985, 204, 81, 199, 1337, 706, 1120, - 709, 696, 1190, 1044, 1046, 894, 894, 277, 1142, 1189, - 1188, 448, 455, 212, 200, 951, 1001, 1363, 950, 998, - 561, 562, 198, 1267, 1126, 81, 937, 1054, 1011, 483, - 484, 978, 485, 486, 1071, 487, 514, 490, 739, 1103, - 628, 1144, 529, 1148, 513, 1143, 500, 1141, 936, 460, - 470, 1357, 1146, 1213, 875, 539, 1330, 477, 549, 908, - 736, 1145, 542, 543, 544, 545, 546, 539, 1101, 549, - 549, 864, 959, 446, 1147, 1149, 475, 476, 475, 475, - 1045, 475, 475, 1389, 475, 1113, 475, 1111, 210, 210, - 210, 1429, 941, 70, 1430, 475, 1428, 81, 1338, 1336, - 522, 893, 893, 81, 1214, 1372, 444, 943, 524, 501, - 502, 1346, 457, 1178, 
458, 51, 524, 459, 561, 562, - 731, 561, 562, 636, 1158, 819, 819, 515, 1008, 71, - 558, 698, 1076, 560, 1377, 1102, 612, 466, 467, 468, - 1107, 1104, 1097, 1105, 1100, 341, 897, 519, 1098, 1099, - 958, 1395, 898, 591, 593, 1293, 597, 599, 626, 602, - 1436, 571, 1106, 575, 576, 577, 578, 579, 580, 581, - 582, 583, 621, 586, 589, 589, 589, 595, 589, 589, - 595, 589, 603, 604, 605, 606, 607, 608, 503, 618, - 630, 300, 590, 592, 594, 596, 598, 600, 601, 1437, - 196, 732, 54, 894, 774, 210, 742, 743, 1397, 996, - 81, 995, 770, 523, 522, 210, 210, 81, 772, 773, - 771, 210, 1292, 79, 210, 997, 1091, 210, 523, 522, - 524, 210, 446, 81, 81, 975, 976, 977, 81, 81, - 81, 210, 81, 81, 1090, 524, 523, 522, 22, 81, - 81, 1079, 1373, 1160, 523, 522, 757, 759, 760, 343, - 1310, 695, 758, 524, 1290, 327, 328, 451, 702, 718, - 791, 524, 792, 1123, 1088, 523, 522, 81, 1334, 1413, - 559, 210, 1399, 504, 719, 720, 1060, 81, 1061, 721, - 722, 723, 524, 725, 726, 1334, 1385, 744, 1370, 893, - 727, 728, 1334, 504, 890, 888, 710, 889, 253, 1230, - 475, 768, 886, 892, 738, 1334, 1364, 475, 1071, 540, - 541, 542, 543, 544, 545, 546, 539, 763, 765, 549, - 1066, 81, 793, 475, 475, 715, 617, 714, 475, 475, - 475, 699, 475, 475, 453, 454, 697, 746, 694, 475, - 475, 737, 809, 812, 1334, 1333, 504, 804, 820, 1282, - 1281, 1269, 504, 761, 81, 81, 1266, 504, 523, 522, - 472, 210, 1220, 1219, 268, 1216, 1217, 1216, 1215, 210, - 210, 990, 504, 210, 210, 524, 465, 210, 210, 210, - 81, 624, 794, 795, 835, 504, 24, 802, 504, 624, - 805, 806, 1343, 81, 811, 814, 815, 343, 1342, 343, - 343, 1210, 343, 343, 341, 343, 895, 343, 1050, 816, - 1020, 51, 643, 642, 1021, 1177, 343, 718, 828, 827, - 859, 829, 830, 625, 861, 627, 575, 909, 910, 911, - 24, 625, 1050, 623, 54, 857, 1165, 834, 802, 1177, - 862, 58, 866, 1262, 745, 865, 527, 210, 81, 504, - 81, 835, 882, 1345, 81, 81, 210, 210, 1315, 210, - 210, 835, 835, 210, 81, 1129, 858, 1218, 623, 850, - 1062, 923, 24, 618, 867, 1177, 1014, 618, 54, 54, - 210, 
990, 210, 210, 1013, 210, 538, 537, 547, 548, - 540, 541, 542, 543, 544, 545, 546, 539, 990, 927, - 549, 929, 801, 803, 623, 903, 904, 905, 906, 990, - 919, 920, 629, 740, 1405, 955, 708, 1299, 343, 902, - 54, 914, 915, 916, 638, 1274, 922, 966, 765, 1206, - 769, 1065, 291, 290, 293, 294, 295, 296, 1181, 1182, - 768, 292, 297, 255, 918, 913, 967, 912, 475, 1117, - 475, 925, 968, 537, 547, 548, 540, 541, 542, 543, - 544, 545, 546, 539, 475, 974, 549, 840, 843, 844, - 845, 841, 1422, 842, 846, 1418, 980, 1181, 1182, 1208, - 1184, 1165, 1092, 734, 712, 752, 210, 210, 210, 210, - 210, 54, 1187, 1186, 1037, 1035, 1027, 764, 210, 1038, - 1036, 210, 1034, 1022, 1039, 210, 844, 845, 1033, 210, - 259, 260, 989, 1409, 1402, 979, 617, 1125, 963, 1407, - 617, 1007, 804, 973, 617, 518, 972, 1083, 81, 506, - 1005, 343, 641, 473, 1063, 1075, 1028, 1379, 343, 1031, - 516, 507, 1040, 563, 564, 565, 566, 567, 568, 569, - 570, 1378, 1313, 1073, 343, 343, 1051, 1048, 1067, 343, - 343, 343, 1260, 343, 343, 1295, 1052, 1055, 1053, 928, - 343, 343, 1072, 1080, 1081, 711, 81, 81, 1029, 1030, - 264, 1032, 1023, 1024, 848, 518, 618, 618, 618, 618, - 618, 1082, 250, 1084, 1085, 1086, 971, 341, 748, 1068, - 1069, 850, 58, 1047, 970, 81, 256, 257, 527, 618, - 879, 343, 1089, 1350, 251, 840, 843, 844, 845, 841, - 210, 842, 846, 987, 1349, 1301, 1108, 988, 1094, 81, - 1050, 497, 1424, 1423, 992, 993, 994, 1002, 999, 729, - 520, 1000, 1424, 1360, 1003, 1004, 1288, 735, 60, 62, - 1010, 55, 796, 1122, 1012, 1, 1121, 1015, 1016, 1017, - 1018, 1416, 1229, 1296, 934, 1380, 1327, 1200, 821, 769, - 885, 876, 69, 443, 81, 81, 1133, 475, 68, 1042, - 1166, 1150, 1027, 1138, 1095, 825, 826, 1157, 1151, 1371, - 1169, 1132, 884, 883, 1335, 1171, 1286, 896, 81, 966, - 765, 1077, 899, 1207, 1074, 475, 1376, 764, 649, 647, - 648, 343, 646, 81, 651, 81, 81, 1185, 650, 645, - 224, 1199, 336, 847, 343, 637, 924, 1191, 521, 72, - 1110, 1192, 1176, 617, 617, 617, 617, 617, 1109, 1198, - 930, 491, 492, 210, 226, 1203, 
557, 969, 617, 1204, - 1205, 1056, 342, 1172, 741, 510, 617, 1348, 1194, 1300, - 210, 1006, 584, 817, 1211, 1212, 81, 276, 756, 81, - 81, 210, 1170, 289, 51, 286, 287, 81, 747, 343, - 210, 343, 1019, 531, 274, 946, 947, 266, 616, 609, - 839, 837, 836, 331, 1183, 343, 766, 1179, 615, 775, - 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, - 786, 787, 788, 789, 1234, 1128, 1255, 1355, 751, 1137, - 26, 343, 1243, 59, 261, 19, 18, 1236, 17, 20, - 16, 508, 512, 15, 14, 1235, 461, 1261, 30, 1027, - 21, 13, 12, 11, 10, 879, 9, 8, 530, 81, - 1222, 7, 1271, 6, 824, 1063, 5, 81, 1270, 4, - 252, 23, 1223, 2, 1225, 0, 1280, 0, 0, 0, - 0, 0, 81, 0, 0, 0, 0, 0, 0, 81, - 618, 0, 0, 574, 0, 1289, 0, 1291, 0, 0, - 0, 0, 585, 1240, 1241, 0, 1242, 0, 0, 1244, - 0, 1246, 0, 0, 0, 0, 0, 1254, 0, 0, - 0, 1302, 0, 0, 0, 0, 821, 81, 81, 0, - 81, 0, 0, 1294, 0, 81, 0, 81, 81, 81, - 210, 1169, 1322, 81, 1323, 1324, 1325, 1316, 1314, 1276, - 1277, 1278, 0, 0, 1321, 0, 1131, 1326, 1332, 0, - 81, 0, 1339, 0, 0, 1283, 0, 1347, 0, 343, - 0, 0, 1237, 0, 0, 0, 0, 0, 0, 1239, - 0, 0, 475, 1340, 0, 1341, 509, 0, 0, 1161, - 1248, 1249, 1361, 0, 1169, 81, 0, 0, 0, 1362, - 1369, 1368, 0, 0, 0, 0, 81, 81, 0, 0, - 1263, 1264, 1265, 1384, 1268, 0, 1383, 1093, 343, 1387, - 0, 0, 208, 1170, 81, 238, 1317, 619, 0, 0, - 1392, 1279, 1027, 0, 0, 210, 0, 617, 0, 0, - 879, 0, 879, 81, 0, 0, 343, 981, 982, 983, - 265, 1401, 0, 334, 0, 0, 1344, 0, 208, 0, - 208, 0, 0, 207, 1406, 1408, 81, 0, 0, 0, - 343, 0, 0, 1410, 0, 0, 1170, 0, 51, 1421, - 1259, 0, 0, 0, 0, 0, 1432, 0, 733, 0, - 0, 0, 0, 0, 332, 1309, 0, 0, 0, 447, - 0, 449, 0, 343, 1131, 0, 0, 0, 0, 0, - 0, 0, 821, 754, 755, 1173, 1175, 0, 538, 537, - 547, 548, 540, 541, 542, 543, 544, 545, 546, 539, - 0, 0, 549, 0, 0, 0, 0, 0, 0, 1175, - 0, 0, 0, 0, 1351, 1352, 1353, 1354, 0, 0, - 0, 1358, 1359, 0, 343, 0, 343, 1202, 0, 0, - 0, 0, 0, 1365, 1366, 1367, 574, 0, 0, 807, - 808, 0, 0, 0, 0, 1419, 879, 0, 0, 0, - 0, 0, 0, 0, 208, 0, 0, 208, 0, 0, - 0, 0, 0, 208, 0, 
1388, 0, 0, 0, 208, - 0, 0, 1393, 0, 1258, 0, 1298, 1226, 0, 0, - 1231, 1232, 0, 0, 0, 0, 0, 221, 343, 0, - 1398, 0, 0, 0, 0, 456, 0, 873, 462, 0, - 0, 0, 0, 0, 469, 0, 0, 0, 0, 0, - 471, 234, 538, 537, 547, 548, 540, 541, 542, 543, - 544, 545, 546, 539, 0, 0, 549, 0, 0, 0, - 1135, 1136, 0, 0, 0, 1433, 1434, 0, 0, 821, - 0, 0, 0, 1152, 1153, 0, 1154, 1155, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1162, 1163, - 343, 0, 213, 0, 0, 0, 0, 0, 1285, 216, - 0, 1253, 0, 0, 0, 0, 0, 225, 220, 0, - 0, 0, 0, 343, 208, 208, 208, 0, 0, 0, - 343, 0, 0, 1298, 879, 0, 0, 0, 964, 965, - 0, 512, 0, 0, 0, 0, 0, 0, 0, 223, - 0, 0, 0, 0, 0, 233, 0, 0, 0, 0, - 1209, 0, 0, 0, 0, 611, 0, 620, 1318, 1319, - 0, 1320, 0, 0, 0, 0, 1285, 0, 1285, 1285, - 1285, 214, 0, 0, 1202, 538, 537, 547, 548, 540, - 541, 542, 543, 544, 545, 546, 539, 0, 0, 549, - 0, 1285, 0, 991, 0, 0, 0, 0, 227, 217, - 218, 0, 228, 229, 230, 232, 0, 231, 237, 1238, - 1009, 0, 219, 222, 0, 215, 236, 235, 0, 0, - 0, 0, 0, 0, 0, 0, 1375, 0, 0, 0, - 0, 208, 0, 0, 0, 0, 0, 343, 343, 0, - 0, 208, 208, 0, 0, 0, 0, 208, 0, 0, - 208, 0, 821, 208, 0, 1394, 0, 717, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, - 0, 0, 644, 0, 1400, 0, 0, 0, 0, 0, - 0, 0, 700, 701, 0, 0, 0, 0, 707, 0, - 0, 332, 0, 0, 713, 0, 0, 1285, 0, 0, - 1252, 0, 0, 0, 0, 0, 0, 208, 724, 0, - 0, 0, 0, 0, 0, 0, 717, 24, 25, 52, - 27, 28, 0, 0, 0, 1304, 1305, 1306, 1307, 1308, - 0, 0, 0, 1311, 1312, 0, 43, 0, 0, 0, - 0, 29, 48, 49, 0, 0, 0, 0, 753, 0, - 1124, 0, 0, 0, 0, 0, 0, 0, 265, 0, - 0, 38, 0, 265, 265, 54, 0, 265, 265, 265, - 0, 0, 0, 822, 538, 537, 547, 548, 540, 541, - 542, 543, 544, 545, 546, 539, 0, 1251, 549, 0, - 0, 0, 265, 265, 265, 265, 0, 208, 0, 1159, - 0, 0, 0, 0, 0, 208, 855, 0, 0, 208, - 208, 0, 0, 208, 863, 717, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 31, 32, 34, 33, - 36, 0, 50, 0, 0, 0, 0, 0, 833, 0, - 0, 1193, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 860, 0, 37, 44, 45, 0, 0, 46, 47, - 35, 538, 537, 547, 548, 540, 541, 542, 543, 544, - 545, 546, 539, 39, 40, 549, 41, 42, 0, 
0, - 1134, 0, 0, 208, 0, 0, 0, 0, 0, 0, - 0, 0, 208, 208, 1425, 208, 208, 0, 0, 208, - 538, 537, 547, 548, 540, 541, 542, 543, 544, 545, - 546, 539, 666, 0, 549, 0, 208, 1250, 960, 961, - 0, 208, 0, 0, 926, 0, 717, 0, 0, 0, - 0, 0, 0, 948, 949, 0, 952, 953, 265, 0, - 954, 547, 548, 540, 541, 542, 543, 544, 545, 546, - 539, 1257, 0, 549, 0, 0, 0, 956, 0, 0, - 0, 574, 962, 53, 0, 0, 0, 0, 0, 1272, - 0, 0, 1273, 0, 0, 1275, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 986, 0, 0, 0, - 654, 538, 537, 547, 548, 540, 541, 542, 543, 544, - 545, 546, 539, 265, 0, 549, 538, 537, 547, 548, - 540, 541, 542, 543, 544, 545, 546, 539, 0, 0, - 549, 822, 208, 208, 208, 208, 208, 667, 0, 0, - 0, 0, 0, 0, 1041, 0, 0, 208, 0, 0, - 0, 855, 0, 0, 0, 208, 0, 0, 0, 680, - 683, 684, 685, 686, 687, 688, 0, 689, 690, 691, - 692, 693, 668, 669, 670, 671, 652, 653, 681, 0, - 655, 0, 656, 657, 658, 659, 660, 661, 662, 663, - 664, 665, 672, 673, 674, 675, 676, 677, 678, 679, - 533, 0, 536, 0, 0, 0, 0, 0, 550, 551, - 552, 553, 554, 555, 556, 0, 534, 535, 532, 538, - 537, 547, 548, 540, 541, 542, 543, 544, 545, 546, - 539, 0, 0, 549, 538, 537, 547, 548, 540, 541, - 542, 543, 544, 545, 546, 539, 0, 0, 549, 0, - 1386, 574, 0, 0, 0, 682, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 208, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 265, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 265, 0, 0, + 261, 1412, 1219, 1422, 1322, 1376, 1107, 265, 1016, 1277, + 562, 1290, 1034, 868, 1159, 278, 1193, 841, 239, 897, + 891, 57, 291, 864, 839, 1156, 1017, 1160, 911, 948, + 1040, 1166, 81, 230, 867, 877, 205, 1061, 1172, 205, + 706, 1131, 334, 780, 790, 561, 3, 622, 975, 1087, + 843, 1078, 787, 881, 500, 828, 808, 757, 720, 494, + 621, 292, 51, 328, 907, 435, 603, 205, 81, 514, + 248, 602, 205, 323, 205, 821, 506, 263, 231, 232, + 233, 234, 320, 611, 237, 56, 1415, 325, 1399, 1410, + 1386, 1407, 608, 1220, 1398, 577, 1385, 238, 930, 1148, + 267, 1249, 440, 1049, 1188, 1189, 1048, 468, 252, 1050, + 859, 860, 929, 51, 1187, 576, 200, 196, 197, 198, + 623, 244, 624, 
858, 192, 236, 194, 303, 202, 309, + 310, 307, 308, 306, 305, 304, 488, 235, 1069, 890, + 934, 1109, 61, 311, 312, 1280, 898, 484, 453, 928, + 485, 482, 483, 1297, 1240, 1238, 229, 1111, 695, 322, + 477, 478, 692, 1409, 437, 1406, 439, 1377, 63, 64, + 65, 66, 67, 470, 464, 472, 1351, 527, 526, 536, + 537, 529, 530, 531, 532, 533, 534, 535, 528, 694, + 1106, 538, 822, 205, 1110, 487, 205, 696, 1369, 925, + 922, 923, 205, 921, 1430, 469, 471, 1094, 205, 882, + 454, 81, 1331, 81, 1323, 81, 81, 193, 81, 442, + 81, 1035, 1037, 693, 1103, 81, 1426, 1325, 194, 1112, + 1105, 699, 685, 199, 932, 935, 1092, 1182, 527, 526, + 536, 537, 529, 530, 531, 532, 533, 534, 535, 528, + 1181, 1180, 538, 438, 884, 81, 445, 884, 207, 195, + 942, 550, 551, 941, 502, 1358, 1260, 1118, 1045, 490, + 491, 927, 465, 450, 465, 1002, 465, 465, 969, 465, + 992, 465, 989, 1062, 898, 446, 465, 729, 452, 467, + 503, 976, 617, 926, 459, 1324, 865, 518, 1036, 460, + 461, 528, 538, 1093, 538, 1384, 51, 854, 1098, 1095, + 1088, 1096, 1091, 1332, 1330, 726, 1089, 1090, 205, 205, + 205, 547, 512, 511, 549, 513, 1104, 81, 1102, 1152, + 1097, 931, 1424, 81, 721, 1425, 447, 1423, 448, 513, + 1205, 449, 1367, 601, 1352, 1340, 933, 456, 457, 458, + 883, 1150, 560, 883, 564, 565, 566, 567, 568, 569, + 570, 571, 572, 548, 575, 578, 578, 578, 584, 578, + 578, 584, 578, 592, 593, 594, 595, 596, 597, 884, + 607, 764, 550, 551, 550, 551, 988, 473, 504, 474, + 475, 1206, 476, 1170, 479, 762, 763, 761, 950, 489, + 580, 582, 610, 586, 588, 615, 591, 70, 436, 436, + 600, 511, 609, 789, 619, 722, 512, 511, 625, 606, + 579, 581, 583, 585, 587, 589, 590, 513, 809, 987, + 999, 986, 466, 513, 441, 205, 512, 511, 809, 687, + 81, 1067, 434, 71, 1372, 205, 205, 81, 512, 511, + 887, 205, 191, 513, 205, 1392, 888, 205, 966, 967, + 968, 205, 508, 81, 81, 513, 1431, 258, 81, 81, + 81, 205, 81, 81, 1390, 883, 949, 54, 81, 81, + 880, 878, 1286, 879, 747, 749, 750, 760, 876, 882, + 748, 1132, 529, 530, 531, 532, 533, 534, 535, 528, + 
331, 465, 538, 708, 1285, 1432, 81, 781, 465, 782, + 205, 443, 444, 1082, 1081, 1070, 81, 317, 318, 22, + 1368, 734, 732, 733, 465, 465, 1304, 633, 1134, 465, + 465, 465, 1283, 465, 465, 728, 700, 689, 690, 465, + 465, 1115, 1079, 697, 758, 1051, 322, 1052, 1365, 703, + 531, 532, 533, 534, 535, 528, 1328, 1408, 538, 753, + 81, 755, 1136, 714, 1140, 1222, 1135, 1062, 1133, 1057, + 512, 511, 727, 1138, 493, 736, 783, 799, 802, 243, + 1394, 493, 1137, 810, 705, 751, 704, 513, 688, 512, + 511, 1328, 1380, 81, 81, 1139, 1141, 1328, 493, 1337, + 205, 813, 743, 1328, 1359, 1336, 513, 686, 205, 205, + 794, 51, 205, 205, 684, 683, 205, 205, 205, 81, + 462, 691, 759, 1328, 1327, 1202, 564, 455, 784, 785, + 1275, 1274, 81, 1262, 493, 885, 849, 709, 710, 848, + 851, 612, 711, 712, 713, 1169, 715, 716, 818, 806, + 1259, 493, 717, 718, 1212, 1211, 1208, 1209, 1208, 1207, + 708, 792, 893, 894, 895, 896, 899, 900, 901, 840, + 981, 493, 1041, 607, 825, 493, 1041, 607, 904, 905, + 906, 856, 855, 792, 493, 847, 205, 81, 613, 81, + 852, 613, 823, 81, 81, 205, 205, 205, 872, 205, + 205, 632, 631, 205, 81, 850, 1157, 58, 606, 1169, + 1255, 913, 606, 1339, 1121, 825, 606, 825, 24, 1169, + 205, 24, 205, 205, 1210, 205, 824, 1053, 857, 1005, + 614, 1004, 616, 614, 981, 612, 552, 553, 554, 555, + 556, 557, 558, 559, 909, 910, 1309, 981, 465, 54, + 465, 825, 281, 280, 283, 284, 285, 286, 981, 331, + 493, 282, 287, 612, 618, 465, 54, 245, 730, 54, + 957, 698, 755, 830, 833, 834, 835, 831, 916, 832, + 836, 1400, 1292, 892, 758, 958, 24, 938, 939, 940, + 1267, 943, 944, 912, 959, 945, 1198, 527, 526, 536, + 537, 529, 530, 531, 532, 533, 534, 535, 528, 1056, + 1011, 538, 947, 908, 1012, 54, 970, 953, 903, 971, + 1173, 1174, 1417, 902, 1108, 1293, 205, 205, 205, 205, + 205, 915, 1018, 1413, 54, 1200, 1176, 1157, 205, 1083, + 492, 205, 724, 702, 742, 205, 1028, 1026, 1179, 205, + 1178, 1029, 1027, 1030, 1025, 834, 835, 1024, 249, 250, + 1404, 917, 759, 919, 998, 1397, 1117, 1013, 81, 954, + 507, 
1402, 964, 1042, 290, 963, 1074, 630, 946, 1066, + 1043, 1054, 1044, 1014, 1015, 505, 794, 607, 607, 607, + 607, 607, 1031, 1020, 1021, 1039, 1023, 1019, 463, 495, + 1022, 1374, 840, 1373, 1038, 1046, 79, 1307, 1288, 1064, + 607, 496, 1058, 1071, 1072, 1253, 81, 81, 918, 1063, + 701, 1073, 838, 1075, 1076, 1077, 606, 606, 606, 606, + 606, 246, 247, 1059, 1060, 962, 507, 240, 1345, 241, + 58, 606, 333, 961, 1344, 1295, 81, 1041, 486, 606, + 993, 754, 990, 254, 1086, 1080, 830, 833, 834, 835, + 831, 205, 832, 836, 1419, 1418, 1173, 1174, 719, 509, + 81, 1419, 1099, 1355, 1281, 725, 60, 62, 465, 795, + 796, 55, 1, 801, 804, 805, 1411, 1221, 756, 1289, + 924, 765, 766, 767, 768, 769, 770, 771, 772, 773, + 774, 775, 776, 777, 778, 779, 1114, 465, 817, 1375, + 819, 820, 1321, 1192, 875, 81, 81, 866, 1149, 1018, + 1158, 1125, 69, 1124, 433, 68, 1130, 1366, 874, 873, + 1143, 1142, 1329, 1279, 886, 1161, 1068, 889, 1199, 81, + 1065, 1371, 638, 957, 636, 755, 814, 1168, 637, 635, + 640, 331, 639, 1119, 81, 634, 81, 81, 218, 326, + 1163, 1177, 1184, 837, 869, 626, 914, 510, 72, 1191, + 1183, 1101, 1100, 1186, 1162, 920, 51, 480, 481, 220, + 546, 960, 1047, 332, 205, 333, 1164, 333, 1190, 333, + 333, 1085, 333, 1195, 333, 1203, 1204, 1196, 1197, 333, + 731, 205, 499, 1343, 1294, 735, 997, 81, 573, 807, + 81, 81, 81, 205, 266, 746, 279, 276, 277, 81, + 1113, 1214, 205, 737, 1010, 520, 264, 256, 605, 516, + 598, 829, 827, 1215, 826, 1217, 321, 1175, 1171, 604, + 1120, 1248, 1350, 741, 1227, 26, 1229, 59, 251, 19, + 18, 1228, 17, 1233, 1234, 965, 1235, 20, 16, 1237, + 15, 1239, 754, 791, 793, 1236, 536, 537, 529, 530, + 531, 532, 533, 534, 535, 528, 1213, 1018, 538, 14, + 451, 30, 21, 607, 1254, 13, 12, 11, 10, 9, + 1263, 81, 8, 1216, 497, 501, 1264, 7, 6, 81, + 5, 333, 980, 1273, 1054, 1226, 4, 627, 242, 23, + 1247, 519, 2, 0, 81, 1276, 0, 0, 0, 0, + 996, 81, 606, 0, 0, 0, 0, 1282, 0, 1284, + 972, 973, 974, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1269, 1270, 1271, 0, 563, 0, 0, 0, + 0, 0, 
0, 0, 1296, 574, 0, 0, 0, 0, + 81, 81, 0, 81, 0, 0, 0, 0, 81, 0, + 81, 81, 81, 205, 0, 465, 81, 1316, 1161, 1317, + 1318, 1319, 1315, 1308, 0, 0, 0, 0, 0, 0, + 1320, 0, 1326, 81, 205, 0, 0, 1333, 0, 0, + 869, 0, 1341, 0, 1310, 0, 1334, 0, 1335, 0, + 0, 0, 0, 0, 333, 0, 0, 1162, 0, 0, + 1311, 333, 0, 0, 1356, 0, 1364, 0, 0, 81, + 0, 0, 1161, 1363, 0, 0, 0, 333, 333, 0, + 81, 81, 333, 333, 333, 0, 333, 333, 0, 0, + 1338, 1378, 333, 333, 1379, 0, 1382, 1357, 0, 81, + 0, 0, 0, 1018, 1387, 0, 0, 0, 0, 0, + 205, 1162, 0, 51, 0, 0, 0, 0, 81, 0, + 738, 0, 0, 0, 0, 978, 1342, 1396, 1287, 979, + 516, 0, 0, 333, 0, 0, 983, 984, 985, 1403, + 1401, 81, 1123, 991, 0, 0, 994, 995, 0, 0, + 1405, 0, 1001, 1416, 0, 0, 1003, 0, 0, 1006, + 1007, 1008, 1009, 1427, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 786, 1153, 0, 0, 0, 0, + 723, 1033, 0, 0, 1127, 1128, 0, 0, 0, 0, + 811, 0, 0, 0, 0, 0, 0, 1144, 1145, 0, + 1146, 1147, 1391, 0, 0, 744, 745, 815, 816, 0, + 0, 1414, 1154, 1155, 0, 24, 25, 52, 27, 28, + 0, 0, 0, 0, 0, 0, 869, 0, 869, 0, + 0, 0, 0, 333, 43, 0, 0, 1252, 0, 29, + 48, 49, 0, 0, 0, 0, 333, 526, 536, 537, + 529, 530, 531, 532, 533, 534, 535, 528, 563, 38, + 538, 797, 798, 54, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1201, 527, 526, 536, 537, 529, + 530, 531, 532, 533, 534, 535, 528, 0, 0, 538, + 0, 1123, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 333, 0, 333, 0, 0, 0, 936, 937, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 333, 863, + 0, 0, 1129, 0, 31, 32, 34, 33, 36, 0, + 50, 0, 0, 0, 1231, 1246, 0, 0, 0, 0, + 0, 0, 1251, 0, 333, 0, 0, 0, 0, 0, + 0, 37, 44, 45, 0, 215, 46, 47, 35, 0, + 0, 0, 0, 869, 0, 0, 0, 0, 0, 498, + 0, 0, 39, 40, 0, 41, 42, 0, 0, 224, + 527, 526, 536, 537, 529, 530, 531, 532, 533, 534, + 535, 528, 0, 1291, 538, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 203, 0, 0, 228, 527, + 526, 536, 537, 529, 530, 531, 532, 533, 534, 535, + 528, 955, 956, 538, 501, 0, 0, 0, 0, 0, + 208, 0, 0, 255, 0, 0, 324, 211, 0, 811, + 0, 203, 0, 203, 0, 219, 214, 0, 0, 0, + 0, 1298, 1299, 1300, 1301, 1302, 53, 0, 0, 1305, + 1306, 0, 0, 
0, 0, 0, 1230, 1126, 0, 0, + 0, 0, 0, 1232, 0, 0, 0, 217, 0, 0, + 0, 0, 333, 223, 1241, 1242, 982, 527, 526, 536, + 537, 529, 530, 531, 532, 533, 534, 535, 528, 0, + 0, 538, 0, 1000, 1256, 1257, 1258, 0, 1261, 0, + 209, 0, 1291, 869, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1272, 0, 0, 0, 0, + 1084, 333, 0, 0, 0, 0, 0, 221, 212, 0, + 222, 227, 0, 0, 0, 213, 216, 0, 210, 226, + 225, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 333, 0, 203, 0, 0, 203, 0, 0, 0, 0, + 0, 203, 0, 0, 0, 0, 0, 203, 0, 0, + 0, 0, 0, 522, 333, 525, 0, 0, 0, 0, + 1303, 539, 540, 541, 542, 543, 544, 545, 1245, 523, + 524, 521, 527, 526, 536, 537, 529, 530, 531, 532, + 533, 534, 535, 528, 0, 0, 538, 333, 0, 0, + 0, 0, 1420, 0, 0, 0, 811, 1244, 0, 1165, + 1167, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1346, 1347, 1348, 1349, 1116, 0, 0, 1353, 1354, 0, + 0, 0, 0, 1167, 0, 0, 0, 0, 0, 1360, + 1361, 1362, 1243, 0, 0, 0, 0, 0, 333, 0, + 333, 1194, 527, 526, 536, 537, 529, 530, 531, 532, + 533, 534, 535, 528, 0, 0, 538, 203, 203, 203, + 0, 0, 1383, 1151, 0, 0, 0, 0, 0, 1388, + 0, 527, 526, 536, 537, 529, 530, 531, 532, 533, + 534, 535, 528, 0, 0, 538, 0, 1393, 0, 0, + 0, 1218, 0, 0, 1223, 1224, 1225, 0, 0, 0, + 0, 0, 0, 333, 0, 1185, 527, 526, 536, 537, + 529, 530, 531, 532, 533, 534, 535, 528, 977, 0, + 538, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1428, 1429, 0, 0, 0, 0, 527, 526, + 536, 537, 529, 530, 531, 532, 533, 534, 535, 528, + 0, 0, 538, 0, 811, 527, 526, 536, 537, 529, + 530, 531, 532, 533, 534, 535, 528, 0, 0, 538, + 0, 0, 0, 0, 203, 333, 0, 0, 0, 0, + 0, 0, 0, 1278, 203, 203, 0, 0, 0, 0, + 203, 0, 0, 203, 0, 0, 203, 0, 333, 0, + 707, 0, 0, 0, 0, 333, 0, 0, 0, 0, + 203, 0, 0, 0, 0, 0, 1250, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 563, 0, 0, 0, + 0, 0, 0, 0, 1265, 0, 0, 1266, 0, 0, + 1268, 0, 0, 0, 1312, 1313, 0, 1314, 0, 203, + 0, 0, 1278, 0, 1278, 1278, 1278, 0, 707, 0, + 1194, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1278, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1127, 717, 0, - 0, 0, 0, 0, 0, 0, 0, 822, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 255, 0, 0, 0, 0, 255, 255, 0, 0, 255, + 255, 255, 0, 1370, 0, 812, 0, 0, 0, 0, + 0, 0, 0, 0, 333, 333, 0, 0, 0, 0, + 0, 0, 0, 0, 255, 255, 255, 255, 0, 203, + 811, 0, 0, 1389, 0, 0, 0, 203, 845, 0, + 0, 203, 203, 0, 0, 203, 853, 707, 0, 0, + 0, 0, 1395, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, - 0, 0, 0, 526, 0, 0, 0, 0, 103, 0, - 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, - 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 80, 208, - 528, 0, 0, 0, 0, 0, 0, 96, 0, 0, - 0, 0, 0, 523, 522, 0, 208, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, - 524, 0, 0, 0, 0, 0, 208, 0, 0, 0, - 1221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1224, 0, 0, - 0, 0, 109, 0, 0, 0, 211, 0, 1233, 0, - 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, - 137, 151, 155, 0, 822, 0, 99, 0, 153, 141, - 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, - 165, 182, 191, 84, 164, 174, 97, 156, 86, 172, - 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, - 138, 169, 170, 100, 194, 91, 181, 88, 92, 180, - 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, - 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, - 0, 0, 161, 178, 195, 94, 0, 157, 166, 185, - 186, 187, 188, 189, 190, 0, 0, 95, 108, 104, - 143, 135, 93, 114, 158, 117, 124, 149, 193, 140, - 154, 98, 177, 159, 0, 0, 855, 0, 0, 0, + 0, 0, 0, 0, 0, 1278, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 82, 89, 121, 192, 148, 106, 179, - 0, 431, 420, 0, 391, 434, 369, 383, 442, 384, - 385, 413, 355, 399, 139, 381, 0, 372, 350, 378, - 351, 370, 393, 103, 396, 368, 422, 402, 433, 120, - 440, 122, 407, 0, 160, 131, 0, 0, 395, 424, - 397, 418, 390, 414, 360, 406, 435, 382, 411, 436, - 0, 0, 0, 80, 0, 880, 881, 822, 0, 0, - 0, 0, 96, 0, 409, 430, 380, 410, 412, 349, - 408, 208, 353, 356, 441, 426, 375, 376, 1064, 0, - 0, 0, 0, 0, 0, 394, 398, 415, 388, 0, - 0, 0, 0, 0, 0, 0, 0, 373, 0, 405, - 0, 0, 0, 357, 354, 0, 0, 392, 0, 0, - 0, 359, 1396, 374, 416, 0, 348, 109, 419, 425, - 389, 
211, 429, 387, 386, 432, 146, 0, 163, 111, - 119, 83, 90, 0, 110, 137, 151, 155, 423, 371, - 379, 99, 377, 153, 141, 176, 404, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 182, 191, 84, 164, - 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, - 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, - 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 352, 0, 161, 178, 195, - 94, 367, 157, 166, 185, 186, 187, 188, 189, 190, - 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 159, 363, - 366, 361, 362, 400, 401, 437, 438, 439, 417, 358, - 0, 364, 365, 0, 421, 427, 428, 403, 82, 89, - 121, 192, 148, 106, 179, 431, 420, 0, 391, 434, - 369, 383, 442, 384, 385, 413, 355, 399, 139, 381, - 0, 372, 350, 378, 351, 370, 393, 103, 396, 368, - 422, 402, 433, 120, 440, 122, 407, 0, 160, 131, - 0, 0, 395, 424, 397, 418, 390, 414, 360, 406, - 435, 382, 411, 436, 0, 0, 0, 80, 0, 880, - 881, 0, 0, 0, 0, 0, 96, 0, 409, 430, - 380, 410, 412, 349, 408, 0, 353, 356, 441, 426, - 375, 376, 0, 0, 0, 0, 0, 0, 0, 394, - 398, 415, 388, 0, 0, 0, 0, 0, 0, 0, - 0, 373, 0, 405, 0, 0, 0, 357, 354, 0, - 0, 392, 0, 0, 0, 359, 0, 374, 416, 0, - 348, 109, 419, 425, 389, 211, 429, 387, 386, 432, - 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, - 151, 155, 423, 371, 379, 99, 377, 153, 141, 176, - 404, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 174, 97, 156, 86, 172, 162, - 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 92, 180, 136, - 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 352, - 0, 161, 178, 195, 94, 367, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 135, 93, 114, 158, 117, 124, 149, 193, 140, 154, - 98, 177, 159, 363, 366, 361, 362, 400, 401, 437, - 438, 439, 417, 358, 0, 364, 365, 0, 421, 427, - 428, 403, 82, 89, 121, 192, 148, 106, 179, 431, - 420, 0, 391, 434, 369, 383, 
442, 384, 385, 413, - 355, 399, 139, 381, 0, 372, 350, 378, 351, 370, - 393, 103, 396, 368, 422, 402, 433, 120, 440, 122, - 407, 0, 160, 131, 0, 0, 395, 424, 397, 418, - 390, 414, 360, 406, 435, 382, 411, 436, 54, 0, - 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, - 96, 0, 409, 430, 380, 410, 412, 349, 408, 0, - 353, 356, 441, 426, 375, 376, 0, 0, 0, 0, - 0, 0, 0, 394, 398, 415, 388, 0, 0, 0, - 0, 0, 0, 0, 0, 373, 0, 405, 0, 0, - 0, 357, 354, 0, 0, 392, 0, 0, 0, 359, - 0, 374, 416, 0, 348, 109, 419, 425, 389, 211, - 429, 387, 386, 432, 146, 0, 163, 111, 119, 83, - 90, 0, 110, 137, 151, 155, 423, 371, 379, 99, - 377, 153, 141, 176, 404, 142, 152, 123, 168, 147, - 175, 183, 184, 165, 182, 191, 84, 164, 174, 97, - 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, - 102, 107, 101, 138, 169, 170, 100, 194, 91, 181, - 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, - 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, - 132, 134, 0, 352, 0, 161, 178, 195, 94, 367, - 157, 166, 185, 186, 187, 188, 189, 190, 0, 0, - 95, 108, 104, 143, 135, 93, 114, 158, 117, 124, - 149, 193, 140, 154, 98, 177, 159, 363, 366, 361, - 362, 400, 401, 437, 438, 439, 417, 358, 0, 364, - 365, 0, 421, 427, 428, 403, 82, 89, 121, 192, - 148, 106, 179, 431, 420, 0, 391, 434, 369, 383, - 442, 384, 385, 413, 355, 399, 139, 381, 0, 372, - 350, 378, 351, 370, 393, 103, 396, 368, 422, 402, - 433, 120, 440, 122, 407, 0, 160, 131, 0, 0, - 395, 424, 397, 418, 390, 414, 360, 406, 435, 382, - 411, 436, 0, 0, 0, 80, 0, 0, 0, 0, - 0, 0, 0, 0, 96, 0, 409, 430, 380, 410, - 412, 349, 408, 0, 353, 356, 441, 426, 375, 376, - 0, 0, 0, 0, 0, 0, 0, 394, 398, 415, - 388, 0, 0, 0, 0, 0, 0, 1130, 0, 373, - 0, 405, 0, 0, 0, 357, 354, 0, 0, 392, - 0, 0, 0, 359, 0, 374, 416, 0, 348, 109, - 419, 425, 389, 211, 429, 387, 386, 432, 146, 0, - 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, - 423, 371, 379, 99, 377, 153, 141, 176, 404, 142, - 152, 123, 168, 147, 175, 183, 184, 165, 182, 191, + 0, 0, 0, 0, 0, 0, 0, 0, 1381, 563, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 203, 0, 0, 0, 0, + 0, 0, 0, 0, 203, 203, 203, 0, 203, 203, + 0, 0, 203, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 203, + 0, 951, 952, 0, 203, 0, 0, 0, 0, 707, + 0, 0, 0, 0, 0, 139, 0, 0, 0, 844, + 0, 255, 0, 0, 103, 0, 0, 0, 0, 0, + 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 204, 0, 846, 0, 0, 0, + 0, 0, 0, 96, 0, 0, 0, 0, 255, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 812, 203, 203, 203, 203, 203, + 0, 0, 0, 0, 0, 0, 0, 1032, 109, 0, + 203, 0, 206, 0, 845, 0, 0, 146, 203, 163, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, - 100, 194, 91, 181, 88, 92, 180, 136, 167, 173, + 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, - 125, 145, 113, 133, 132, 134, 0, 352, 0, 161, - 178, 195, 94, 367, 157, 166, 185, 186, 187, 188, - 189, 190, 0, 0, 95, 108, 104, 143, 135, 93, - 114, 158, 117, 124, 149, 193, 140, 154, 98, 177, - 159, 363, 366, 361, 362, 400, 401, 437, 438, 439, - 417, 358, 0, 364, 365, 0, 421, 427, 428, 403, - 82, 89, 121, 192, 148, 106, 179, 431, 420, 0, - 391, 434, 369, 383, 442, 384, 385, 413, 355, 399, - 139, 381, 0, 372, 350, 378, 351, 370, 393, 103, - 396, 368, 422, 402, 433, 120, 440, 122, 407, 0, - 160, 131, 0, 0, 395, 424, 397, 418, 390, 414, - 360, 406, 435, 382, 411, 436, 0, 0, 0, 209, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, + 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, + 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, + 203, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 255, 0, 0, 0, 82, 89, 121, 187, 148, 106, + 179, 255, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 707, 0, 0, 0, 0, 0, 0, 0, + 0, 812, 139, 0, 0, 0, 0, 262, 0, 0, 
+ 0, 103, 0, 259, 0, 0, 0, 120, 302, 122, + 0, 0, 160, 131, 0, 0, 0, 0, 293, 294, + 0, 0, 0, 0, 0, 0, 861, 0, 54, 0, + 0, 260, 281, 280, 283, 284, 285, 286, 0, 0, + 96, 282, 287, 288, 289, 862, 0, 0, 257, 274, + 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 203, 0, 0, 0, 0, 0, 0, + 0, 271, 272, 0, 0, 0, 0, 315, 0, 273, + 203, 0, 268, 269, 270, 275, 0, 0, 0, 0, + 0, 0, 203, 0, 0, 109, 0, 0, 0, 206, + 0, 203, 313, 0, 146, 0, 163, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 812, + 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 161, 178, 190, 94, + 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, + 177, 159, 303, 314, 309, 310, 307, 308, 306, 305, + 304, 316, 295, 296, 297, 298, 300, 0, 311, 312, + 299, 82, 89, 121, 187, 148, 106, 179, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 845, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 203, 0, 0, 0, 0, 0, 421, + 410, 0, 381, 424, 359, 373, 432, 374, 375, 403, + 345, 389, 139, 371, 0, 362, 340, 368, 341, 360, + 383, 103, 386, 358, 412, 392, 423, 120, 430, 122, + 397, 0, 160, 131, 0, 0, 385, 414, 387, 408, + 380, 404, 350, 396, 425, 372, 401, 426, 0, 0, + 0, 80, 0, 870, 871, 812, 0, 0, 0, 0, + 96, 0, 399, 420, 370, 400, 402, 339, 398, 203, + 343, 346, 431, 416, 365, 366, 1055, 0, 0, 0, + 0, 0, 0, 384, 388, 405, 378, 0, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 395, 0, 0, + 0, 347, 344, 0, 0, 382, 0, 0, 0, 349, + 0, 364, 406, 0, 338, 109, 409, 415, 379, 206, + 419, 377, 376, 422, 146, 0, 163, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 413, 361, 369, 99, + 367, 153, 141, 176, 394, 142, 152, 123, 168, 147, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 
+ 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 342, 0, 161, 178, 190, 94, + 357, 166, 185, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, + 177, 159, 353, 356, 351, 352, 390, 391, 427, 428, + 429, 407, 348, 0, 354, 355, 0, 411, 417, 418, + 393, 82, 89, 121, 187, 148, 106, 179, 421, 410, + 0, 381, 424, 359, 373, 432, 374, 375, 403, 345, + 389, 139, 371, 0, 362, 340, 368, 341, 360, 383, + 103, 386, 358, 412, 392, 423, 120, 430, 122, 397, + 0, 160, 131, 0, 0, 385, 414, 387, 408, 380, + 404, 350, 396, 425, 372, 401, 426, 0, 0, 0, + 80, 0, 870, 871, 0, 0, 0, 0, 0, 96, + 0, 399, 420, 370, 400, 402, 339, 398, 0, 343, + 346, 431, 416, 365, 366, 0, 0, 0, 0, 0, + 0, 0, 384, 388, 405, 378, 0, 0, 0, 0, + 0, 0, 0, 0, 363, 0, 395, 0, 0, 0, + 347, 344, 0, 0, 382, 0, 0, 0, 349, 0, + 364, 406, 0, 338, 109, 409, 415, 379, 206, 419, + 377, 376, 422, 146, 0, 163, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 413, 361, 369, 99, 367, + 153, 141, 176, 394, 142, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, + 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 342, 0, 161, 178, 190, 94, 357, + 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, + 159, 353, 356, 351, 352, 390, 391, 427, 428, 429, + 407, 348, 0, 354, 355, 0, 411, 417, 418, 393, + 82, 89, 121, 187, 148, 106, 179, 421, 410, 0, + 381, 424, 359, 373, 432, 374, 375, 403, 345, 389, + 139, 371, 0, 362, 340, 368, 341, 360, 383, 103, + 386, 358, 412, 392, 423, 120, 430, 122, 397, 0, + 160, 131, 0, 0, 385, 414, 387, 408, 380, 404, + 350, 396, 425, 372, 401, 426, 54, 0, 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, - 409, 430, 380, 410, 412, 349, 408, 0, 353, 
356, - 441, 426, 375, 376, 0, 0, 0, 0, 0, 0, - 0, 394, 398, 415, 388, 0, 0, 0, 0, 0, - 0, 864, 0, 373, 0, 405, 0, 0, 0, 357, - 354, 0, 0, 392, 0, 0, 0, 359, 0, 374, - 416, 0, 348, 109, 419, 425, 389, 211, 429, 387, - 386, 432, 146, 0, 163, 111, 119, 83, 90, 0, - 110, 137, 151, 155, 423, 371, 379, 99, 377, 153, - 141, 176, 404, 142, 152, 123, 168, 147, 175, 183, - 184, 165, 182, 191, 84, 164, 174, 97, 156, 86, + 399, 420, 370, 400, 402, 339, 398, 0, 343, 346, + 431, 416, 365, 366, 0, 0, 0, 0, 0, 0, + 0, 384, 388, 405, 378, 0, 0, 0, 0, 0, + 0, 0, 0, 363, 0, 395, 0, 0, 0, 347, + 344, 0, 0, 382, 0, 0, 0, 349, 0, 364, + 406, 0, 338, 109, 409, 415, 379, 206, 419, 377, + 376, 422, 146, 0, 163, 111, 119, 83, 90, 0, + 110, 137, 151, 155, 413, 361, 369, 99, 367, 153, + 141, 176, 394, 142, 152, 123, 168, 147, 175, 183, + 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, + 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, + 107, 101, 138, 169, 170, 100, 189, 91, 181, 88, + 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, + 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, + 134, 0, 342, 0, 161, 178, 190, 94, 357, 166, + 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, + 158, 117, 124, 149, 188, 140, 154, 98, 177, 159, + 353, 356, 351, 352, 390, 391, 427, 428, 429, 407, + 348, 0, 354, 355, 0, 411, 417, 418, 393, 82, + 89, 121, 187, 148, 106, 179, 421, 410, 0, 381, + 424, 359, 373, 432, 374, 375, 403, 345, 389, 139, + 371, 0, 362, 340, 368, 341, 360, 383, 103, 386, + 358, 412, 392, 423, 120, 430, 122, 397, 0, 160, + 131, 0, 0, 385, 414, 387, 408, 380, 404, 350, + 396, 425, 372, 401, 426, 0, 0, 0, 80, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 0, 399, + 420, 370, 400, 402, 339, 398, 0, 343, 346, 431, + 416, 365, 366, 0, 0, 0, 0, 0, 0, 0, + 384, 388, 405, 378, 0, 0, 0, 0, 0, 0, + 1122, 0, 363, 0, 395, 0, 0, 0, 347, 344, + 0, 0, 382, 0, 0, 0, 349, 0, 364, 406, + 0, 338, 109, 409, 415, 379, 206, 419, 377, 376, + 422, 146, 0, 163, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 413, 361, 369, 99, 367, 153, 141, + 
176, 394, 142, 152, 123, 168, 147, 175, 183, 184, + 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, - 101, 138, 169, 170, 100, 194, 91, 181, 88, 92, + 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, - 0, 352, 0, 161, 178, 195, 94, 367, 157, 166, - 185, 186, 187, 188, 189, 190, 0, 0, 95, 108, - 104, 143, 135, 93, 114, 158, 117, 124, 149, 193, - 140, 154, 98, 177, 159, 363, 366, 361, 362, 400, - 401, 437, 438, 439, 417, 358, 0, 364, 365, 0, - 421, 427, 428, 403, 82, 89, 121, 192, 148, 106, - 179, 431, 420, 0, 391, 434, 369, 383, 442, 384, - 385, 413, 355, 399, 139, 381, 0, 372, 350, 378, - 351, 370, 393, 103, 396, 368, 422, 402, 433, 120, - 440, 122, 407, 0, 160, 131, 0, 0, 395, 424, - 397, 418, 390, 414, 360, 406, 435, 382, 411, 436, - 0, 0, 0, 270, 0, 0, 0, 0, 0, 0, - 0, 0, 96, 0, 409, 430, 380, 410, 412, 349, - 408, 0, 353, 356, 441, 426, 375, 376, 0, 0, - 0, 0, 0, 0, 0, 394, 398, 415, 388, 0, - 0, 0, 0, 0, 0, 762, 0, 373, 0, 405, - 0, 0, 0, 357, 354, 0, 0, 392, 0, 0, - 0, 359, 0, 374, 416, 0, 348, 109, 419, 425, - 389, 211, 429, 387, 386, 432, 146, 0, 163, 111, - 119, 83, 90, 0, 110, 137, 151, 155, 423, 371, - 379, 99, 377, 153, 141, 176, 404, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 182, 191, 84, 164, - 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, - 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, - 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 352, 0, 161, 178, 195, - 94, 367, 157, 166, 185, 186, 187, 188, 189, 190, + 0, 342, 0, 161, 178, 190, 94, 357, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 159, 363, - 366, 361, 362, 400, 401, 437, 438, 439, 417, 358, - 0, 364, 365, 0, 421, 427, 428, 403, 82, 89, - 121, 192, 148, 106, 179, 431, 420, 0, 391, 434, - 369, 383, 442, 384, 385, 413, 355, 
399, 139, 381, - 0, 372, 350, 378, 351, 370, 393, 103, 396, 368, - 422, 402, 433, 120, 440, 122, 407, 0, 160, 131, - 0, 0, 395, 424, 397, 418, 390, 414, 360, 406, - 435, 382, 411, 436, 0, 0, 0, 80, 0, 0, - 0, 0, 0, 0, 0, 0, 96, 0, 409, 430, - 380, 410, 412, 349, 408, 0, 353, 356, 441, 426, - 375, 376, 0, 0, 0, 0, 0, 0, 0, 394, - 398, 415, 388, 0, 0, 0, 0, 0, 0, 0, - 0, 373, 0, 405, 0, 0, 0, 357, 354, 0, - 0, 392, 0, 0, 0, 359, 0, 374, 416, 0, - 348, 109, 419, 425, 389, 211, 429, 387, 386, 432, + 117, 124, 149, 188, 140, 154, 98, 177, 159, 353, + 356, 351, 352, 390, 391, 427, 428, 429, 407, 348, + 0, 354, 355, 0, 411, 417, 418, 393, 82, 89, + 121, 187, 148, 106, 179, 421, 410, 0, 381, 424, + 359, 373, 432, 374, 375, 403, 345, 389, 139, 371, + 0, 362, 340, 368, 341, 360, 383, 103, 386, 358, + 412, 392, 423, 120, 430, 122, 397, 0, 160, 131, + 0, 0, 385, 414, 387, 408, 380, 404, 350, 396, + 425, 372, 401, 426, 0, 0, 0, 204, 0, 0, + 0, 0, 0, 0, 0, 0, 96, 0, 399, 420, + 370, 400, 402, 339, 398, 0, 343, 346, 431, 416, + 365, 366, 0, 0, 0, 0, 0, 0, 0, 384, + 388, 405, 378, 0, 0, 0, 0, 0, 0, 854, + 0, 363, 0, 395, 0, 0, 0, 347, 344, 0, + 0, 382, 0, 0, 0, 349, 0, 364, 406, 0, + 338, 109, 409, 415, 379, 206, 419, 377, 376, 422, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, - 151, 155, 423, 371, 379, 99, 377, 153, 141, 176, - 404, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 174, 97, 156, 86, 172, 162, + 151, 155, 413, 361, 369, 99, 367, 153, 141, 176, + 394, 142, 152, 123, 168, 147, 175, 183, 184, 165, + 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, + 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, + 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 342, 0, 161, 178, 190, 94, 357, 166, 185, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, + 124, 149, 188, 140, 154, 98, 177, 159, 353, 356, + 351, 352, 390, 391, 427, 428, 429, 407, 348, 0, + 354, 355, 0, 411, 
417, 418, 393, 82, 89, 121, + 187, 148, 106, 179, 421, 410, 0, 381, 424, 359, + 373, 432, 374, 375, 403, 345, 389, 139, 371, 0, + 362, 340, 368, 341, 360, 383, 103, 386, 358, 412, + 392, 423, 120, 430, 122, 397, 0, 160, 131, 0, + 0, 385, 414, 387, 408, 380, 404, 350, 396, 425, + 372, 401, 426, 0, 0, 0, 260, 0, 0, 0, + 0, 0, 0, 0, 0, 96, 0, 399, 420, 370, + 400, 402, 339, 398, 0, 343, 346, 431, 416, 365, + 366, 0, 0, 0, 0, 0, 0, 0, 384, 388, + 405, 378, 0, 0, 0, 0, 0, 0, 752, 0, + 363, 0, 395, 0, 0, 0, 347, 344, 0, 0, + 382, 0, 0, 0, 349, 0, 364, 406, 0, 338, + 109, 409, 415, 379, 206, 419, 377, 376, 422, 146, + 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, + 155, 413, 361, 369, 99, 367, 153, 141, 176, 394, + 142, 152, 123, 168, 147, 175, 183, 184, 165, 182, + 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 92, 180, 136, + 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 352, - 0, 161, 178, 195, 94, 367, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 135, 93, 114, 158, 117, 124, 149, 193, 140, 154, - 98, 177, 159, 363, 366, 361, 362, 400, 401, 437, - 438, 439, 417, 358, 0, 364, 365, 0, 421, 427, - 428, 403, 82, 89, 121, 192, 148, 106, 179, 431, - 420, 0, 391, 434, 369, 383, 442, 384, 385, 413, - 355, 399, 139, 381, 0, 372, 350, 378, 351, 370, - 393, 103, 396, 368, 422, 402, 433, 120, 440, 122, - 407, 0, 160, 131, 0, 0, 395, 424, 397, 418, - 390, 414, 360, 406, 435, 382, 411, 436, 0, 0, - 0, 270, 0, 0, 0, 0, 0, 0, 0, 0, - 96, 0, 409, 430, 380, 410, 412, 349, 408, 0, - 353, 356, 441, 426, 375, 376, 0, 0, 0, 0, - 0, 0, 0, 394, 398, 415, 388, 0, 0, 0, - 0, 0, 0, 0, 0, 373, 0, 405, 0, 0, - 0, 357, 354, 0, 0, 392, 0, 0, 0, 359, - 0, 374, 416, 0, 348, 109, 419, 425, 389, 211, - 429, 387, 386, 432, 146, 0, 163, 111, 119, 83, - 90, 0, 110, 137, 151, 155, 423, 371, 379, 99, - 377, 153, 
141, 176, 404, 142, 152, 123, 168, 147, - 175, 183, 184, 165, 182, 191, 84, 164, 174, 97, - 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, - 102, 107, 101, 138, 169, 170, 100, 194, 91, 181, - 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, - 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, - 132, 134, 0, 352, 0, 161, 178, 195, 94, 367, - 157, 166, 185, 186, 187, 188, 189, 190, 0, 0, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 342, + 0, 161, 178, 190, 94, 357, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, 124, - 149, 193, 140, 154, 98, 177, 159, 363, 366, 361, - 362, 400, 401, 437, 438, 439, 417, 358, 0, 364, - 365, 0, 421, 427, 428, 403, 82, 89, 121, 192, - 148, 106, 179, 431, 420, 0, 391, 434, 369, 383, - 442, 384, 385, 413, 355, 399, 139, 381, 0, 372, - 350, 378, 351, 370, 393, 103, 396, 368, 422, 402, - 433, 120, 440, 122, 407, 0, 160, 131, 0, 0, - 395, 424, 397, 418, 390, 414, 360, 406, 435, 382, - 411, 436, 0, 0, 0, 80, 0, 0, 0, 0, - 0, 0, 0, 0, 96, 0, 409, 430, 380, 410, - 412, 349, 408, 0, 353, 356, 441, 426, 375, 376, - 0, 0, 0, 0, 0, 0, 0, 394, 398, 415, - 388, 0, 0, 0, 0, 0, 0, 0, 0, 373, - 0, 405, 0, 0, 0, 357, 354, 0, 0, 392, - 0, 0, 0, 359, 0, 374, 416, 0, 348, 109, - 419, 425, 389, 211, 429, 387, 386, 432, 146, 0, + 149, 188, 140, 154, 98, 177, 159, 353, 356, 351, + 352, 390, 391, 427, 428, 429, 407, 348, 0, 354, + 355, 0, 411, 417, 418, 393, 82, 89, 121, 187, + 148, 106, 179, 421, 410, 0, 381, 424, 359, 373, + 432, 374, 375, 403, 345, 389, 139, 371, 0, 362, + 340, 368, 341, 360, 383, 103, 386, 358, 412, 392, + 423, 120, 430, 122, 397, 0, 160, 131, 0, 0, + 385, 414, 387, 408, 380, 404, 350, 396, 425, 372, + 401, 426, 0, 0, 0, 80, 0, 0, 0, 0, + 0, 0, 0, 0, 96, 0, 399, 420, 370, 400, + 402, 339, 398, 0, 343, 346, 431, 416, 365, 366, + 0, 0, 0, 0, 0, 0, 0, 384, 388, 405, + 378, 0, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 395, 0, 0, 0, 347, 344, 0, 0, 382, + 0, 0, 0, 349, 0, 364, 406, 0, 338, 109, + 409, 415, 379, 206, 419, 377, 376, 422, 146, 0, 163, 
111, 119, 83, 90, 0, 110, 137, 151, 155, - 423, 371, 379, 99, 377, 153, 141, 176, 404, 142, - 152, 123, 168, 147, 175, 183, 184, 165, 182, 191, + 413, 361, 369, 99, 367, 153, 141, 176, 394, 142, + 152, 123, 168, 147, 175, 183, 184, 165, 182, 186, + 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, + 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, + 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, + 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, + 144, 125, 145, 113, 133, 132, 134, 0, 342, 0, + 161, 178, 190, 94, 357, 166, 185, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, + 188, 140, 154, 98, 177, 159, 353, 356, 351, 352, + 390, 391, 427, 428, 429, 407, 348, 0, 354, 355, + 0, 411, 417, 418, 393, 82, 89, 121, 187, 148, + 106, 179, 421, 410, 0, 381, 424, 359, 373, 432, + 374, 375, 403, 345, 389, 139, 371, 0, 362, 340, + 368, 341, 360, 383, 103, 386, 358, 412, 392, 423, + 120, 430, 122, 397, 0, 160, 131, 0, 0, 385, + 414, 387, 408, 380, 404, 350, 396, 425, 372, 401, + 426, 0, 0, 0, 260, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 399, 420, 370, 400, 402, + 339, 398, 0, 343, 346, 431, 416, 365, 366, 0, + 0, 0, 0, 0, 0, 0, 384, 388, 405, 378, + 0, 0, 0, 0, 0, 0, 0, 0, 363, 0, + 395, 0, 0, 0, 347, 344, 0, 0, 382, 0, + 0, 0, 349, 0, 364, 406, 0, 338, 109, 409, + 415, 379, 206, 419, 377, 376, 422, 146, 0, 163, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 413, + 361, 369, 99, 367, 153, 141, 176, 394, 142, 152, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, - 100, 194, 91, 181, 88, 346, 180, 136, 167, 173, + 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, - 125, 145, 113, 133, 132, 134, 0, 352, 0, 161, - 178, 195, 94, 367, 157, 166, 185, 186, 187, 188, - 189, 190, 0, 0, 95, 108, 104, 143, 347, 345, - 114, 158, 117, 124, 149, 193, 140, 154, 98, 177, - 159, 363, 366, 361, 362, 400, 401, 437, 438, 439, - 417, 358, 0, 364, 365, 0, 421, 
427, 428, 403, - 82, 89, 121, 192, 148, 106, 179, 431, 420, 0, - 391, 434, 369, 383, 442, 384, 385, 413, 355, 399, - 139, 381, 0, 372, 350, 378, 351, 370, 393, 103, - 396, 368, 422, 402, 433, 120, 440, 122, 407, 0, - 160, 131, 0, 0, 395, 424, 397, 418, 390, 414, - 360, 406, 435, 382, 411, 436, 0, 0, 0, 209, - 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, - 409, 430, 380, 410, 412, 349, 408, 0, 353, 356, - 441, 426, 375, 376, 0, 0, 0, 0, 0, 0, - 0, 394, 398, 415, 388, 0, 0, 0, 0, 0, - 0, 0, 0, 373, 0, 405, 0, 0, 0, 357, - 354, 0, 0, 392, 0, 0, 0, 359, 0, 374, - 416, 0, 348, 109, 419, 425, 389, 211, 429, 387, - 386, 432, 146, 0, 163, 111, 119, 83, 90, 0, - 110, 137, 151, 155, 423, 371, 379, 99, 377, 153, - 141, 176, 404, 142, 152, 123, 168, 147, 175, 183, - 184, 165, 182, 191, 84, 164, 174, 97, 156, 86, - 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, - 101, 138, 169, 170, 100, 194, 91, 181, 88, 92, - 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, - 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, - 0, 352, 0, 161, 178, 195, 94, 367, 157, 166, - 185, 186, 187, 188, 189, 190, 0, 0, 95, 108, - 104, 143, 135, 93, 114, 158, 117, 124, 149, 193, - 140, 154, 98, 177, 159, 363, 366, 361, 362, 400, - 401, 437, 438, 439, 417, 358, 0, 364, 365, 0, - 421, 427, 428, 403, 82, 89, 121, 192, 148, 106, - 179, 431, 420, 0, 391, 434, 369, 383, 442, 384, - 385, 413, 355, 399, 139, 381, 0, 372, 350, 378, - 351, 370, 393, 103, 396, 368, 422, 402, 433, 120, - 440, 122, 407, 0, 160, 131, 0, 0, 395, 424, - 397, 418, 390, 414, 360, 406, 435, 382, 411, 436, + 125, 145, 113, 133, 132, 134, 0, 342, 0, 161, + 178, 190, 94, 357, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, + 140, 154, 98, 177, 159, 353, 356, 351, 352, 390, + 391, 427, 428, 429, 407, 348, 0, 354, 355, 0, + 411, 417, 418, 393, 82, 89, 121, 187, 148, 106, + 179, 421, 410, 0, 381, 424, 359, 373, 432, 374, + 375, 403, 345, 389, 139, 371, 0, 362, 340, 368, + 341, 360, 383, 103, 386, 358, 412, 392, 423, 120, + 430, 
122, 397, 0, 160, 131, 0, 0, 385, 414, + 387, 408, 380, 404, 350, 396, 425, 372, 401, 426, 0, 0, 0, 80, 0, 0, 0, 0, 0, 0, - 0, 0, 96, 0, 409, 430, 380, 410, 412, 349, - 408, 0, 353, 356, 441, 426, 375, 376, 0, 0, - 0, 0, 0, 0, 0, 394, 398, 415, 388, 0, - 0, 0, 0, 0, 0, 0, 0, 373, 0, 405, - 0, 0, 0, 357, 354, 0, 0, 392, 0, 0, - 0, 359, 0, 374, 416, 0, 348, 109, 419, 425, - 389, 211, 429, 387, 386, 432, 146, 0, 163, 111, - 119, 83, 90, 0, 110, 137, 151, 155, 423, 371, - 379, 99, 377, 153, 141, 176, 404, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 182, 191, 84, 164, - 631, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, - 91, 181, 88, 346, 180, 136, 167, 173, 130, 127, + 0, 0, 96, 0, 399, 420, 370, 400, 402, 339, + 398, 0, 343, 346, 431, 416, 365, 366, 0, 0, + 0, 0, 0, 0, 0, 384, 388, 405, 378, 0, + 0, 0, 0, 0, 0, 0, 0, 363, 0, 395, + 0, 0, 0, 347, 344, 0, 0, 382, 0, 0, + 0, 349, 0, 364, 406, 0, 338, 109, 409, 415, + 379, 206, 419, 377, 376, 422, 146, 0, 163, 111, + 119, 83, 90, 0, 110, 137, 151, 155, 413, 361, + 369, 99, 367, 153, 141, 176, 394, 142, 152, 123, + 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, + 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 169, 170, 100, + 189, 91, 181, 88, 336, 180, 136, 167, 173, 130, + 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 342, 0, 161, 178, + 190, 94, 357, 166, 185, 0, 0, 95, 108, 104, + 143, 337, 335, 114, 158, 117, 124, 149, 188, 140, + 154, 98, 177, 159, 353, 356, 351, 352, 390, 391, + 427, 428, 429, 407, 348, 0, 354, 355, 0, 411, + 417, 418, 393, 82, 89, 121, 187, 148, 106, 179, + 421, 410, 0, 381, 424, 359, 373, 432, 374, 375, + 403, 345, 389, 139, 371, 0, 362, 340, 368, 341, + 360, 383, 103, 386, 358, 412, 392, 423, 120, 430, + 122, 397, 0, 160, 131, 0, 0, 385, 414, 387, + 408, 380, 404, 350, 396, 425, 372, 401, 426, 0, + 0, 0, 204, 0, 0, 0, 0, 0, 0, 0, + 0, 96, 0, 399, 420, 370, 400, 402, 339, 398, + 
0, 343, 346, 431, 416, 365, 366, 0, 0, 0, + 0, 0, 0, 0, 384, 388, 405, 378, 0, 0, + 0, 0, 0, 0, 0, 0, 363, 0, 395, 0, + 0, 0, 347, 344, 0, 0, 382, 0, 0, 0, + 349, 0, 364, 406, 0, 338, 109, 409, 415, 379, + 206, 419, 377, 376, 422, 146, 0, 163, 111, 119, + 83, 90, 0, 110, 137, 151, 155, 413, 361, 369, + 99, 367, 153, 141, 176, 394, 142, 152, 123, 168, + 147, 175, 183, 184, 165, 182, 186, 157, 84, 164, + 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, + 0, 150, 102, 107, 101, 138, 169, 170, 100, 189, + 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 352, 0, 161, 178, 195, - 94, 367, 157, 166, 185, 186, 187, 188, 189, 190, - 0, 0, 95, 108, 104, 143, 347, 345, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 159, 363, - 366, 361, 362, 400, 401, 437, 438, 439, 417, 358, - 0, 364, 365, 0, 421, 427, 428, 403, 82, 89, - 121, 192, 148, 106, 179, 431, 420, 0, 391, 434, - 369, 383, 442, 384, 385, 413, 355, 399, 139, 381, - 0, 372, 350, 378, 351, 370, 393, 103, 396, 368, - 422, 402, 433, 120, 440, 122, 407, 0, 160, 131, - 0, 0, 395, 424, 397, 418, 390, 414, 360, 406, - 435, 382, 411, 436, 0, 0, 0, 80, 0, 0, - 0, 0, 0, 0, 0, 0, 96, 0, 409, 430, - 380, 410, 412, 349, 408, 0, 353, 356, 441, 426, - 375, 376, 0, 0, 0, 0, 0, 0, 0, 394, - 398, 415, 388, 0, 0, 0, 0, 0, 0, 0, - 0, 373, 0, 405, 0, 0, 0, 357, 354, 0, - 0, 392, 0, 0, 0, 359, 0, 374, 416, 0, - 348, 109, 419, 425, 389, 211, 429, 387, 386, 432, - 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, - 151, 155, 423, 371, 379, 99, 377, 153, 141, 176, - 404, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 337, 97, 156, 86, 172, 162, - 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 346, 180, 136, - 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 352, - 0, 161, 178, 195, 94, 367, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 347, 345, 340, 339, 
117, 124, 149, 193, 140, 154, - 98, 177, 159, 363, 366, 361, 362, 400, 401, 437, - 438, 439, 417, 358, 0, 364, 365, 0, 421, 427, - 428, 403, 82, 89, 121, 192, 148, 106, 179, 139, - 0, 0, 0, 0, 272, 0, 0, 0, 103, 0, - 269, 0, 0, 0, 120, 312, 122, 0, 0, 160, - 131, 0, 0, 0, 0, 303, 304, 0, 0, 0, - 0, 0, 0, 871, 0, 54, 0, 0, 270, 291, - 290, 293, 294, 295, 296, 0, 0, 96, 292, 297, - 298, 299, 872, 0, 0, 267, 284, 0, 311, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 281, 282, - 0, 0, 0, 0, 325, 0, 283, 0, 0, 278, - 279, 280, 285, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 109, 0, 0, 0, 211, 0, 0, 323, - 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, - 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, - 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, - 165, 182, 191, 84, 164, 174, 97, 156, 86, 172, - 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, - 138, 169, 170, 100, 194, 91, 181, 88, 92, 180, - 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, - 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, - 0, 0, 161, 178, 195, 94, 0, 157, 166, 185, - 186, 187, 188, 189, 190, 0, 0, 95, 108, 104, - 143, 135, 93, 114, 158, 117, 124, 149, 193, 140, - 154, 98, 177, 159, 313, 324, 319, 320, 317, 318, - 316, 315, 314, 326, 305, 306, 307, 308, 310, 0, - 321, 322, 309, 82, 89, 121, 192, 148, 106, 179, - 139, 0, 0, 798, 0, 272, 0, 0, 0, 103, - 0, 269, 0, 0, 0, 120, 312, 122, 0, 0, - 160, 131, 0, 0, 0, 0, 303, 304, 0, 0, - 0, 0, 0, 0, 0, 0, 54, 0, 0, 270, - 291, 290, 293, 294, 295, 296, 0, 0, 96, 292, - 297, 298, 299, 0, 0, 0, 267, 284, 0, 311, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 281, - 282, 263, 0, 0, 0, 325, 0, 283, 0, 0, - 278, 279, 280, 285, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 109, 0, 0, 0, 211, 0, 0, - 323, 0, 146, 0, 163, 111, 119, 83, 90, 0, - 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, - 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, - 184, 165, 182, 191, 84, 164, 174, 97, 156, 86, - 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, - 101, 138, 169, 170, 100, 194, 91, 181, 88, 92, - 
180, 136, 167, 173, 130, 127, 87, 171, 128, 126, - 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, - 0, 0, 0, 161, 178, 195, 94, 0, 157, 166, - 185, 186, 187, 188, 189, 190, 0, 0, 95, 108, - 104, 143, 135, 93, 114, 158, 117, 124, 149, 193, - 140, 154, 98, 177, 159, 313, 324, 319, 320, 317, - 318, 316, 315, 314, 326, 305, 306, 307, 308, 310, - 0, 321, 322, 309, 82, 89, 121, 192, 148, 106, - 179, 139, 0, 0, 0, 0, 272, 0, 0, 0, - 103, 0, 269, 0, 0, 0, 120, 312, 122, 0, - 0, 160, 131, 0, 0, 0, 0, 303, 304, 0, - 0, 0, 0, 0, 0, 0, 0, 54, 0, 504, - 270, 291, 290, 293, 294, 295, 296, 0, 0, 96, - 292, 297, 298, 299, 0, 0, 0, 267, 284, 0, - 311, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 281, 282, 0, 0, 0, 0, 325, 0, 283, 0, - 0, 278, 279, 280, 285, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 109, 0, 0, 0, 211, 0, - 0, 323, 0, 146, 0, 163, 111, 119, 83, 90, - 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, - 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, - 183, 184, 165, 182, 191, 84, 164, 174, 97, 156, - 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, - 107, 101, 138, 169, 170, 100, 194, 91, 181, 88, - 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, - 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, - 134, 0, 0, 0, 161, 178, 195, 94, 0, 157, - 166, 185, 186, 187, 188, 189, 190, 0, 0, 95, - 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, - 193, 140, 154, 98, 177, 159, 313, 324, 319, 320, - 317, 318, 316, 315, 314, 326, 305, 306, 307, 308, - 310, 0, 321, 322, 309, 82, 89, 121, 192, 148, - 106, 179, 139, 0, 0, 0, 0, 272, 0, 0, - 0, 103, 0, 269, 0, 0, 0, 120, 312, 122, - 0, 0, 160, 131, 0, 0, 0, 0, 303, 304, - 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, - 0, 270, 291, 290, 293, 294, 295, 296, 0, 0, - 96, 292, 297, 298, 299, 0, 0, 0, 267, 284, - 0, 311, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 281, 282, 263, 0, 0, 0, 325, 0, 283, - 0, 0, 278, 279, 280, 285, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 109, 0, 0, 0, 211, - 0, 0, 323, 0, 146, 0, 163, 111, 119, 83, - 90, 0, 110, 137, 151, 155, 
0, 0, 0, 99, - 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, - 175, 183, 184, 165, 182, 191, 84, 164, 174, 97, + 113, 133, 132, 134, 0, 342, 0, 161, 178, 190, + 94, 357, 166, 185, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 158, 117, 124, 149, 188, 140, 154, + 98, 177, 159, 353, 356, 351, 352, 390, 391, 427, + 428, 429, 407, 348, 0, 354, 355, 0, 411, 417, + 418, 393, 82, 89, 121, 187, 148, 106, 179, 421, + 410, 0, 381, 424, 359, 373, 432, 374, 375, 403, + 345, 389, 139, 371, 0, 362, 340, 368, 341, 360, + 383, 103, 386, 358, 412, 392, 423, 120, 430, 122, + 397, 0, 160, 131, 0, 0, 385, 414, 387, 408, + 380, 404, 350, 396, 425, 372, 401, 426, 0, 0, + 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 399, 420, 370, 400, 402, 339, 398, 0, + 343, 346, 431, 416, 365, 366, 0, 0, 0, 0, + 0, 0, 0, 384, 388, 405, 378, 0, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 395, 0, 0, + 0, 347, 344, 0, 0, 382, 0, 0, 0, 349, + 0, 364, 406, 0, 338, 109, 409, 415, 379, 206, + 419, 377, 376, 422, 146, 0, 163, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 413, 361, 369, 99, + 367, 153, 141, 176, 394, 142, 152, 123, 168, 147, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 620, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 336, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 342, 0, 161, 178, 190, 94, + 357, 166, 185, 0, 0, 95, 108, 104, 143, 337, + 335, 114, 158, 117, 124, 149, 188, 140, 154, 98, + 177, 159, 353, 356, 351, 352, 390, 391, 427, 428, + 429, 407, 348, 0, 354, 355, 0, 411, 417, 418, + 393, 82, 89, 121, 187, 148, 106, 179, 421, 410, + 0, 381, 424, 359, 373, 432, 374, 375, 403, 345, + 389, 139, 371, 0, 362, 340, 368, 341, 360, 383, + 103, 386, 358, 412, 392, 423, 120, 430, 122, 397, + 0, 160, 131, 0, 0, 385, 414, 387, 408, 380, + 404, 350, 396, 425, 372, 401, 426, 0, 0, 0, + 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, + 0, 399, 420, 370, 400, 402, 339, 398, 0, 343, + 346, 431, 416, 365, 366, 0, 0, 0, 0, 
0, + 0, 0, 384, 388, 405, 378, 0, 0, 0, 0, + 0, 0, 0, 0, 363, 0, 395, 0, 0, 0, + 347, 344, 0, 0, 382, 0, 0, 0, 349, 0, + 364, 406, 0, 338, 109, 409, 415, 379, 206, 419, + 377, 376, 422, 146, 0, 163, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 413, 361, 369, 99, 367, + 153, 141, 176, 394, 142, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 327, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, - 102, 107, 101, 138, 169, 170, 100, 194, 91, 181, - 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 336, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, - 132, 134, 0, 0, 0, 161, 178, 195, 94, 0, - 157, 166, 185, 186, 187, 188, 189, 190, 0, 0, + 132, 134, 0, 342, 0, 161, 178, 190, 94, 357, + 166, 185, 0, 0, 95, 108, 104, 143, 337, 335, + 330, 329, 117, 124, 149, 188, 140, 154, 98, 177, + 159, 353, 356, 351, 352, 390, 391, 427, 428, 429, + 407, 348, 0, 354, 355, 0, 411, 417, 418, 393, + 82, 89, 121, 187, 148, 106, 179, 139, 0, 0, + 788, 0, 262, 0, 0, 0, 103, 0, 259, 0, + 0, 0, 120, 302, 122, 0, 0, 160, 131, 0, + 0, 0, 0, 293, 294, 0, 0, 0, 0, 0, + 0, 0, 0, 54, 0, 0, 260, 281, 280, 283, + 284, 285, 286, 0, 0, 96, 282, 287, 288, 289, + 0, 0, 0, 257, 274, 0, 301, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 271, 272, 253, 0, + 0, 0, 315, 0, 273, 0, 0, 268, 269, 270, + 275, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 109, 0, 0, 0, 206, 0, 0, 313, 0, 146, + 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, + 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, + 142, 152, 123, 168, 147, 175, 183, 184, 165, 182, + 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, + 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, 124, - 149, 193, 140, 154, 98, 177, 159, 313, 324, 
319, - 320, 317, 318, 316, 315, 314, 326, 305, 306, 307, - 308, 310, 0, 321, 322, 309, 82, 89, 121, 192, - 148, 106, 179, 139, 0, 0, 0, 0, 272, 0, - 0, 0, 103, 0, 269, 0, 0, 0, 120, 312, - 122, 0, 0, 160, 131, 0, 0, 0, 0, 303, - 304, 0, 0, 0, 0, 0, 0, 0, 0, 54, - 0, 0, 270, 291, 813, 293, 294, 295, 296, 0, - 0, 96, 292, 297, 298, 299, 0, 0, 0, 267, - 284, 0, 311, 0, 0, 0, 0, 0, 0, 0, + 149, 188, 140, 154, 98, 177, 159, 303, 314, 309, + 310, 307, 308, 306, 305, 304, 316, 295, 296, 297, + 298, 300, 0, 311, 312, 299, 82, 89, 121, 187, + 148, 106, 179, 139, 0, 0, 0, 0, 262, 0, + 0, 0, 103, 0, 259, 0, 0, 0, 120, 302, + 122, 0, 0, 160, 131, 0, 0, 0, 0, 293, + 294, 0, 0, 0, 0, 0, 0, 0, 0, 54, + 0, 493, 260, 281, 280, 283, 284, 285, 286, 0, + 0, 96, 282, 287, 288, 289, 0, 0, 0, 257, + 274, 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 281, 282, 263, 0, 0, 0, 325, 0, - 283, 0, 0, 278, 279, 280, 285, 0, 0, 0, + 0, 0, 271, 272, 0, 0, 0, 0, 315, 0, + 273, 0, 0, 268, 269, 270, 275, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, 0, - 211, 0, 0, 323, 0, 146, 0, 163, 111, 119, + 206, 0, 0, 313, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, - 147, 175, 183, 184, 165, 182, 191, 84, 164, 174, - 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, - 150, 102, 107, 101, 138, 169, 170, 100, 194, 91, - 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, - 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, - 133, 132, 134, 0, 0, 0, 161, 178, 195, 94, - 0, 157, 166, 185, 186, 187, 188, 189, 190, 0, - 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, - 124, 149, 193, 140, 154, 98, 177, 159, 313, 324, - 319, 320, 317, 318, 316, 315, 314, 326, 305, 306, - 307, 308, 310, 0, 321, 322, 309, 82, 89, 121, - 192, 148, 106, 179, 139, 0, 0, 0, 0, 272, - 0, 0, 0, 103, 0, 269, 0, 0, 0, 120, - 312, 122, 0, 0, 160, 131, 0, 0, 0, 0, - 303, 304, 0, 0, 0, 0, 0, 0, 0, 0, - 54, 0, 0, 270, 291, 810, 293, 294, 295, 296, - 0, 0, 96, 292, 297, 298, 299, 
0, 0, 0, - 267, 284, 0, 311, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 281, 282, 263, 0, 0, 0, 325, - 0, 283, 0, 0, 278, 279, 280, 285, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, - 0, 211, 0, 0, 323, 0, 146, 0, 163, 111, - 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, - 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 182, 191, 84, 164, + 147, 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, + 0, 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 0, 0, 161, 178, 195, - 94, 0, 157, 166, 185, 186, 187, 188, 189, 190, + 113, 133, 132, 134, 0, 0, 0, 161, 178, 190, + 94, 0, 166, 185, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 158, 117, 124, 149, 188, 140, 154, + 98, 177, 159, 303, 314, 309, 310, 307, 308, 306, + 305, 304, 316, 295, 296, 297, 298, 300, 0, 311, + 312, 299, 82, 89, 121, 187, 148, 106, 179, 139, + 0, 0, 0, 0, 262, 0, 0, 0, 103, 0, + 259, 0, 0, 0, 120, 302, 122, 0, 0, 160, + 131, 0, 0, 0, 0, 293, 294, 0, 0, 0, + 0, 0, 0, 0, 0, 54, 0, 0, 260, 281, + 280, 283, 284, 285, 286, 0, 0, 96, 282, 287, + 288, 289, 0, 0, 0, 257, 274, 0, 301, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 271, 272, + 253, 0, 0, 0, 315, 0, 273, 0, 0, 268, + 269, 270, 275, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 206, 0, 0, 313, + 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, + 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, + 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, + 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 0, 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 
159, 313, - 324, 319, 320, 317, 318, 316, 315, 314, 326, 305, - 306, 307, 308, 310, 24, 321, 322, 309, 82, 89, - 121, 192, 148, 106, 179, 0, 139, 0, 0, 0, - 0, 272, 0, 0, 0, 103, 0, 269, 0, 0, - 0, 120, 312, 122, 0, 0, 160, 131, 0, 0, - 0, 0, 303, 304, 0, 0, 0, 0, 0, 0, - 0, 0, 54, 0, 0, 270, 291, 290, 293, 294, - 295, 296, 0, 0, 96, 292, 297, 298, 299, 0, - 0, 0, 267, 284, 0, 311, 0, 0, 0, 0, + 117, 124, 149, 188, 140, 154, 98, 177, 159, 303, + 314, 309, 310, 307, 308, 306, 305, 304, 316, 295, + 296, 297, 298, 300, 0, 311, 312, 299, 82, 89, + 121, 187, 148, 106, 179, 139, 0, 0, 0, 0, + 262, 0, 0, 0, 103, 0, 259, 0, 0, 0, + 120, 302, 122, 0, 0, 160, 131, 0, 0, 0, + 0, 293, 294, 0, 0, 0, 0, 0, 0, 0, + 0, 54, 0, 0, 260, 281, 803, 283, 284, 285, + 286, 0, 0, 96, 282, 287, 288, 289, 0, 0, + 0, 257, 274, 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 281, 282, 0, 0, 0, - 0, 325, 0, 283, 0, 0, 278, 279, 280, 285, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, - 0, 0, 0, 211, 0, 0, 323, 0, 146, 0, - 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, - 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, - 152, 123, 168, 147, 175, 183, 184, 165, 182, 191, + 0, 0, 0, 0, 271, 272, 253, 0, 0, 0, + 315, 0, 273, 0, 0, 268, 269, 270, 275, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, + 0, 0, 206, 0, 0, 313, 0, 146, 0, 163, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, - 100, 194, 91, 181, 88, 92, 180, 136, 167, 173, + 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, - 178, 195, 94, 0, 157, 166, 185, 186, 187, 188, - 189, 190, 0, 0, 95, 108, 104, 143, 135, 93, - 114, 158, 117, 124, 149, 193, 140, 154, 98, 177, - 159, 313, 324, 319, 320, 317, 318, 316, 315, 314, - 326, 305, 306, 307, 308, 310, 0, 321, 322, 309, - 82, 89, 
121, 192, 148, 106, 179, 139, 0, 0, - 0, 0, 272, 0, 0, 0, 103, 0, 269, 0, - 0, 0, 120, 312, 122, 0, 0, 160, 131, 0, - 0, 0, 0, 303, 304, 0, 0, 0, 0, 0, - 0, 0, 0, 54, 0, 0, 270, 291, 290, 293, - 294, 295, 296, 0, 0, 96, 292, 297, 298, 299, - 0, 0, 0, 267, 284, 0, 311, 0, 0, 0, + 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, + 140, 154, 98, 177, 159, 303, 314, 309, 310, 307, + 308, 306, 305, 304, 316, 295, 296, 297, 298, 300, + 0, 311, 312, 299, 82, 89, 121, 187, 148, 106, + 179, 139, 0, 0, 0, 0, 262, 0, 0, 0, + 103, 0, 259, 0, 0, 0, 120, 302, 122, 0, + 0, 160, 131, 0, 0, 0, 0, 293, 294, 0, + 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, + 260, 281, 800, 283, 284, 285, 286, 0, 0, 96, + 282, 287, 288, 289, 0, 0, 0, 257, 274, 0, + 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 281, 282, 0, 0, - 0, 0, 325, 0, 283, 0, 0, 278, 279, 280, - 285, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 109, 0, 0, 0, 211, 0, 0, 323, 0, 146, - 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, - 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, - 142, 152, 123, 168, 147, 175, 183, 184, 165, 182, - 191, 84, 164, 174, 97, 156, 86, 172, 162, 129, - 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, - 170, 100, 194, 91, 181, 88, 92, 180, 136, 167, - 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, - 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, - 161, 178, 195, 94, 0, 157, 166, 185, 186, 187, - 188, 189, 190, 0, 0, 95, 108, 104, 143, 135, - 93, 114, 158, 117, 124, 149, 193, 140, 154, 98, - 177, 159, 313, 324, 319, 320, 317, 318, 316, 315, - 314, 326, 305, 306, 307, 308, 310, 0, 321, 322, - 309, 82, 89, 121, 192, 148, 106, 179, 139, 0, - 0, 0, 0, 0, 0, 0, 0, 103, 0, 0, - 0, 0, 0, 120, 312, 122, 0, 0, 160, 131, - 0, 0, 0, 0, 303, 304, 0, 0, 0, 0, - 0, 0, 0, 0, 54, 0, 0, 270, 291, 290, - 293, 294, 295, 296, 0, 0, 96, 292, 297, 298, - 299, 0, 0, 0, 0, 284, 0, 311, 0, 0, + 271, 272, 253, 0, 0, 0, 315, 0, 273, 0, + 0, 268, 269, 270, 275, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 109, 
0, 0, 0, 206, 0, + 0, 313, 0, 146, 0, 163, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, + 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, + 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 161, 178, 190, 94, 0, + 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, + 159, 303, 314, 309, 310, 307, 308, 306, 305, 304, + 316, 295, 296, 297, 298, 300, 24, 311, 312, 299, + 82, 89, 121, 187, 148, 106, 179, 0, 139, 0, + 0, 0, 0, 262, 0, 0, 0, 103, 0, 259, + 0, 0, 0, 120, 302, 122, 0, 0, 160, 131, + 0, 0, 0, 0, 293, 294, 0, 0, 0, 0, + 0, 0, 0, 0, 54, 0, 0, 260, 281, 280, + 283, 284, 285, 286, 0, 0, 96, 282, 287, 288, + 289, 0, 0, 0, 257, 274, 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 281, 282, 0, - 0, 0, 0, 325, 0, 283, 0, 0, 278, 279, - 280, 285, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 109, 0, 0, 0, 211, 0, 0, 323, 0, + 0, 0, 0, 0, 0, 0, 0, 271, 272, 0, + 0, 0, 0, 315, 0, 273, 0, 0, 268, 269, + 270, 275, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 206, 0, 0, 313, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, - 1426, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 174, 97, 156, 86, 172, 162, - 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 92, 180, 136, - 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, - 0, 161, 178, 195, 94, 0, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 135, 93, 114, 158, 117, 124, 149, 193, 140, 154, - 98, 177, 159, 313, 324, 319, 320, 317, 318, 316, - 315, 314, 326, 305, 306, 307, 308, 310, 0, 321, - 322, 309, 82, 89, 121, 192, 148, 106, 179, 139, - 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, - 0, 0, 0, 0, 120, 
312, 122, 0, 0, 160, - 131, 0, 0, 0, 0, 303, 304, 0, 0, 0, - 0, 0, 0, 0, 0, 54, 0, 504, 270, 291, - 290, 293, 294, 295, 296, 0, 0, 96, 292, 297, - 298, 299, 0, 0, 0, 0, 284, 0, 311, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 281, 282, - 0, 0, 0, 0, 325, 0, 283, 0, 0, 278, - 279, 280, 285, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 109, 0, 0, 0, 211, 0, 0, 323, - 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, - 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, - 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, - 165, 182, 191, 84, 164, 174, 97, 156, 86, 172, + 0, 142, 152, 123, 168, 147, 175, 183, 184, 165, + 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, - 138, 169, 170, 100, 194, 91, 181, 88, 92, 180, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, - 0, 0, 161, 178, 195, 94, 0, 157, 166, 185, - 186, 187, 188, 189, 190, 0, 0, 95, 108, 104, - 143, 135, 93, 114, 158, 117, 124, 149, 193, 140, - 154, 98, 177, 159, 313, 324, 319, 320, 317, 318, - 316, 315, 314, 326, 305, 306, 307, 308, 310, 0, - 321, 322, 309, 82, 89, 121, 192, 148, 106, 179, + 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, + 124, 149, 188, 140, 154, 98, 177, 159, 303, 314, + 309, 310, 307, 308, 306, 305, 304, 316, 295, 296, + 297, 298, 300, 0, 311, 312, 299, 82, 89, 121, + 187, 148, 106, 179, 139, 0, 0, 0, 0, 262, + 0, 0, 0, 103, 0, 259, 0, 0, 0, 120, + 302, 122, 0, 0, 160, 131, 0, 0, 0, 0, + 293, 294, 0, 0, 0, 0, 0, 0, 0, 0, + 54, 0, 0, 260, 281, 280, 283, 284, 285, 286, + 0, 0, 96, 282, 287, 288, 289, 0, 0, 0, + 257, 274, 0, 301, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 271, 272, 0, 0, 0, 0, 315, + 0, 273, 0, 0, 268, 269, 270, 275, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, + 0, 206, 0, 0, 313, 0, 146, 0, 163, 111, + 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, + 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, + 168, 
147, 175, 183, 184, 165, 182, 186, 157, 84, + 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 169, 170, 100, + 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, + 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 0, 0, 161, 178, + 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 158, 117, 124, 149, 188, 140, + 154, 98, 177, 159, 303, 314, 309, 310, 307, 308, + 306, 305, 304, 316, 295, 296, 297, 298, 300, 0, + 311, 312, 299, 82, 89, 121, 187, 148, 106, 179, 139, 0, 0, 0, 0, 0, 0, 0, 0, 103, - 0, 0, 0, 0, 0, 120, 312, 122, 0, 0, - 160, 131, 0, 0, 0, 0, 303, 304, 0, 0, - 0, 0, 0, 0, 0, 0, 54, 0, 0, 270, - 291, 290, 293, 294, 295, 296, 0, 0, 96, 292, - 297, 298, 299, 0, 0, 0, 0, 284, 0, 311, + 0, 0, 0, 0, 0, 120, 302, 122, 0, 0, + 160, 131, 0, 0, 0, 0, 293, 294, 0, 0, + 0, 0, 0, 0, 0, 0, 54, 0, 0, 260, + 281, 280, 283, 284, 285, 286, 0, 0, 96, 282, + 287, 288, 289, 0, 0, 0, 0, 274, 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 281, - 282, 0, 0, 0, 0, 325, 0, 283, 0, 0, - 278, 279, 280, 285, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 109, 0, 0, 0, 211, 0, 0, - 323, 0, 146, 0, 163, 111, 119, 83, 90, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 271, + 272, 0, 0, 0, 0, 315, 0, 273, 0, 0, + 268, 269, 270, 275, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 109, 0, 0, 0, 206, 0, 0, + 313, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, - 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, - 184, 165, 182, 191, 84, 164, 174, 97, 156, 86, - 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, - 101, 138, 169, 170, 100, 194, 91, 181, 88, 92, - 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, - 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, - 0, 0, 0, 161, 178, 195, 94, 0, 157, 166, - 185, 186, 187, 188, 189, 190, 0, 0, 95, 108, - 104, 143, 135, 93, 114, 158, 117, 124, 149, 193, - 140, 154, 98, 177, 159, 313, 324, 319, 320, 317, - 318, 316, 315, 314, 326, 305, 306, 307, 308, 310, - 0, 321, 322, 309, 82, 89, 
121, 192, 148, 106, - 179, 139, 0, 0, 0, 0, 0, 0, 0, 0, - 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, - 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 538, 537, 547, 548, - 540, 541, 542, 543, 544, 545, 546, 539, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 109, 0, 0, 0, 211, 0, - 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, - 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, - 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, - 183, 184, 165, 182, 191, 84, 164, 174, 97, 156, + 141, 176, 1421, 142, 152, 123, 168, 147, 175, 183, + 184, 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, - 107, 101, 138, 169, 170, 100, 194, 91, 181, 88, + 107, 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, - 134, 0, 0, 0, 161, 178, 195, 94, 0, 157, - 166, 185, 186, 187, 188, 189, 190, 0, 0, 95, - 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, - 193, 140, 154, 98, 177, 159, 0, 0, 0, 0, + 134, 0, 0, 0, 161, 178, 190, 94, 0, 166, + 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, + 158, 117, 124, 149, 188, 140, 154, 98, 177, 159, + 303, 314, 309, 310, 307, 308, 306, 305, 304, 316, + 295, 296, 297, 298, 300, 0, 311, 312, 299, 82, + 89, 121, 187, 148, 106, 179, 139, 0, 0, 0, + 0, 0, 0, 0, 0, 103, 0, 0, 0, 0, + 0, 120, 302, 122, 0, 0, 160, 131, 0, 0, + 0, 0, 293, 294, 0, 0, 0, 0, 0, 0, + 0, 0, 54, 0, 493, 260, 281, 280, 283, 284, + 285, 286, 0, 0, 96, 282, 287, 288, 289, 0, + 0, 0, 0, 274, 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 139, 0, 82, 89, 121, 192, 148, - 106, 179, 103, 0, 0, 0, 0, 0, 120, 0, - 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 271, 272, 0, 0, 0, + 0, 315, 0, 273, 0, 0, 268, 269, 270, 275, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 0, 0, 206, 0, 0, 313, 0, 146, 0, + 163, 111, 119, 83, 90, 0, 110, 137, 
151, 155, + 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, + 152, 123, 168, 147, 175, 183, 184, 165, 182, 186, + 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, + 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, + 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, + 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, + 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, + 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, + 188, 140, 154, 98, 177, 159, 303, 314, 309, 310, + 307, 308, 306, 305, 304, 316, 295, 296, 297, 298, + 300, 0, 311, 312, 299, 82, 89, 121, 187, 148, + 106, 179, 139, 0, 0, 0, 0, 0, 0, 0, + 0, 103, 0, 0, 0, 0, 0, 120, 302, 122, + 0, 0, 160, 131, 0, 0, 0, 0, 293, 294, + 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, + 0, 260, 281, 280, 283, 284, 285, 286, 0, 0, + 96, 282, 287, 288, 289, 0, 0, 0, 0, 274, + 0, 301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 80, 0, 0, 0, 0, 0, 0, 0, - 0, 96, 0, 0, 0, 0, 0, 74, 0, 0, + 0, 271, 272, 0, 0, 0, 0, 315, 0, 273, + 0, 0, 268, 269, 270, 275, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 206, + 0, 0, 313, 0, 146, 0, 163, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 161, 178, 190, 94, + 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, + 177, 159, 303, 314, 309, 310, 307, 308, 306, 305, + 304, 316, 295, 296, 297, 298, 300, 0, 311, 312, + 299, 82, 89, 121, 187, 148, 106, 179, 139, 0, + 0, 0, 0, 0, 0, 0, 0, 103, 0, 0, + 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 80, 0, 0, + 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 527, 
526, 536, 537, 529, 530, 531, + 532, 533, 534, 535, 528, 0, 0, 538, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 109, 76, 77, 0, - 73, 0, 0, 0, 78, 146, 0, 163, 111, 119, - 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, - 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, - 147, 175, 183, 184, 165, 182, 191, 84, 164, 174, - 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, - 150, 102, 107, 101, 138, 169, 170, 100, 194, 91, - 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, - 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, - 133, 132, 134, 0, 0, 0, 161, 178, 195, 94, - 0, 157, 166, 185, 186, 187, 188, 189, 190, 0, + 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, + 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, + 0, 142, 152, 123, 168, 147, 175, 183, 184, 165, + 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, + 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, + 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, - 124, 149, 193, 140, 154, 98, 177, 159, 0, 75, + 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 82, 89, 121, - 192, 148, 106, 179, 139, 0, 0, 0, 854, 0, + 187, 148, 106, 179, 139, 0, 0, 0, 515, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 209, 0, 856, 0, 0, 0, 0, - 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 80, 0, 517, 0, 0, 0, 0, + 0, 0, 96, 0, 0, 0, 0, 0, 512, 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, - 0, 211, 0, 0, 0, 0, 146, 0, 163, 111, + 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 
182, 191, 84, 164, - 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, - 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, - 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 0, 0, 161, 178, 195, - 94, 0, 157, 166, 185, 186, 187, 188, 189, 190, - 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 159, 0, - 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, - 121, 192, 148, 106, 179, 103, 0, 0, 0, 0, - 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, + 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, + 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 169, 170, 100, + 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, + 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 0, 0, 161, 178, + 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 158, 117, 124, 149, 188, 140, + 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 54, 0, 0, 80, 0, 0, 0, 0, - 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, + 0, 139, 0, 82, 89, 121, 187, 148, 106, 179, + 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, + 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, + 0, 0, 0, 0, 0, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, - 0, 0, 0, 211, 0, 0, 0, 0, 146, 0, - 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, - 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, - 152, 123, 168, 147, 175, 183, 184, 165, 182, 191, - 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, - 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, - 100, 194, 91, 181, 88, 92, 180, 136, 167, 173, - 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, - 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, - 178, 195, 94, 0, 157, 166, 185, 186, 187, 188, - 189, 190, 0, 0, 95, 108, 104, 143, 135, 93, - 114, 158, 117, 124, 149, 193, 140, 154, 98, 177, - 
159, 0, 0, 0, 0, 0, 24, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 109, 76, 77, 0, 73, 0, + 0, 0, 78, 146, 0, 163, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, + 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, + 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 161, 178, 190, 94, 0, + 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, + 159, 0, 75, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, 0, - 82, 89, 121, 192, 148, 106, 179, 103, 0, 0, + 82, 89, 121, 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 54, 0, 0, 209, 0, 0, + 0, 0, 0, 0, 54, 0, 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 109, 0, 0, 0, 211, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 174, 97, 156, 86, 172, 162, - 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 92, 180, 136, - 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, - 0, 161, 178, 195, 94, 0, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 135, 93, 114, 158, 117, 124, 149, 193, 140, 154, - 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 82, 89, 121, 192, 148, 106, 179, 139, - 0, 0, 0, 854, 0, 0, 0, 0, 103, 0, - 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, - 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 209, 0, - 856, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 182, 
186, 157, 84, 164, 174, 97, 156, 86, 172, + 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, + 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, + 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, + 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 139, 0, 82, 89, 121, + 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, + 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 54, 0, 0, 204, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 109, 0, 0, 0, 211, 0, 0, 0, - 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, - 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, - 176, 0, 852, 152, 123, 168, 147, 175, 183, 184, - 165, 182, 191, 84, 164, 174, 97, 156, 86, 172, - 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, - 138, 169, 170, 100, 194, 91, 181, 88, 92, 180, - 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, - 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, - 0, 0, 161, 178, 195, 94, 0, 157, 166, 185, - 186, 187, 188, 189, 190, 0, 0, 95, 108, 104, - 143, 135, 93, 114, 158, 117, 124, 149, 193, 140, - 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, + 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, + 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, + 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, + 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, + 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, + 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 139, 0, 82, 89, 
121, 192, 148, 106, 179, + 0, 0, 0, 0, 82, 89, 121, 187, 148, 106, + 179, 139, 0, 0, 0, 844, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 80, 0, 0, 749, 0, 0, 750, 0, 0, 96, + 204, 0, 846, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 109, 0, 0, 0, 211, 0, + 0, 0, 0, 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, - 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, - 183, 184, 165, 182, 191, 84, 164, 174, 97, 156, - 86, 172, 162, 129, 115, 116, 85, 0, 150, 102, - 107, 101, 138, 169, 170, 100, 194, 91, 181, 88, - 92, 180, 136, 167, 173, 130, 127, 87, 171, 128, - 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, - 134, 0, 0, 0, 161, 178, 195, 94, 0, 157, - 166, 185, 186, 187, 188, 189, 190, 0, 0, 95, - 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, - 193, 140, 154, 98, 177, 159, 0, 0, 0, 0, + 153, 141, 176, 0, 842, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, + 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 161, 178, 190, 94, 0, + 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, + 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 139, 0, + 82, 89, 121, 187, 148, 106, 179, 103, 0, 0, + 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 139, 0, 82, 89, 121, 192, 148, - 106, 179, 103, 0, 640, 0, 0, 0, 120, 0, - 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 80, 0, 0, + 739, 0, 0, 740, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 80, 0, 639, 0, 0, 0, 0, 0, - 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 109, 0, 0, 0, - 211, 0, 0, 0, 0, 146, 0, 163, 111, 119, - 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, - 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, - 147, 175, 183, 184, 165, 182, 191, 84, 164, 174, - 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, - 150, 102, 107, 101, 138, 169, 170, 100, 194, 91, - 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, - 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, - 133, 132, 134, 0, 0, 0, 161, 178, 195, 94, - 0, 157, 166, 185, 186, 187, 188, 189, 190, 0, + 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, + 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, + 0, 142, 152, 123, 168, 147, 175, 183, 184, 165, + 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, + 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, + 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, - 124, 149, 193, 140, 154, 98, 177, 159, 0, 0, + 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, 121, - 192, 148, 106, 179, 103, 0, 0, 0, 0, 0, + 187, 148, 106, 179, 103, 0, 629, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 54, 0, 0, 209, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 80, 0, 628, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, - 0, 0, 211, 0, 0, 0, 0, 146, 0, 163, + 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, - 123, 168, 147, 175, 183, 184, 165, 182, 191, 84, - 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, - 85, 0, 150, 102, 107, 101, 138, 169, 170, 100, - 
194, 91, 181, 88, 92, 180, 136, 167, 173, 130, - 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, - 145, 113, 133, 132, 134, 0, 0, 0, 161, 178, - 195, 94, 0, 157, 166, 185, 186, 187, 188, 189, - 190, 0, 0, 95, 108, 104, 143, 135, 93, 114, - 158, 117, 124, 149, 193, 140, 154, 98, 177, 159, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 139, 0, 82, - 89, 121, 192, 148, 106, 179, 103, 0, 0, 0, - 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 209, 0, 856, 0, - 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, + 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, + 100, 189, 91, 181, 88, 92, 180, 136, 167, 173, + 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, + 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, + 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 139, 0, 82, 89, 121, 187, 148, 106, + 179, 103, 0, 0, 0, 0, 0, 120, 0, 122, + 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, + 0, 204, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 109, 0, 0, 0, 211, 0, 0, 0, 0, 146, - 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, - 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, - 142, 152, 123, 168, 147, 175, 183, 184, 165, 182, - 191, 84, 164, 174, 97, 156, 86, 172, 162, 129, - 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, - 170, 100, 194, 91, 181, 88, 92, 180, 136, 167, - 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, - 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, - 161, 178, 195, 94, 0, 157, 166, 185, 186, 187, - 188, 189, 190, 0, 0, 95, 108, 104, 143, 135, - 93, 114, 158, 117, 124, 149, 193, 140, 154, 98, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 206, + 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, + 90, 0, 
110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 161, 178, 190, 94, + 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, - 0, 82, 89, 121, 192, 148, 106, 179, 103, 0, + 0, 82, 89, 121, 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 80, 0, - 528, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 204, 0, + 846, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 109, 0, 0, 0, 211, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, - 165, 182, 191, 84, 164, 174, 97, 156, 86, 172, - 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, - 138, 169, 170, 100, 194, 91, 181, 88, 92, 180, - 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, - 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, - 0, 0, 161, 178, 195, 94, 0, 157, 166, 185, - 186, 187, 188, 189, 190, 0, 0, 95, 108, 104, - 143, 135, 93, 114, 158, 117, 124, 149, 193, 140, - 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, + 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, + 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, + 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 0, 0, 0, 161, 178, 190, 94, 0, 166, 185, + 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, + 117, 124, 149, 188, 140, 154, 98, 177, 
159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 139, 82, 89, 121, 192, 148, 106, 179, - 610, 103, 0, 0, 0, 0, 0, 120, 0, 122, - 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, + 121, 187, 148, 106, 179, 103, 0, 0, 0, 0, + 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 209, 0, 0, 0, 0, 0, 0, 0, 0, - 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 80, 0, 517, 0, 0, + 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 109, 0, 0, 0, 211, - 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, - 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, - 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, - 175, 183, 184, 165, 182, 191, 84, 164, 174, 97, - 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, - 102, 107, 101, 138, 169, 170, 100, 194, 91, 181, - 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, - 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, - 132, 134, 0, 0, 0, 161, 178, 195, 94, 0, - 157, 166, 185, 186, 187, 188, 189, 190, 0, 0, - 95, 108, 104, 143, 135, 93, 114, 158, 117, 124, - 149, 193, 140, 154, 98, 177, 159, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 329, 0, 0, - 0, 0, 0, 0, 139, 0, 82, 89, 121, 192, - 148, 106, 179, 103, 0, 0, 0, 0, 0, 120, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 0, 0, 206, 0, 0, 0, 0, 146, 0, + 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, + 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, + 152, 123, 168, 147, 175, 183, 184, 165, 182, 186, + 157, 84, 164, 174, 97, 156, 86, 172, 162, 129, + 115, 116, 85, 0, 150, 102, 107, 101, 138, 169, + 170, 100, 189, 91, 181, 88, 92, 180, 136, 167, + 173, 130, 127, 87, 171, 128, 126, 118, 105, 112, + 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, + 161, 178, 190, 94, 0, 166, 185, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 158, 117, 124, 149, + 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 139, 82, 89, 121, 187, 148, + 106, 179, 599, 103, 0, 0, 0, 0, 0, 120, 0, 
122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 209, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, - 0, 211, 0, 0, 0, 0, 146, 0, 163, 111, + 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, - 168, 147, 175, 183, 184, 165, 182, 191, 84, 164, - 174, 97, 156, 86, 172, 162, 129, 115, 116, 85, - 0, 150, 102, 107, 101, 138, 169, 170, 100, 194, - 91, 181, 88, 92, 180, 136, 167, 173, 130, 127, - 87, 171, 128, 126, 118, 105, 112, 144, 125, 145, - 113, 133, 132, 134, 0, 0, 0, 161, 178, 195, - 94, 0, 157, 166, 185, 186, 187, 188, 189, 190, - 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, - 117, 124, 149, 193, 140, 154, 98, 177, 159, 0, + 168, 147, 175, 183, 184, 165, 182, 186, 157, 84, + 164, 174, 97, 156, 86, 172, 162, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 169, 170, 100, + 189, 91, 181, 88, 92, 180, 136, 167, 173, 130, + 127, 87, 171, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 0, 0, 161, 178, + 190, 94, 0, 166, 185, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 158, 117, 124, 149, 188, 140, + 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 319, 0, 0, 0, 0, 0, + 0, 139, 0, 82, 89, 121, 187, 148, 106, 179, + 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, + 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, - 121, 192, 148, 106, 179, 103, 0, 0, 0, 0, - 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, + 204, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 209, 0, 0, 0, 0, - 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, - 0, 206, 0, 211, 0, 0, 0, 0, 146, 0, - 163, 111, 119, 83, 
90, 0, 110, 137, 151, 155, - 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, - 152, 123, 168, 147, 175, 183, 184, 165, 182, 191, - 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, - 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, - 100, 194, 91, 181, 88, 92, 180, 136, 167, 173, - 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, - 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, - 178, 195, 94, 0, 157, 166, 185, 186, 187, 188, - 189, 190, 0, 0, 95, 108, 104, 143, 135, 93, - 114, 158, 117, 124, 149, 193, 140, 154, 98, 177, + 0, 0, 0, 0, 109, 0, 0, 0, 206, 0, + 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, + 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, + 183, 184, 165, 182, 186, 157, 84, 164, 174, 97, + 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 169, 170, 100, 189, 91, 181, + 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 161, 178, 190, 94, 0, + 166, 185, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, 0, - 82, 89, 121, 192, 148, 106, 179, 103, 0, 0, + 82, 89, 121, 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 80, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 109, 0, 0, 0, 211, 0, 0, 0, 0, + 0, 109, 0, 201, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, 165, - 182, 191, 84, 164, 174, 97, 156, 86, 172, 162, - 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, - 169, 170, 100, 194, 91, 181, 88, 92, 180, 136, - 167, 173, 130, 127, 87, 171, 128, 126, 118, 105, - 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, - 0, 
161, 178, 195, 94, 0, 157, 166, 185, 186, - 187, 188, 189, 190, 0, 0, 95, 108, 104, 143, - 135, 93, 114, 158, 117, 124, 149, 193, 140, 154, - 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, + 182, 186, 157, 84, 164, 174, 97, 156, 86, 172, + 162, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 169, 170, 100, 189, 91, 181, 88, 92, 180, + 136, 167, 173, 130, 127, 87, 171, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 161, 178, 190, 94, 0, 166, 185, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 158, 117, + 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 139, 0, 82, 89, 121, 192, 148, 106, 179, 103, - 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, - 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 209, - 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, + 0, 0, 0, 0, 0, 139, 0, 82, 89, 121, + 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, + 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 80, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 109, 0, 0, 0, 211, 0, 0, - 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, - 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, - 141, 176, 0, 142, 152, 123, 168, 147, 175, 183, - 184, 165, 182, 191, 84, 164, 174, 97, 156, 86, - 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, - 101, 138, 169, 170, 100, 194, 91, 181, 88, 92, - 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, - 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, - 0, 0, 0, 161, 178, 195, 94, 0, 157, 166, - 185, 186, 187, 188, 189, 190, 0, 0, 95, 108, - 104, 143, 135, 93, 114, 158, 117, 124, 149, 193, + 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, + 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, + 123, 168, 147, 175, 183, 184, 165, 182, 186, 157, + 84, 164, 174, 97, 156, 86, 172, 162, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 169, 170, + 100, 
189, 91, 181, 88, 92, 180, 136, 167, 173, + 130, 127, 87, 171, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 161, + 178, 190, 94, 0, 166, 185, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 139, 0, 82, 89, 121, 192, 148, 106, + 0, 0, 139, 0, 82, 89, 121, 187, 148, 106, 179, 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 204, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 109, 0, 0, 0, 211, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 206, 0, 0, 0, 0, 146, 0, 163, 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, 176, 0, 142, 152, 123, 168, 147, - 175, 183, 184, 165, 182, 191, 84, 164, 174, 97, - 156, 86, 172, 162, 129, 115, 116, 85, 0, 150, - 102, 107, 101, 138, 169, 170, 100, 194, 91, 181, - 88, 92, 180, 136, 167, 173, 130, 127, 87, 171, - 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, - 132, 134, 0, 0, 0, 161, 178, 195, 94, 0, - 157, 166, 185, 186, 187, 188, 189, 190, 0, 0, - 95, 108, 104, 143, 135, 93, 114, 158, 117, 124, - 149, 193, 140, 154, 98, 177, 159, 0, 0, 0, + 175, 183, 184, 165, 182, 186, 157, 84, 164, 174, + 97, 156, 86, 172, 162, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 169, 170, 100, 189, 91, + 181, 88, 92, 180, 136, 167, 173, 130, 127, 87, + 171, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 161, 178, 190, 94, + 0, 166, 185, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 158, 117, 124, 149, 188, 140, 154, 98, + 177, 159, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, + 0, 82, 89, 121, 187, 148, 106, 179, 103, 0, + 0, 0, 0, 0, 120, 0, 122, 0, 0, 160, + 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 260, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 206, 0, 0, 0, + 0, 146, 655, 163, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 176, 0, 142, 152, 123, 168, 147, 175, 183, 184, + 165, 182, 186, 157, 84, 164, 174, 97, 156, 86, + 172, 162, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 169, 170, 100, 189, 91, 181, 88, 92, + 180, 136, 167, 173, 130, 127, 87, 171, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 643, 0, 0, 161, 178, 190, 94, 0, 166, 185, + 0, 0, 95, 108, 104, 143, 135, 93, 114, 158, + 117, 124, 149, 188, 140, 154, 98, 177, 159, 0, + 0, 0, 0, 0, 0, 0, 0, 656, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 82, 89, + 121, 187, 148, 106, 179, 0, 0, 0, 0, 0, + 669, 672, 673, 674, 675, 676, 677, 0, 678, 679, + 680, 681, 682, 657, 658, 659, 660, 641, 642, 670, + 0, 644, 0, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 661, 662, 663, 664, 665, 666, 667, + 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 82, 89, 121, 192, - 148, 106, 179, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 671, } var yyPact = [...]int{ - 1851, -1000, -192, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1469, -1000, -179, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 925, 971, -1000, -1000, -1000, -1000, -1000, -1000, + 353, 8963, -1, 136, -6, 11580, 135, 1582, 12054, -1000, + -9, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -80, -92, + -1000, 715, -1000, -1000, -1000, -1000, -1000, 920, 923, 761, + 911, 818, -1000, 6511, 101, 101, 11343, 5773, -1000, -1000, + 352, 12054, 129, 12054, -145, 91, 91, 91, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 937, 993, -1000, -1000, -1000, -1000, -1000, 
-1000, - 279, 8785, 41, 131, -10, 11708, 130, 1524, 12192, -1000, - 8, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -60, -70, - -1000, 716, -1000, -1000, -1000, -1000, -1000, 925, 948, 787, - 936, 820, -1000, 6534, 109, 109, 11466, 5530, -1000, -1000, - 256, 12192, 127, 12192, -145, 104, 104, 104, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -2009,23 +1995,22 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 133, 12054, 221, -1000, 12054, 82, 570, 82, 82, + 82, 12054, -1000, 187, -1000, -1000, -1000, 12054, 563, 868, + 3442, 50, 3442, -1000, 3442, 3442, -1000, 3442, 1, 3442, + -70, 936, -22, -1000, 3442, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 518, 880, + 7496, 7496, 925, -1000, 715, -1000, -1000, -1000, 849, -1000, + -1000, 397, 958, -1000, 8726, 185, -1000, 7496, 1769, 695, + -1000, -1000, 695, -1000, -1000, 148, -1000, -1000, 8234, 8234, + 8234, 8234, 8234, 8234, 8234, 8234, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 129, 12192, 237, -1000, - 12192, 92, 569, 92, 92, 92, 12192, -1000, 178, -1000, - -1000, -1000, 12192, 553, 853, 3154, 53, 3154, 3154, -1000, - 3154, 3154, -1000, 3154, 15, 3154, -62, 969, -1000, -1000, - -1000, -1000, -15, -1000, 3154, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 540, 860, - 7539, 7539, 937, -1000, 716, -1000, -1000, -1000, 854, -1000, - -1000, 322, 979, -1000, 2361, 170, -1000, 7539, 2156, 675, - -1000, -1000, 675, -1000, -1000, 147, -1000, -1000, 8292, 8292, - 
8292, 8292, 8292, 8292, 8292, 8292, -1000, -1000, -1000, -1000, + 695, -1000, 7250, 695, 695, 695, 695, 695, 695, 695, + 695, 7496, 695, 695, 695, 695, 695, 695, 695, 695, + 695, 695, 695, 695, 695, 695, 695, 11106, 10394, 12054, + 680, 677, -1000, -1000, 180, 709, 5514, -109, -1000, -1000, + -1000, 336, 10157, -1000, -1000, -1000, 847, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 675, -1000, 7288, 675, 675, 675, 675, 675, 675, 675, - 675, 7539, 675, 675, 675, 675, 675, 675, 675, 675, - 675, 675, 675, 675, 675, 675, 675, 11224, 10497, 12192, - 628, 620, -1000, -1000, 168, 707, 5266, -92, -1000, -1000, - -1000, 281, 10255, -1000, -1000, -1000, 852, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -2034,130 +2019,130 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 646, 12054, -1000, 12392, -1000, 558, 3442, + 107, 550, 365, 531, 12054, 12054, 3442, 5, 66, 35, + 12054, 716, 105, 12054, 897, 791, 12054, 529, 527, -1000, + 5255, -1000, 3442, 3442, -1000, -1000, -1000, 3442, 3442, 3442, + 12054, 3442, 3442, -1000, -1000, -1000, -1000, 3442, 3442, -1000, + 957, 323, -1000, -1000, -1000, -1000, 7496, -1000, 790, -1000, + -1000, -1000, -1000, -1000, -1000, 966, 223, 517, 175, 713, + -1000, 498, 920, 518, 818, 9920, 801, -1000, -1000, 12054, + -1000, 7496, 7496, 416, -1000, 10868, -1000, -1000, 4219, 236, + 8234, 423, 305, 8234, 8234, 8234, 8234, 8234, 8234, 8234, + 8234, 8234, 8234, 8234, 8234, 8234, 8234, 8234, 450, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 519, -1000, 715, + 694, 694, 195, 195, 195, 195, 195, 195, 195, 
8480, + 6019, 518, 628, 344, 7250, 6511, 6511, 7496, 7496, 7003, + 6757, 6511, 915, 360, 344, 12291, -1000, -1000, 7988, -1000, + -1000, -1000, -1000, -1000, 518, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 11817, 11817, 6511, 6511, 6511, 6511, 41, 12054, + -1000, 696, 731, -1000, -1000, -1000, 900, 9437, 9683, 41, + 586, 10394, 12054, -1000, -1000, 10394, 12054, 3960, 4996, 709, + -109, 673, -1000, -107, -122, 2624, 189, -1000, -1000, -1000, + -1000, 3183, 351, 579, 382, -74, -1000, -1000, -1000, 729, + -1000, 729, 729, 729, 729, -42, -42, -42, -42, -1000, + -1000, -1000, -1000, -1000, 769, 764, -1000, 729, 729, 729, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 607, 12192, -1000, 2022, -1000, 531, 3154, - 116, 529, 297, 524, 12192, 12192, 3154, 43, 70, 115, - 12192, 711, 114, 12192, 902, 782, 12192, 520, 518, -1000, - 5002, -1000, 3154, 3154, -1000, -1000, -1000, 3154, 3154, 3154, - 12192, 3154, 3154, -1000, -1000, -1000, -1000, -1000, 3154, 3154, - -1000, 978, 349, -1000, -1000, -1000, -1000, 7539, -1000, 781, - -1000, -1000, -1000, -1000, -1000, -1000, 988, 208, 536, 166, - 708, -1000, 422, 925, 540, 820, 10013, 792, -1000, -1000, - 12192, -1000, 7539, 7539, 428, -1000, 10981, -1000, -1000, 3946, - 259, 8292, 388, 368, 8292, 8292, 8292, 8292, 8292, 8292, - 8292, 8292, 8292, 8292, 8292, 8292, 8292, 8292, 8292, 453, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 515, -1000, - 716, 724, 724, 202, 202, 202, 202, 202, 202, 202, - 8543, 6032, 540, 582, 381, 7288, 6534, 6534, 7539, 7539, - 7036, 6785, 6534, 914, 287, 381, 12434, -1000, -1000, 8041, - -1000, -1000, -1000, -1000, -1000, 540, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 11950, 11950, 6534, 6534, 6534, 6534, 59, - 12192, -1000, 657, 923, -1000, -1000, -1000, 912, 9520, 9771, - 59, 663, 10497, 12192, -1000, -1000, 10497, 12192, 3682, 4738, - 707, -92, 669, -1000, -89, -103, 5781, 187, -1000, -1000, - -1000, -1000, 2890, 415, 600, 318, 
-51, -1000, -1000, -1000, - 715, -1000, 715, 715, 715, 715, -20, -20, -20, -20, - -1000, -1000, -1000, -1000, -1000, 743, 741, -1000, 715, 715, - 715, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 740, - 740, 740, 722, 722, 748, -1000, 12192, 3154, 896, 3154, - -1000, 79, -1000, 11950, 11950, 12192, 12192, 138, 12192, 12192, - 699, -1000, 12192, 3154, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 12192, - 300, 12192, 12192, 381, 12192, -1000, 830, 7539, 7539, 4474, - 7539, -1000, -1000, -1000, 860, -1000, 914, 935, -1000, 842, - 839, 6534, -1000, -1000, 259, 267, -1000, -1000, 407, -1000, - -1000, -1000, -1000, 159, 675, -1000, 2171, -1000, -1000, -1000, - -1000, 388, 8292, 8292, 8292, 87, 2171, 2053, 1986, 709, - 202, 203, 203, 191, 191, 191, 191, 191, 462, 462, - -1000, -1000, -1000, 540, -1000, -1000, -1000, 540, 6534, 693, - -1000, -1000, 7539, -1000, 540, 566, 566, 396, 443, 248, - 977, 566, 245, 976, 566, 566, 6534, 288, -1000, 7539, - 540, -1000, 156, -1000, 643, 679, 671, 566, 540, 566, - 566, 630, 675, -1000, 12434, 10497, 10497, 10497, 10497, 10497, - -1000, 816, 810, -1000, 803, 802, 812, 12192, -1000, 579, - 9520, 193, 675, -1000, 10739, -1000, -1000, 968, 10497, 646, - -1000, 646, -1000, 155, -1000, -1000, 669, -92, -83, -1000, - -1000, -1000, -1000, 381, -1000, 469, 665, 2626, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 727, 513, -1000, 880, 218, - 217, 501, 875, -1000, -1000, -1000, 856, -1000, 304, -55, - -1000, -1000, 431, -20, -20, -1000, -1000, 187, 847, 187, - 187, 187, 455, 455, -1000, -1000, -1000, -1000, 424, -1000, - -1000, -1000, 406, -1000, 780, 11950, 3154, -1000, -1000, -1000, - -1000, 251, 251, 201, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 55, 746, -1000, -1000, -1000, - 35, 20, 113, -1000, 3154, -1000, 349, -1000, 454, 7539, - -1000, -1000, -1000, 
828, 381, 381, 152, -1000, -1000, 12192, - -1000, -1000, -1000, -1000, 704, -1000, -1000, -1000, 3418, 6534, - -1000, 87, 2171, 1947, -1000, 8292, 8292, -1000, -1000, 566, - 6534, 381, -1000, -1000, -1000, 103, 453, 103, 8292, 8292, - -1000, 8292, 8292, -1000, -158, 676, 283, -1000, 7539, 414, - -1000, 4474, -1000, 8292, 8292, -1000, -1000, -1000, -1000, 779, - 12434, 675, -1000, 9278, 11950, 670, -1000, 271, 923, 736, - 778, 775, -1000, -1000, -1000, -1000, 801, -1000, 800, -1000, - -1000, -1000, -1000, -1000, 126, 125, 118, 11950, -1000, 937, - 7539, 646, -1000, -1000, 212, -1000, -1000, -120, -109, -1000, - -1000, -1000, 2890, -1000, 2890, 11950, 76, -1000, 501, 501, - -1000, -1000, -1000, 725, 777, 8292, -1000, -1000, -1000, 595, - 187, 187, -1000, 236, -1000, -1000, -1000, 562, -1000, 560, - 662, 557, 12192, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 12192, - -1000, -1000, -1000, -1000, -1000, 11950, -169, 492, 11950, 11950, - 12192, -1000, 300, -1000, 381, -1000, 4210, -1000, 968, 10497, - -1000, -1000, 540, -1000, 8292, 2171, 2171, -1000, -1000, 540, - 715, 715, -1000, 715, 722, -1000, 715, -1, 715, -2, - 540, 540, 2038, 1908, 1821, 1622, 675, -153, -1000, 381, - 7539, -1000, 1489, 1365, -1000, 885, 634, 638, -1000, -1000, - 6283, 540, 551, 151, 546, -1000, 937, 12434, 7539, -1000, - -1000, 7539, 721, -1000, 7539, -1000, -1000, -1000, 675, 675, - 675, 546, 925, 381, -1000, -1000, -1000, -1000, 2626, -1000, - 544, -1000, 715, -1000, -1000, -1000, 11950, -47, 987, 2171, - -1000, -1000, -1000, -1000, -1000, -20, 445, -20, 402, -1000, - 335, 3154, -1000, -1000, -1000, -1000, 889, -1000, 4210, -1000, - -1000, 713, -1000, -1000, -1000, 962, 656, -1000, 2171, -1000, - -1000, 112, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 8292, 8292, 8292, 8292, 8292, 540, 441, 381, 8292, 8292, - 874, -1000, 675, -1000, -1000, 674, 11950, 11950, -1000, 11950, - 925, -1000, 381, 381, 11950, 381, 
11950, 11950, 11950, 9036, - -1000, 162, 11950, -1000, 539, -1000, 209, -1000, -162, 187, - -1000, 187, 592, 586, -1000, 675, 648, -1000, 269, 11950, - 960, 947, -1000, -1000, 643, 643, 643, 643, 26, -1000, - -1000, 643, 643, 984, -1000, 675, -1000, 716, 145, -1000, - -1000, -1000, 500, 487, 487, 487, 193, 162, -1000, 481, - 263, 433, -1000, 72, 11950, 308, 873, -1000, 859, -1000, - -1000, -1000, -1000, -1000, 54, 4210, 2890, 480, -1000, 7539, - 7539, -1000, -1000, -1000, -1000, 540, 60, -174, -1000, -1000, - 12434, 638, 540, 11950, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 331, -1000, -1000, 12192, -1000, -1000, 389, -1000, -1000, - 467, -1000, 11950, -1000, -1000, 746, 381, 633, -1000, 825, - -165, -187, 610, -1000, -1000, -1000, 710, -1000, -1000, 54, - 835, -169, -1000, 824, -1000, 11950, -1000, 51, -1000, -170, - 463, 49, -176, 773, 675, -188, 770, -1000, 973, 7790, - -1000, -1000, 983, 195, 195, 643, 540, -1000, -1000, -1000, - 82, 371, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 759, 759, + 759, 739, 739, 778, -1000, 12054, 3442, 895, 3442, -1000, + 83, -1000, 11817, 11817, 12054, 12054, 12054, 143, 12054, 12054, + 708, -1000, 12054, 3442, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 12054, + 386, 12054, 12054, 344, 12054, -1000, 831, 7496, 7496, 4737, + 7496, -1000, -1000, -1000, 880, -1000, 915, 924, -1000, 841, + 838, 6511, -1000, -1000, 236, 338, -1000, -1000, 390, -1000, + -1000, -1000, -1000, 166, 695, -1000, 1952, -1000, -1000, -1000, + -1000, 423, 8234, 8234, 8234, 145, 1952, 1935, 1071, 1413, + 195, 451, 451, 197, 197, 197, 197, 197, 395, 395, + -1000, -1000, -1000, 518, -1000, -1000, -1000, 518, 6511, 679, + -1000, -1000, 7496, -1000, 518, 615, 615, 376, 364, 271, + 941, 615, 269, 939, 615, 615, 6511, 350, -1000, 7496, + 518, -1000, 163, -1000, 704, 676, 674, 615, 518, 615, + 615, 780, 695, -1000, 12291, 10394, 10394, 
10394, 10394, 10394, + -1000, 815, 812, -1000, 805, 804, 811, 12054, -1000, 619, + 9437, 171, 695, -1000, 10631, -1000, -1000, 935, 10394, 660, + -1000, 660, -1000, 156, -1000, -1000, 673, -109, -128, -1000, + -1000, -1000, -1000, 344, -1000, 488, 672, 2924, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 755, 512, -1000, 884, 229, + 226, 510, 881, -1000, -1000, -1000, 850, -1000, 373, -76, + -1000, -1000, 455, -42, -42, -1000, -1000, 189, 846, 189, + 189, 189, 483, 483, -1000, -1000, -1000, -1000, 454, -1000, + -1000, -1000, 453, -1000, 787, 11817, 3442, -1000, -1000, -1000, + -1000, 179, 179, 202, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 39, 771, -1000, -1000, -1000, + -1000, -16, 0, 103, -1000, 3442, -1000, 323, -1000, 482, + 7496, -1000, -1000, -1000, 827, 344, 344, 155, -1000, -1000, + 12054, -1000, -1000, -1000, -1000, 703, -1000, -1000, -1000, 3701, + 6511, -1000, 145, 1952, 1654, -1000, 8234, 8234, -1000, -1000, + 615, 6511, 344, -1000, -1000, -1000, 383, 450, 383, 8234, + 8234, -1000, 8234, 8234, -1000, -158, 692, 270, -1000, 7496, + 250, -1000, 4737, -1000, 8234, 8234, -1000, -1000, -1000, -1000, + 785, 12291, 695, -1000, 9200, 11817, 664, -1000, 311, 731, + 768, 784, 914, -1000, -1000, -1000, -1000, 808, -1000, 806, + -1000, -1000, -1000, -1000, -1000, 127, 126, 113, 11817, -1000, + 925, 7496, 660, -1000, -1000, 208, -1000, -1000, -117, -131, + -1000, -1000, -1000, 3183, -1000, 3183, 11817, 71, -1000, 510, + 510, -1000, -1000, -1000, 742, 783, 8234, -1000, -1000, -1000, + 569, 189, 189, -1000, 283, -1000, -1000, -1000, 603, -1000, + 601, 669, 599, 12054, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 12054, -1000, -1000, -1000, -1000, -1000, 11817, -167, 508, 11817, + 11817, 11817, 12054, -1000, 386, -1000, 344, -1000, 4478, -1000, + 935, 10394, -1000, -1000, 518, -1000, 8234, 1952, 1952, -1000, + -1000, 518, 729, 729, -1000, 729, 739, -1000, 
729, -18, + 729, -19, 518, 518, 1903, 1868, 1839, 1576, 695, -152, + -1000, 344, 7496, -1000, 1547, 1442, -1000, 888, 654, 655, + -1000, -1000, 6265, 518, 595, 154, 578, -1000, 925, 12291, + 7496, -1000, -1000, 7496, 736, -1000, 7496, -1000, -1000, -1000, + 695, 695, 695, 578, 920, 344, -1000, -1000, -1000, -1000, + 2924, -1000, 575, -1000, 729, -1000, -1000, -1000, 11817, -65, + 965, 1952, -1000, -1000, -1000, -1000, -1000, -42, 473, -42, + 444, -1000, 422, 3442, -1000, -1000, -1000, -1000, 882, -1000, + 4478, -1000, -1000, 728, 772, -1000, -1000, -1000, 932, 662, + -1000, 1952, -1000, -1000, 96, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 8234, 8234, 8234, 8234, 8234, 518, 467, + 344, 8234, 8234, 879, -1000, 695, -1000, -1000, 712, 11817, + 11817, -1000, 11817, 920, -1000, 344, 344, 11817, 344, 11817, + 11817, 11817, 2347, -1000, 161, 11817, -1000, 568, -1000, 184, + -1000, -111, 189, -1000, 189, 549, 543, -1000, 695, 658, + -1000, 263, 11817, 12054, 930, 922, -1000, -1000, 704, 704, + 704, 704, 84, -1000, -1000, 704, 704, 964, -1000, 695, + -1000, 715, 153, -1000, -1000, -1000, 548, 542, 542, 542, + 171, 161, -1000, 491, 260, 461, -1000, 58, 11817, 378, + 875, -1000, 873, -1000, -1000, -1000, -1000, -1000, 16, 4478, + 3183, 536, -1000, -1000, 7496, 7496, -1000, -1000, -1000, -1000, + 518, 47, -171, -1000, -1000, 12291, 655, 518, 11817, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 414, -1000, -1000, 12054, + -1000, -1000, 396, -1000, -1000, 525, -1000, 11817, -1000, -1000, + 771, 344, 606, -1000, 826, -165, -174, 590, -1000, -1000, + -1000, 727, -1000, -1000, 16, 837, -167, -1000, 821, -1000, + 11817, -1000, 12, -1000, -169, 501, 9, -172, 781, 695, + -176, 770, -1000, 955, 7742, -1000, -1000, 962, 196, 196, + 704, 518, -1000, -1000, -1000, 69, 437, -1000, -1000, -1000, + -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 1213, 21, 488, 1211, 1210, 1209, 1206, 1203, 1201, - 1197, 1196, 1194, 1193, 1192, 1191, 1190, 1188, 1186, 1184, - 1183, 
1180, 1179, 1178, 1176, 1175, 111, 1174, 1173, 1170, - 75, 1168, 71, 1167, 1166, 44, 63, 61, 42, 930, - 1165, 47, 50, 91, 1148, 35, 1147, 1144, 82, 1143, - 1142, 45, 1141, 1140, 1367, 1139, 62, 1138, 14, 56, - 1137, 1134, 1133, 1132, 70, 614, 1128, 1126, 13, 1125, - 1123, 78, 1118, 54, 8, 16, 18, 25, 1117, 247, - 11, 1113, 53, 1112, 1111, 1109, 1107, 31, 1105, 55, - 1104, 23, 57, 1103, 7, 64, 29, 20, 9, 93, - 60, 1102, 24, 65, 49, 1101, 1097, 440, 1096, 1094, - 46, 1092, 1091, 41, 173, 507, 1090, 1088, 1080, 1079, - 38, 0, 431, 317, 76, 1078, 1076, 1075, 1326, 37, - 52, 17, 1073, 103, 51, 34, 1072, 1070, 40, 1069, - 1068, 1064, 1062, 1060, 1059, 1058, 102, 1056, 1054, 1053, - 19, 33, 1052, 1051, 69, 27, 1047, 1046, 1044, 48, - 58, 1043, 1042, 59, 30, 1039, 1028, 1023, 1022, 1021, - 26, 6, 1020, 15, 1017, 12, 1016, 28, 1015, 4, - 1014, 10, 1013, 3, 1012, 5, 43, 1, 1011, 2, - 1005, 1001, 100, 179, 66, 999, 117, + 0, 1212, 45, 519, 1209, 1208, 1206, 1200, 1198, 1197, + 1192, 1189, 1188, 1187, 1186, 1185, 1182, 1181, 1180, 1179, + 1160, 1158, 1157, 1152, 1150, 1149, 142, 1148, 1147, 1145, + 76, 1143, 70, 1142, 1141, 48, 413, 52, 44, 953, + 1140, 24, 71, 66, 1139, 38, 1138, 1137, 82, 1136, + 1134, 55, 1132, 1131, 92, 1130, 73, 1128, 12, 30, + 1127, 1126, 1125, 1124, 77, 467, 1123, 1118, 15, 1117, + 1116, 95, 1115, 57, 10, 14, 22, 27, 1114, 100, + 7, 1109, 56, 1108, 1106, 1104, 1103, 21, 1102, 54, + 1100, 18, 59, 1086, 9, 75, 31, 25, 8, 87, + 60, 1083, 26, 63, 47, 1082, 1081, 452, 1080, 1079, + 58, 1078, 1077, 29, 148, 434, 1075, 1072, 1071, 1068, + 42, 0, 874, 432, 69, 1067, 1066, 1065, 1629, 40, + 50, 17, 1063, 33, 174, 43, 1059, 1058, 41, 1055, + 1052, 1050, 1049, 1048, 1044, 1042, 20, 1041, 1040, 1038, + 19, 23, 1037, 1036, 64, 28, 1034, 1033, 1032, 51, + 65, 1029, 1028, 53, 37, 1027, 1025, 1024, 1022, 1017, + 34, 13, 1014, 16, 1013, 4, 1012, 35, 1009, 5, + 990, 11, 989, 2, 987, 6, 49, 3, 986, 1, + 982, 981, 61, 601, 83, 977, 115, } var yyR1 = [...]int{ @@ -2183,49 
+2168,48 @@ var yyR1 = [...]int{ 174, 174, 173, 162, 162, 177, 177, 177, 177, 188, 189, 187, 187, 187, 187, 187, 169, 169, 169, 170, 170, 170, 171, 171, 171, 12, 12, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, 186, 186, - 186, 186, 186, 186, 186, 186, 186, 186, 186, 180, - 178, 178, 179, 179, 13, 18, 18, 14, 14, 14, - 14, 14, 15, 15, 19, 20, 20, 20, 20, 20, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, + 186, 180, 178, 178, 179, 179, 13, 18, 18, 14, + 14, 14, 14, 14, 15, 15, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 111, 111, 109, - 109, 112, 112, 110, 110, 110, 113, 113, 113, 137, - 137, 137, 21, 21, 23, 23, 24, 25, 22, 22, - 22, 22, 22, 22, 22, 16, 195, 26, 27, 27, - 28, 28, 28, 32, 32, 32, 30, 30, 31, 31, - 37, 37, 36, 36, 38, 38, 38, 38, 125, 125, - 125, 124, 124, 40, 40, 41, 41, 42, 42, 43, - 43, 43, 43, 57, 57, 94, 94, 96, 96, 44, - 44, 44, 44, 45, 45, 46, 46, 47, 47, 132, - 132, 131, 131, 131, 130, 130, 50, 50, 50, 52, - 51, 51, 51, 51, 53, 53, 55, 55, 54, 54, - 56, 58, 58, 58, 58, 59, 59, 39, 39, 39, - 39, 39, 39, 39, 108, 108, 61, 61, 60, 60, - 60, 60, 60, 60, 60, 60, 60, 60, 72, 72, - 72, 72, 72, 72, 62, 62, 62, 62, 62, 62, - 62, 35, 35, 73, 73, 73, 79, 74, 74, 65, - 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 20, 20, 20, 20, 111, 111, 109, 109, 112, 112, + 110, 110, 110, 113, 113, 113, 137, 137, 137, 21, + 21, 23, 23, 24, 25, 22, 22, 22, 22, 22, + 22, 22, 16, 195, 26, 27, 27, 28, 28, 28, + 32, 32, 32, 30, 30, 31, 31, 37, 37, 36, + 36, 38, 38, 38, 38, 125, 125, 125, 124, 124, + 40, 40, 41, 41, 42, 42, 43, 43, 43, 43, + 57, 57, 94, 94, 96, 96, 44, 44, 44, 44, + 45, 45, 46, 46, 47, 47, 132, 132, 131, 131, + 131, 130, 130, 50, 50, 50, 52, 51, 51, 51, + 51, 53, 53, 55, 55, 54, 54, 56, 58, 58, + 58, 58, 59, 59, 39, 39, 39, 39, 39, 39, + 39, 108, 108, 61, 61, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 72, 72, 72, 72, 72, + 72, 62, 
62, 62, 62, 62, 62, 62, 35, 35, + 73, 73, 73, 79, 74, 74, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, - 65, 69, 69, 69, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 68, 68, 68, + 65, 65, 65, 65, 65, 65, 65, 65, 69, 69, + 69, 67, 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, - 68, 68, 68, 196, 196, 71, 70, 70, 70, 70, - 70, 70, 33, 33, 33, 33, 33, 135, 135, 138, - 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, - 138, 138, 83, 83, 34, 34, 81, 81, 82, 84, - 84, 80, 80, 80, 64, 64, 64, 64, 64, 64, - 64, 64, 66, 66, 66, 85, 85, 86, 86, 87, - 87, 88, 88, 89, 90, 90, 90, 91, 91, 91, - 91, 92, 92, 92, 63, 63, 63, 63, 63, 63, - 93, 93, 93, 93, 97, 97, 75, 75, 77, 77, - 76, 78, 98, 98, 102, 99, 99, 103, 103, 103, - 103, 101, 101, 101, 127, 127, 127, 106, 106, 114, - 114, 115, 115, 107, 107, 116, 116, 116, 116, 116, - 116, 116, 116, 116, 116, 117, 117, 117, 118, 118, - 119, 119, 119, 126, 126, 122, 122, 123, 123, 128, - 128, 129, 129, 120, 120, 120, 120, 120, 120, 120, + 196, 196, 71, 70, 70, 70, 70, 70, 70, 33, + 33, 33, 33, 33, 135, 135, 138, 138, 138, 138, + 138, 138, 138, 138, 138, 138, 138, 138, 138, 83, + 83, 34, 34, 81, 81, 82, 84, 84, 80, 80, + 80, 64, 64, 64, 64, 64, 64, 64, 64, 66, + 66, 66, 85, 85, 86, 86, 87, 87, 88, 88, + 89, 90, 90, 90, 91, 91, 91, 91, 92, 92, + 92, 63, 63, 63, 63, 63, 63, 93, 93, 93, + 93, 97, 97, 75, 75, 77, 77, 76, 78, 98, + 98, 102, 99, 99, 103, 103, 103, 103, 101, 101, + 101, 127, 127, 127, 106, 106, 114, 114, 115, 115, + 107, 107, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 117, 117, 117, 118, 118, 119, 119, 119, + 126, 126, 122, 122, 123, 123, 128, 128, 129, 129, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, @@ -2234,8 +2218,8 @@ var yyR1 = [...]int{ 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 
120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 120, 120, 120, 120, 120, 120, 121, 121, - 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, + 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120, 120, 120, 120, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, @@ -2246,7 +2230,8 @@ var yyR1 = [...]int{ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, - 121, 121, 121, 192, 193, 133, 134, 134, 134, + 121, 121, 121, 121, 121, 192, 193, 133, 134, 134, + 134, } var yyR2 = [...]int{ @@ -2272,49 +2257,48 @@ var yyR2 = [...]int{ 1, 3, 2, 3, 1, 10, 11, 11, 12, 3, 3, 1, 1, 2, 2, 2, 0, 1, 3, 1, 2, 3, 1, 1, 1, 6, 7, 7, 7, 7, - 4, 5, 7, 5, 5, 5, 12, 7, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, - 1, 3, 8, 8, 3, 3, 5, 4, 6, 5, - 4, 4, 3, 2, 3, 4, 4, 3, 4, 4, - 4, 4, 4, 4, 3, 3, 2, 3, 3, 2, - 3, 4, 3, 7, 5, 4, 2, 4, 2, 2, - 2, 2, 3, 3, 5, 2, 3, 1, 1, 0, - 1, 1, 1, 0, 2, 2, 0, 2, 2, 0, - 1, 1, 2, 1, 1, 2, 1, 1, 2, 2, - 2, 2, 2, 3, 3, 2, 0, 2, 0, 2, - 1, 2, 2, 0, 1, 1, 0, 1, 0, 1, - 0, 1, 1, 3, 1, 2, 3, 5, 0, 1, - 2, 1, 1, 0, 2, 1, 3, 1, 1, 1, - 3, 1, 3, 3, 7, 1, 3, 1, 3, 4, - 4, 4, 3, 2, 4, 0, 1, 0, 2, 0, - 1, 0, 1, 2, 1, 1, 1, 2, 2, 1, - 2, 3, 2, 3, 2, 2, 2, 1, 1, 3, - 3, 0, 5, 5, 5, 0, 2, 1, 3, 3, - 2, 3, 1, 2, 0, 3, 1, 1, 3, 3, - 4, 4, 5, 3, 4, 5, 6, 2, 1, 2, - 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, - 1, 0, 2, 1, 1, 1, 3, 1, 3, 1, - 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, - 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, - 1, 4, 5, 6, 4, 4, 6, 6, 6, 8, - 8, 8, 8, 9, 7, 5, 4, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 8, 8, 0, 2, 3, 4, 4, 4, 4, - 4, 4, 0, 3, 4, 7, 3, 1, 1, 2, - 3, 3, 1, 2, 2, 1, 2, 1, 2, 2, - 1, 2, 0, 1, 0, 2, 1, 2, 4, 0, - 2, 1, 3, 5, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 2, 0, 3, 
0, 2, 0, - 3, 1, 3, 2, 0, 1, 1, 0, 2, 4, - 4, 0, 2, 4, 2, 1, 3, 5, 4, 6, - 1, 3, 3, 5, 0, 5, 1, 3, 1, 2, - 3, 1, 1, 3, 3, 1, 3, 3, 3, 3, - 3, 1, 2, 1, 1, 1, 1, 1, 1, 0, - 2, 0, 3, 0, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, - 0, 1, 1, 0, 2, 1, 1, 1, 1, 1, + 4, 5, 7, 5, 5, 5, 12, 7, 5, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 7, 1, 3, 8, 8, 3, 3, 5, 4, + 6, 5, 4, 4, 3, 2, 3, 4, 4, 3, + 4, 4, 4, 4, 4, 4, 3, 2, 3, 3, + 2, 3, 4, 3, 7, 5, 4, 2, 4, 3, + 3, 5, 2, 3, 1, 1, 0, 1, 1, 1, + 0, 2, 2, 0, 2, 2, 0, 1, 1, 2, + 1, 1, 2, 1, 1, 2, 2, 2, 2, 2, + 3, 3, 2, 0, 2, 0, 2, 1, 2, 2, + 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, + 3, 1, 2, 3, 5, 0, 1, 2, 1, 1, + 0, 2, 1, 3, 1, 1, 1, 3, 1, 3, + 3, 7, 1, 3, 1, 3, 4, 4, 4, 3, + 2, 4, 0, 1, 0, 2, 0, 1, 0, 1, + 2, 1, 1, 1, 2, 2, 1, 2, 3, 2, + 3, 2, 2, 2, 1, 1, 3, 3, 0, 5, + 5, 5, 0, 2, 1, 3, 3, 2, 3, 1, + 2, 0, 3, 1, 1, 3, 3, 4, 4, 5, + 3, 4, 5, 6, 2, 1, 2, 1, 2, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 0, 2, + 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, + 2, 2, 2, 3, 1, 1, 1, 1, 4, 5, + 6, 4, 4, 6, 6, 6, 8, 8, 8, 8, + 9, 7, 5, 4, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 8, 8, + 0, 2, 3, 4, 4, 4, 4, 4, 4, 0, + 3, 4, 7, 3, 1, 1, 2, 3, 3, 1, + 2, 2, 1, 2, 1, 2, 2, 1, 2, 0, + 1, 0, 2, 1, 2, 4, 0, 2, 1, 3, + 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 0, 3, 0, 2, 0, 3, 1, 3, + 2, 0, 1, 1, 0, 2, 4, 4, 0, 2, + 4, 2, 1, 3, 5, 4, 6, 1, 3, 3, + 5, 0, 5, 1, 3, 1, 2, 3, 1, 1, + 3, 3, 1, 3, 3, 3, 3, 3, 1, 2, + 1, 1, 1, 1, 1, 1, 0, 2, 0, 3, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -2335,81 +2319,81 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, + 1, } var yyChk = [...]int{ -1000, -190, -1, -2, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -19, -20, 
-21, -23, -24, -25, -22, -16, -3, -4, 6, 7, -29, 9, 10, 30, - -17, 115, 116, 118, 117, 149, 119, 142, 50, 162, - 163, 165, 166, 25, 143, 144, 147, 148, 31, 32, - 121, -192, 8, 252, 54, -191, 269, -87, 15, -28, + -17, 115, 116, 118, 117, 149, 119, 142, 50, 163, + 164, 166, 167, 25, 143, 144, 147, 148, 31, 32, + 121, -192, 8, 247, 54, -191, 264, -87, 15, -28, 5, -26, -195, -26, -26, -26, -26, -26, -166, -168, - 54, 90, -119, 125, 72, 244, 122, 123, 129, -122, - 57, -121, 262, 135, 162, 173, 167, 194, 186, 263, - 136, 184, 187, 231, 214, 226, 66, 165, 240, 145, - 182, 178, 176, 27, 228, 199, 267, 177, 227, 121, - 138, 133, 200, 204, 232, 171, 172, 234, 198, 134, - 33, 264, 35, 153, 235, 202, 197, 193, 196, 170, - 192, 39, 206, 205, 207, 230, 189, 139, 179, 18, - 238, 148, 151, 229, 201, 203, 130, 155, 266, 236, - 175, 140, 152, 147, 239, 141, 166, 216, 233, 242, - 38, 211, 169, 132, 163, 159, 217, 190, 154, 180, - 181, 195, 168, 191, 164, 156, 149, 241, 212, 268, - 188, 185, 160, 157, 158, 218, 219, 220, 221, 222, - 223, 161, 265, 237, 183, 213, -107, 125, 221, 127, - 123, 123, 124, 125, 244, 122, 123, -54, -128, 57, - -121, 125, 123, 108, 187, 231, 115, 215, 216, 228, - 124, 33, 229, 155, -137, 123, -109, 214, 218, 219, - 220, 223, 221, 161, 57, 233, 232, 224, -128, 164, + 54, 90, -119, 125, 72, 239, 122, 123, 129, -122, + 57, -121, 257, 135, 163, 174, 168, 195, 187, 258, + 136, 185, 188, 226, 215, 221, 66, 166, 235, 145, + 183, 179, 177, 27, 223, 200, 262, 178, 222, 121, + 138, 133, 201, 205, 227, 172, 173, 229, 199, 134, + 33, 259, 35, 153, 230, 203, 198, 194, 197, 171, + 193, 39, 207, 206, 208, 225, 190, 139, 180, 18, + 233, 148, 151, 224, 202, 204, 130, 155, 261, 231, + 176, 140, 152, 147, 234, 141, 167, 162, 228, 237, + 38, 212, 170, 132, 164, 159, 217, 191, 154, 181, + 182, 196, 169, 192, 165, 156, 149, 236, 213, 263, + 189, 186, 160, 157, 158, 218, 161, 260, 232, 184, + 214, -107, 125, 218, 127, 123, 123, 124, 125, 239, + 122, 123, -54, -128, 57, -121, 125, 
123, 108, 188, + 226, 115, 216, 223, 124, 33, 224, 155, -137, 123, + -109, 215, 218, 161, 57, 228, 227, 219, -128, 165, -133, -133, -133, -133, -133, 217, 217, -133, -2, -91, 17, 16, -5, -3, -192, 6, 20, 21, -32, 40, 41, -27, -38, 99, -39, -128, -60, 74, -65, 29, 57, -121, 23, -64, -61, -80, -78, -79, 108, 109, 110, 97, 98, 105, 75, 111, -69, -67, -68, -70, 59, 58, 67, 60, 61, 62, 63, 68, 69, 70, - -122, -76, -192, 44, 45, 253, 254, 255, 256, 261, - 257, 77, 34, 243, 251, 250, 249, 247, 248, 245, - 246, 259, 260, 128, 244, 103, 252, -107, -107, 11, - -48, -49, -54, -56, -128, -99, -136, 164, -103, 233, - 232, -123, -101, -122, -120, 231, 187, 230, 120, 73, - 22, 24, 209, 76, 108, 16, 77, 107, 253, 115, - 48, 245, 246, 243, 255, 256, 244, 215, 29, 10, + -122, -76, -192, 44, 45, 248, 249, 250, 251, 256, + 252, 77, 34, 238, 246, 245, 244, 242, 243, 240, + 241, 254, 255, 128, 239, 103, 247, -107, -107, 11, + -48, -49, -54, -56, -128, -99, -136, 165, -103, 228, + 227, -123, -101, -122, -120, 226, 188, 225, 120, 73, + 22, 24, 210, 76, 108, 16, 77, 107, 248, 115, + 48, 240, 241, 238, 250, 251, 239, 216, 29, 10, 25, 143, 21, 101, 117, 80, 81, 146, 23, 144, 70, 19, 51, 11, 13, 14, 128, 127, 92, 124, 46, 8, 111, 26, 89, 42, 28, 44, 90, 17, - 247, 248, 31, 261, 150, 103, 49, 36, 74, 68, - 71, 52, 72, 15, 47, 91, 118, 252, 45, 122, - 6, 258, 30, 142, 43, 123, 79, 259, 260, 126, - 69, 5, 129, 32, 9, 50, 53, 249, 250, 251, + 242, 243, 31, 256, 150, 103, 49, 36, 74, 68, + 71, 52, 72, 15, 47, 91, 118, 247, 45, 122, + 6, 253, 30, 142, 43, 123, 79, 254, 255, 126, + 69, 5, 129, 32, 9, 50, 53, 244, 245, 246, 34, 78, 12, -167, 90, -160, 57, -54, 124, -54, - 252, -115, 128, -115, -115, 123, -54, 115, 117, 120, + 247, -115, 128, -115, -115, 123, -54, 115, 117, 120, 52, -18, -54, -114, 128, 57, -114, -114, -114, -54, - 112, -54, 57, 30, -134, -192, -123, 244, 57, 155, - 123, 156, 125, -134, -134, -134, -134, -134, 159, 160, - -134, -112, -111, 226, 227, 217, 225, 12, 217, 158, - -134, 
-133, -133, -193, 56, -92, 19, 31, -39, -128, - -88, -89, -39, -87, -2, -26, 36, -30, 21, 65, - 11, -125, 73, 72, 89, -124, 22, -122, 59, 112, - -39, -62, 92, 74, 90, 91, 76, 94, 93, 104, - 97, 98, 99, 100, 101, 102, 103, 95, 96, 107, - 82, 83, 84, 85, 86, 87, 88, -108, -192, -79, - -192, 113, 114, -65, -65, -65, -65, -65, -65, -65, - -65, -192, -2, -74, -39, -192, -192, -192, -192, -192, - -192, -192, -192, -192, -83, -39, -192, -196, -71, -192, - -196, -71, -196, -71, -196, -192, -196, -71, -196, -71, - -196, -196, -71, -192, -192, -192, -192, -192, -192, -55, - 26, -54, -41, -42, -43, -44, -57, -79, -192, -54, - -54, -48, -194, 55, 11, 53, -194, 55, 112, 55, - -99, 164, -100, -104, 234, 236, 82, -127, -122, 59, - 29, 30, 56, 55, -54, -139, -142, -144, -143, -145, - -140, -141, 184, 185, 108, 188, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 30, 145, 180, 181, - 182, 183, 200, 201, 202, 203, 204, 205, 206, 207, - 167, 186, 263, 168, 169, 170, 171, 172, 173, 175, - 176, 177, 178, 179, 57, -134, 125, 57, 74, 57, - -54, -54, -134, 157, 157, 123, 123, -54, 55, 126, + 112, -54, 57, 30, -134, -192, -123, 239, 57, 155, + 123, 156, 125, -134, -134, -134, -134, 159, 160, -134, + -112, -111, 221, 222, 217, 220, 12, 217, 158, -134, + -133, -133, -193, 56, -92, 19, 31, -39, -128, -88, + -89, -39, -87, -2, -26, 36, -30, 21, 65, 11, + -125, 73, 72, 89, -124, 22, -122, 59, 112, -39, + -62, 92, 74, 90, 91, 76, 94, 93, 104, 97, + 98, 99, 100, 101, 102, 103, 95, 96, 107, 82, + 83, 84, 85, 86, 87, 88, -108, -192, -79, -192, + 113, 114, -65, -65, -65, -65, -65, -65, -65, -65, + -192, -2, -74, -39, -192, -192, -192, -192, -192, -192, + -192, -192, -192, -83, -39, -192, -196, -71, -192, -196, + -71, -196, -71, -196, -192, -196, -71, -196, -71, -196, + -196, -71, -192, -192, -192, -192, -192, -192, -55, 26, + -54, -41, -42, -43, -44, -57, -79, -192, -54, -54, + -48, -194, 55, 11, 53, -194, 55, 112, 55, -99, + 165, -100, -104, 229, 231, 82, -127, -122, 59, 29, + 30, 56, 55, -54, 
-139, -142, -144, -143, -145, -140, + -141, 185, 186, 108, 189, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 30, 145, 181, 182, 183, + 184, 201, 202, 203, 204, 205, 206, 207, 208, 168, + 187, 258, 169, 170, 171, 172, 173, 174, 176, 177, + 178, 179, 180, 57, -134, 125, 57, 74, 57, -54, + -54, -134, 157, 157, 123, 123, 162, -54, 55, 126, -48, 23, 52, -54, 57, 57, -129, -128, -120, -134, -134, -134, -134, -134, -54, -134, -134, -134, -134, 11, -110, 11, 92, -39, 52, 9, 92, 55, 18, 112, @@ -2425,211 +2409,212 @@ var yyChk = [...]int{ -36, -95, 151, -54, 30, 55, -50, -52, -51, -53, 42, 46, 48, 43, 44, 45, 49, -132, 22, -41, -192, -131, 151, -130, 22, -128, 59, -95, 53, -41, - -54, -41, -56, -128, 99, -103, -100, 55, 235, 237, - 238, 52, 71, -39, -151, 107, -169, -170, -171, -123, + -54, -41, -56, -128, 99, -103, -100, 55, 230, 232, + 233, 52, 71, -39, -151, 107, -169, -170, -171, -123, 59, 60, -160, -161, -162, -172, 137, -177, 130, 132, 129, -163, 138, 124, 28, 56, -156, 68, 74, -152, - 212, -146, 54, -146, -146, -146, -146, -150, 187, -150, + 213, -146, 54, -146, -146, -146, -146, -150, 188, -150, -150, -150, 54, 54, -146, -146, -146, -154, 54, -154, -154, -155, 54, -155, -126, 53, -54, -134, 23, -134, - -116, 120, 117, 118, -180, 116, 209, 187, 66, 29, - 15, 253, 151, 268, 57, 152, -122, -122, -54, -54, - 120, 117, -54, -54, -54, -134, -54, -113, 90, 12, - -128, -128, -54, 38, -39, -39, -129, -89, -92, -106, - 19, 11, 34, 34, -36, 68, 69, 70, 112, -192, - -73, -65, -65, -65, -35, 146, 73, -193, -193, -36, - 55, -39, -193, -193, -193, 55, 53, 22, 11, 11, - -193, 11, 11, -193, -193, -36, -84, -82, 80, -39, - -193, 112, -193, 55, 55, -193, -193, -193, -193, -63, - 30, 34, -2, -192, -192, -98, -102, -80, -42, -43, - -43, -42, -43, 42, 42, 42, 47, 42, 47, 42, - -51, -128, -193, -58, 50, 127, 51, -192, -130, -59, - 12, -41, -59, -59, 112, -104, -105, 239, 236, 242, - 57, 59, 55, -171, 82, 54, 57, 28, -163, -163, - -164, 57, -164, 28, -148, 29, 68, -153, 213, 60, - 
-150, -150, -151, 30, -151, -151, -151, -159, 59, -159, - 60, 60, 52, -122, -134, -133, -186, 131, 137, 138, - 133, 57, 124, 28, 130, 132, 151, 129, -186, -117, - -118, 126, 22, 124, 28, 151, -185, 53, 157, 157, - 126, -134, -110, 59, -39, 39, 112, -54, -40, 11, - 99, -123, -37, -35, 73, -65, -65, -193, -38, -138, - 108, 184, 145, 182, 178, 198, 189, 211, 180, 212, - -135, -138, -65, -65, -65, -65, 262, -87, 81, -39, - 79, -123, -65, -65, -97, 52, -98, -75, -77, -76, - -192, -2, -93, -122, -96, -122, -59, 55, 82, -46, - -45, 52, 53, -47, 52, -45, 42, 42, 124, 124, - 124, -96, -87, -39, -59, 236, 240, 241, -170, -171, - -174, -173, -122, -177, -164, -164, 54, -149, 52, -65, - 56, -151, -151, 57, 108, 56, 55, 56, 55, 56, - 55, -54, -133, -133, -54, -133, -122, -183, 265, -184, - 57, -122, -122, -54, -113, -59, -41, -193, -65, -193, - -146, -146, -146, -155, -146, 172, -146, 172, -193, -193, - 19, 19, 19, 19, -192, -34, 258, -39, 55, 55, - 27, -97, 55, -193, -193, -193, 55, 112, -193, 55, - -87, -102, -39, -39, 54, -39, -192, -192, -192, -193, - -91, 56, 55, -146, -94, -122, -157, 209, 9, -150, - 59, -150, 60, 60, -134, 26, -182, -181, -123, 54, - -85, 13, -150, 57, -65, -65, -65, -65, -65, -193, - 59, -65, -65, 28, -77, 34, -2, -192, -122, -122, - -122, -91, -94, -94, -94, -94, -131, -176, -175, 53, - 134, 66, -173, 56, 55, -158, 130, 28, 129, -68, - -151, -151, 56, 56, -192, 55, 82, -94, -86, 14, - 16, -193, -193, -193, -193, -33, 92, 265, -193, -193, - 9, -75, -2, 112, 56, -193, -193, -193, -58, -175, - 57, -165, 82, 59, 140, -122, -147, 66, 28, 28, - -178, -179, 151, -181, -171, 56, -39, -74, -193, 263, - 49, 266, -98, -193, -122, 60, -54, 59, -193, 55, - -122, -185, 39, 264, 267, 54, -179, 34, -183, 39, - -94, 153, 265, 56, 154, 266, -188, -189, 52, -192, - 267, -189, 52, 10, 9, -65, 150, -187, 141, 136, - 139, 30, -187, -193, -193, 135, 29, 68, + -116, 120, 117, 118, -180, 116, 210, 188, 66, 29, + 15, 248, 151, 263, 57, 152, -122, -122, -54, -54, + -54, 120, 
117, -54, -54, -54, -134, -54, -113, 90, + 12, -128, -128, -54, 38, -39, -39, -129, -89, -92, + -106, 19, 11, 34, 34, -36, 68, 69, 70, 112, + -192, -73, -65, -65, -65, -35, 146, 73, -193, -193, + -36, 55, -39, -193, -193, -193, 55, 53, 22, 11, + 11, -193, 11, 11, -193, -193, -36, -84, -82, 80, + -39, -193, 112, -193, 55, 55, -193, -193, -193, -193, + -63, 30, 34, -2, -192, -192, -98, -102, -80, -42, + -43, -43, -42, -43, 42, 42, 42, 47, 42, 47, + 42, -51, -128, -193, -58, 50, 127, 51, -192, -130, + -59, 12, -41, -59, -59, 112, -104, -105, 234, 231, + 237, 57, 59, 55, -171, 82, 54, 57, 28, -163, + -163, -164, 57, -164, 28, -148, 29, 68, -153, 214, + 60, -150, -150, -151, 30, -151, -151, -151, -159, 59, + -159, 60, 60, 52, -122, -134, -133, -186, 131, 137, + 138, 133, 57, 124, 28, 130, 132, 151, 129, -186, + -117, -118, 126, 22, 124, 28, 151, -185, 53, 157, + 210, 157, 126, -134, -110, 59, -39, 39, 112, -54, + -40, 11, 99, -123, -37, -35, 73, -65, -65, -193, + -38, -138, 108, 185, 145, 183, 179, 199, 190, 212, + 181, 213, -135, -138, -65, -65, -65, -65, 257, -87, + 81, -39, 79, -123, -65, -65, -97, 52, -98, -75, + -77, -76, -192, -2, -93, -122, -96, -122, -59, 55, + 82, -46, -45, 52, 53, -47, 52, -45, 42, 42, + 124, 124, 124, -96, -87, -39, -59, 231, 235, 236, + -170, -171, -174, -173, -122, -177, -164, -164, 54, -149, + 52, -65, 56, -151, -151, 57, 108, 56, 55, 56, + 55, 56, 55, -54, -133, -133, -54, -133, -122, -183, + 260, -184, 57, -122, -122, -122, -54, -113, -59, -41, + -193, -65, -193, -146, -146, -146, -155, -146, 173, -146, + 173, -193, -193, 19, 19, 19, 19, -192, -34, 253, + -39, 55, 55, 27, -97, 55, -193, -193, -193, 55, + 112, -193, 55, -87, -102, -39, -39, 54, -39, -192, + -192, -192, -193, -91, 56, 55, -146, -94, -122, -157, + 210, 9, -150, 59, -150, 60, 60, -134, 26, -182, + -181, -123, 54, 53, -85, 13, -150, 57, -65, -65, + -65, -65, -65, -193, 59, -65, -65, 28, -77, 34, + -2, -192, -122, -122, -122, -91, -94, -94, -94, -94, + -131, -176, -175, 53, 
134, 66, -173, 56, 55, -158, + 130, 28, 129, -68, -151, -151, 56, 56, -192, 55, + 82, -94, -54, -86, 14, 16, -193, -193, -193, -193, + -33, 92, 260, -193, -193, 9, -75, -2, 112, 56, + -193, -193, -193, -58, -175, 57, -165, 82, 59, 140, + -122, -147, 66, 28, 28, -178, -179, 151, -181, -171, + 56, -39, -74, -193, 258, 49, 261, -98, -193, -122, + 60, -54, 59, -193, 55, -122, -185, 39, 259, 262, + 54, -179, 34, -183, 39, -94, 153, 260, 56, 154, + 261, -188, -189, 52, -192, 262, -189, 52, 10, 9, + -65, 150, -187, 141, 136, 139, 30, -187, -193, -193, + 135, 29, 68, } var yyDef = [...]int{ 23, -2, 2, -2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 559, 0, 316, 316, 316, 316, 316, 316, - 0, 630, 613, 0, 0, 0, 0, -2, 303, 304, - 0, 306, 307, 855, 855, 855, 855, 855, 0, 0, - 855, 0, 35, 36, 853, 1, 3, 567, 0, 0, - 320, 323, 318, 0, 613, 613, 0, 0, 65, 66, - 0, 0, 0, 839, 0, 611, 611, 611, 631, 632, - 635, 636, 738, 739, 740, 741, 742, 743, 744, 745, - 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, - 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, - 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, - 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, - 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, - 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, - 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, - 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, - 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, - 836, 837, 838, 840, 841, 842, 843, 844, 845, 846, - 847, 848, 849, 850, 851, 852, 0, 0, 0, 614, - 0, 609, 0, 609, 609, 609, 0, 253, 388, 639, - 640, 839, 0, 0, 0, 856, 0, 856, 856, 266, - 856, 856, 269, 856, 0, 856, 0, 276, 278, 279, - 280, 281, 0, 285, 856, 300, 301, 290, 302, 305, - 308, 309, 310, 311, 312, 855, 855, 315, 29, 571, - 0, 0, 559, 31, 0, 316, 321, 322, 326, 324, - 325, 317, 0, 334, 338, 0, 397, 0, 402, 404, - -2, -2, 0, 439, 440, 441, 442, 443, 0, 0, - 0, 0, 0, 0, 0, 0, 467, 468, 469, 470, - 544, 545, 546, 547, 548, 549, 550, 
551, 406, 407, - 541, 591, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 532, 0, 503, 503, 503, 503, 503, 503, 503, - 503, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 44, 46, 388, 50, 0, 830, 595, -2, - -2, 0, 0, 637, 638, -2, 748, -2, 643, 644, - 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, - 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, - 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, - 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, - 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, - 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, - 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, - 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, - 735, 736, 737, 0, 0, 84, 0, 82, 0, 856, - 0, 0, 0, 0, 0, 0, 856, 0, 0, 0, - 0, 244, 0, 0, 0, 0, 0, 0, 0, 252, - 0, 254, 856, 856, 257, 857, 858, 856, 856, 856, - 0, 856, 856, 264, 265, 267, 268, 270, 856, 856, - 272, 0, 293, 291, 292, 287, 288, 0, 282, 283, - 286, 313, 314, 30, 854, 24, 0, 0, 568, 0, - 560, 561, 564, 567, 29, 323, 0, 328, 327, 319, - 0, 335, 0, 0, 0, 339, 0, 341, 342, 0, - 400, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 21, 22, 556, 0, 313, 313, 313, 313, 313, 313, + 0, 627, 610, 0, 0, 0, 0, -2, 300, 301, + 0, 303, 304, 847, 847, 847, 847, 847, 0, 0, + 847, 0, 35, 36, 845, 1, 3, 564, 0, 0, + 317, 320, 315, 0, 610, 610, 0, 0, 65, 66, + 0, 0, 0, 836, 0, 608, 608, 608, 628, 629, + 632, 633, 735, 736, 737, 738, 739, 740, 741, 742, + 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, + 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, + 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, + 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, + 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, + 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, + 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, + 833, 834, 835, 837, 838, 839, 840, 841, 842, 843, + 844, 0, 0, 0, 611, 0, 606, 0, 606, 606, + 606, 
0, 255, 385, 636, 637, 836, 0, 0, 0, + 848, 0, 848, 267, 848, 848, 270, 848, 0, 848, + 0, 277, 0, 282, 848, 297, 298, 287, 299, 302, + 305, 306, 307, 308, 309, 847, 847, 312, 29, 568, + 0, 0, 556, 31, 0, 313, 318, 319, 323, 321, + 322, 314, 0, 331, 335, 0, 394, 0, 399, 401, + -2, -2, 0, 436, 437, 438, 439, 440, 0, 0, + 0, 0, 0, 0, 0, 0, 464, 465, 466, 467, + 541, 542, 543, 544, 545, 546, 547, 548, 403, 404, + 538, 588, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 529, 0, 500, 500, 500, 500, 500, 500, 500, + 500, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 44, 46, 385, 50, 0, 827, 592, -2, + -2, 0, 0, 634, 635, -2, 745, -2, 640, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, + 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, + 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, + 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, + 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, + 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, + 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, + 732, 733, 734, 0, 0, 84, 0, 82, 0, 848, + 0, 0, 0, 0, 0, 0, 848, 0, 0, 0, + 0, 246, 0, 0, 0, 0, 0, 0, 0, 254, + 0, 256, 848, 848, 259, 849, 850, 848, 848, 848, + 0, 848, 848, 266, 268, 269, 271, 848, 848, 273, + 0, 290, 288, 289, 284, 285, 0, 279, 280, 283, + 310, 311, 30, 846, 24, 0, 0, 565, 0, 557, + 558, 561, 564, 29, 320, 0, 325, 324, 316, 0, + 332, 0, 0, 0, 336, 0, 338, 339, 0, 397, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 424, 425, 426, 427, 428, 429, 430, 403, 0, 417, - 0, 0, 0, 459, 460, 461, 462, 463, 464, 465, - 0, 330, 29, 0, 437, 0, 0, 0, 0, 0, - 0, 0, 0, 326, 0, 533, 0, 487, 495, 0, - 488, 496, 489, 497, 490, 0, 491, 498, 492, 499, - 493, 494, 500, 0, 0, 0, 330, 0, 0, 48, - 0, 387, 0, 345, 347, 348, 349, -2, 0, 371, - -2, 0, 0, 0, 42, 43, 0, 0, 0, 0, - 51, 830, 53, 54, 0, 0, 0, 162, 604, 605, - 606, 602, 206, 0, 0, 150, 146, 90, 91, 92, - 139, 94, 139, 139, 139, 139, 159, 159, 159, 159, - 122, 
123, 124, 125, 126, 0, 0, 109, 139, 139, - 139, 113, 129, 130, 131, 132, 133, 134, 135, 136, - 95, 96, 97, 98, 99, 100, 101, 102, 103, 141, - 141, 141, 143, 143, 633, 68, 0, 856, 0, 856, - 80, 0, 220, 0, 0, 0, 0, 0, 0, 0, - 247, 610, 0, 856, 250, 251, 389, 641, 642, 255, - 256, 258, 259, 260, 261, 262, 263, 271, 275, 0, - 296, 0, 0, 277, 0, 572, 0, 0, 0, 0, - 0, 563, 565, 566, 571, 32, 326, 0, 552, 0, - 0, 0, 329, 27, 398, 399, 401, 418, 0, 420, - 422, 340, 336, 0, 542, -2, 408, 409, 433, 434, - 435, 0, 0, 0, 0, 431, 413, 0, 444, 445, - 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, - 458, 517, 518, 0, 456, 457, 466, 0, 0, 331, - 332, 436, 0, 590, 29, 0, 0, 0, 0, 441, - 544, 0, 441, 544, 0, 0, 0, 539, 536, 0, - 0, 541, 0, 504, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 386, 0, 0, 0, 0, 0, 0, - 376, 0, 0, 379, 0, 0, 0, 0, 370, 0, - 0, 391, 798, 372, 0, 374, 375, 395, 0, 395, - 45, 395, 47, 0, 390, 596, 52, 0, 0, 57, - 58, 597, 598, 599, 600, 0, 81, 207, 209, 212, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 421, + 422, 423, 424, 425, 426, 427, 400, 0, 414, 0, + 0, 0, 456, 457, 458, 459, 460, 461, 462, 0, + 327, 29, 0, 434, 0, 0, 0, 0, 0, 0, + 0, 0, 323, 0, 530, 0, 484, 492, 0, 485, + 493, 486, 494, 487, 0, 488, 495, 489, 496, 490, + 491, 497, 0, 0, 0, 327, 0, 0, 48, 0, + 384, 0, 342, 344, 345, 346, -2, 0, 368, -2, + 0, 0, 0, 42, 43, 0, 0, 0, 0, 51, + 827, 53, 54, 0, 0, 0, 162, 601, 602, 603, + 599, 206, 0, 0, 150, 146, 90, 91, 92, 139, + 94, 139, 139, 139, 139, 159, 159, 159, 159, 122, + 123, 124, 125, 126, 0, 0, 109, 139, 139, 139, + 113, 129, 130, 131, 132, 133, 134, 135, 136, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 141, 141, + 141, 143, 143, 630, 68, 0, 848, 0, 848, 80, + 0, 220, 0, 0, 0, 0, 0, 0, 0, 0, + 249, 607, 0, 848, 252, 253, 386, 638, 639, 257, + 258, 260, 261, 262, 263, 264, 265, 272, 276, 0, + 293, 0, 0, 278, 0, 569, 0, 0, 0, 0, + 0, 560, 562, 563, 568, 32, 323, 0, 549, 0, + 0, 0, 326, 27, 395, 396, 398, 415, 0, 417, + 419, 337, 333, 0, 539, -2, 405, 406, 430, 431, + 432, 0, 0, 
0, 0, 428, 410, 0, 441, 442, + 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, + 455, 514, 515, 0, 453, 454, 463, 0, 0, 328, + 329, 433, 0, 587, 29, 0, 0, 0, 0, 438, + 541, 0, 438, 541, 0, 0, 0, 536, 533, 0, + 0, 538, 0, 501, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 383, 0, 0, 0, 0, 0, 0, + 373, 0, 0, 376, 0, 0, 0, 0, 367, 0, + 0, 388, 795, 369, 0, 371, 372, 392, 0, 392, + 45, 392, 47, 0, 387, 593, 52, 0, 0, 57, + 58, 594, 595, 596, 597, 0, 81, 207, 209, 212, 213, 214, 85, 86, 87, 0, 0, 194, 0, 0, 188, 188, 0, 186, 187, 83, 153, 151, 0, 148, 147, 93, 0, 159, 159, 116, 117, 162, 0, 162, 162, 162, 0, 0, 110, 111, 112, 104, 0, 105, - 106, 107, 0, 108, 0, 0, 856, 70, 612, 71, - 855, 0, 0, 625, 221, 615, 616, 617, 618, 619, - 620, 621, 622, 623, 624, 0, 72, 223, 225, 224, - 0, 0, 0, 245, 856, 249, 293, 274, 0, 0, - 294, 295, 284, 0, 569, 570, 0, 562, 25, 0, - 607, 608, 553, 554, 343, 419, 421, 423, 0, 330, - 410, 431, 414, 0, 411, 0, 0, 405, 471, 0, - 0, 438, -2, 474, 475, 0, 0, 0, 0, 0, - 510, 0, 0, 511, 0, 559, 0, 537, 0, 0, - 486, 0, 505, 0, 0, 506, 507, 508, 509, 584, - 0, 0, -2, 0, 0, 395, 592, 0, 346, 365, - 367, 0, 362, 377, 378, 380, 0, 382, 0, 384, - 385, 350, 352, 353, 0, 0, 0, 0, 373, 559, - 0, 395, 40, 41, 0, 55, 56, 0, 0, 62, - 163, 164, 0, 210, 0, 0, 0, 181, 188, 188, - 184, 189, 185, 0, 155, 0, 152, 89, 149, 0, - 162, 162, 118, 0, 119, 120, 121, 0, 137, 0, - 0, 0, 0, 634, 69, 215, 855, 228, 229, 230, - 231, 232, 233, 234, 235, 236, 237, 238, 855, 0, - 855, 626, 627, 628, 629, 0, 75, 0, 0, 0, - 0, 248, 296, 297, 298, 573, 0, 26, 395, 0, - 337, 543, 0, 412, 0, 432, 415, 472, 333, 0, - 139, 139, 522, 139, 143, 525, 139, 527, 139, 530, - 0, 0, 0, 0, 0, 0, 0, 534, 485, 540, - 0, 542, 0, 0, 33, 0, 584, 574, 586, 588, - 0, 29, 0, 580, 0, 357, 559, 0, 0, 359, - 366, 0, 0, 360, 0, 361, 381, 383, 0, 0, - 0, 0, 567, 396, 39, 59, 60, 61, 208, 211, - 0, 190, 139, 193, 182, 183, 0, 157, 0, 154, - 140, 114, 115, 160, 161, 159, 0, 159, 0, 144, - 0, 856, 216, 217, 218, 219, 0, 222, 
0, 73, - 74, 0, 227, 246, 273, 555, 344, 473, 416, 476, - 519, 159, 523, 524, 526, 528, 529, 531, 478, 477, - 0, 0, 0, 0, 0, 0, 0, 538, 0, 0, - 0, 34, 0, 589, -2, 0, 0, 0, 49, 0, - 567, 593, 594, 363, 0, 368, 0, 0, 0, 371, - 38, 173, 0, 192, 0, 355, 165, 158, 0, 162, - 138, 162, 0, 0, 67, 0, 76, 77, 0, 0, - 557, 0, 520, 521, 0, 0, 0, 0, 512, 484, - 535, 0, 0, 0, 587, 0, -2, 0, 582, 581, - 358, 37, 0, 0, 0, 0, 391, 172, 174, 0, - 179, 0, 191, 0, 0, 170, 0, 167, 169, 156, - 127, 128, 142, 145, 0, 0, 0, 0, 28, 0, - 0, 479, 481, 480, 482, 0, 0, 0, 501, 502, - 0, 577, 29, 0, 364, 392, 393, 394, 354, 175, - 176, 0, 180, 178, 0, 356, 88, 0, 166, 168, - 0, 240, 0, 78, 79, 72, 558, 556, 483, 0, - 0, 0, 585, -2, 583, 177, 0, 171, 239, 0, - 0, 75, 513, 0, 516, 0, 241, 0, 226, 514, - 0, 0, 0, 195, 0, 0, 196, 197, 0, 0, - 515, 198, 0, 0, 0, 0, 0, 199, 201, 202, - 0, 0, 200, 242, 243, 203, 204, 205, + 106, 107, 0, 108, 0, 0, 848, 70, 609, 71, + 847, 0, 0, 622, 221, 612, 613, 614, 615, 616, + 617, 618, 619, 620, 621, 0, 72, 223, 225, 224, + 228, 0, 0, 0, 247, 848, 251, 290, 275, 0, + 0, 291, 292, 281, 0, 566, 567, 0, 559, 25, + 0, 604, 605, 550, 551, 340, 416, 418, 420, 0, + 327, 407, 428, 411, 0, 408, 0, 0, 402, 468, + 0, 0, 435, -2, 471, 472, 0, 0, 0, 0, + 0, 507, 0, 0, 508, 0, 556, 0, 534, 0, + 0, 483, 0, 502, 0, 0, 503, 504, 505, 506, + 581, 0, 0, -2, 0, 0, 392, 589, 0, 343, + 362, 364, 0, 359, 374, 375, 377, 0, 379, 0, + 381, 382, 347, 349, 350, 0, 0, 0, 0, 370, + 556, 0, 392, 40, 41, 0, 55, 56, 0, 0, + 62, 163, 164, 0, 210, 0, 0, 0, 181, 188, + 188, 184, 189, 185, 0, 155, 0, 152, 89, 149, + 0, 162, 162, 118, 0, 119, 120, 121, 0, 137, + 0, 0, 0, 0, 631, 69, 215, 847, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 847, + 0, 847, 623, 624, 625, 626, 0, 75, 0, 0, + 0, 0, 0, 250, 293, 294, 295, 570, 0, 26, + 392, 0, 334, 540, 0, 409, 0, 429, 412, 469, + 330, 0, 139, 139, 519, 139, 143, 522, 139, 524, + 139, 527, 0, 0, 0, 0, 0, 0, 0, 531, + 482, 537, 0, 539, 0, 0, 33, 
0, 581, 571, + 583, 585, 0, 29, 0, 577, 0, 354, 556, 0, + 0, 356, 363, 0, 0, 357, 0, 358, 378, 380, + 0, 0, 0, 0, 564, 393, 39, 59, 60, 61, + 208, 211, 0, 190, 139, 193, 182, 183, 0, 157, + 0, 154, 140, 114, 115, 160, 161, 159, 0, 159, + 0, 144, 0, 848, 216, 217, 218, 219, 0, 222, + 0, 73, 74, 0, 0, 227, 248, 274, 552, 341, + 470, 413, 473, 516, 159, 520, 521, 523, 525, 526, + 528, 475, 474, 0, 0, 0, 0, 0, 0, 0, + 535, 0, 0, 0, 34, 0, 586, -2, 0, 0, + 0, 49, 0, 564, 590, 591, 360, 0, 365, 0, + 0, 0, 368, 38, 173, 0, 192, 0, 352, 165, + 158, 0, 162, 138, 162, 0, 0, 67, 0, 76, + 77, 0, 0, 0, 554, 0, 517, 518, 0, 0, + 0, 0, 509, 481, 532, 0, 0, 0, 584, 0, + -2, 0, 579, 578, 355, 37, 0, 0, 0, 0, + 388, 172, 174, 0, 179, 0, 191, 0, 0, 170, + 0, 167, 169, 156, 127, 128, 142, 145, 0, 0, + 0, 0, 229, 28, 0, 0, 476, 478, 477, 479, + 0, 0, 0, 498, 499, 0, 574, 29, 0, 361, + 389, 390, 391, 351, 175, 176, 0, 180, 178, 0, + 353, 88, 0, 166, 168, 0, 242, 0, 78, 79, + 72, 555, 553, 480, 0, 0, 0, 582, -2, 580, + 177, 0, 171, 241, 0, 0, 75, 510, 0, 513, + 0, 243, 0, 226, 511, 0, 0, 0, 195, 0, + 0, 196, 197, 0, 0, 512, 198, 0, 0, 0, + 0, 0, 199, 201, 202, 0, 0, 200, 244, 245, + 203, 204, 205, } var yyTok1 = [...]int{ @@ -2638,7 +2623,7 @@ var yyTok1 = [...]int{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 75, 3, 3, 3, 102, 94, 3, 54, 56, 99, 97, 55, 98, 112, 100, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 269, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 264, 83, 82, 84, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, @@ -2673,7 +2658,7 @@ var yyTok2 = [...]int{ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, - 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 259, 260, 261, 262, 263, } var yyTok3 = [...]int{ 0, @@ -3018,35 +3003,35 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:316 +//line sql.y:317 { setParseTree(yylex, yyDollar[1].statement) } case 2: 
yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:321 +//line sql.y:322 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:322 +//line sql.y:323 { } case 4: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:326 +//line sql.y:327 { yyVAL.statement = yyDollar[1].selStmt } case 23: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:348 +//line sql.y:349 { setParseTree(yylex, nil) } case 24: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:354 +//line sql.y:355 { sel := yyDollar[1].selStmt.(*Select) sel.OrderBy = yyDollar[2].orderBy @@ -3056,55 +3041,55 @@ yydefault: } case 25: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:362 +//line sql.y:363 { yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].str} } case 26: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:366 +//line sql.y:367 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, SelectExprs: SelectExprs{Nextval{Expr: yyDollar[5].expr}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}} } case 27: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:372 +//line sql.y:373 { yyVAL.statement = &Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} } case 28: yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:379 +//line sql.y:380 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, Distinct: yyDollar[4].str, Hints: yyDollar[5].str, SelectExprs: yyDollar[6].selectExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), GroupBy: GroupBy(yyDollar[9].exprs), Having: NewWhere(HavingStr, yyDollar[10].expr)} } case 29: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:385 +//line sql.y:386 { yyVAL.selStmt = yyDollar[1].selStmt } case 30: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:389 +//line sql.y:390 { yyVAL.selStmt = &ParenSelect{Select: 
yyDollar[2].selStmt} } case 31: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:395 +//line sql.y:396 { yyVAL.selStmt = yyDollar[1].selStmt } case 32: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:399 +//line sql.y:400 { yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} } case 33: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:406 +//line sql.y:407 { // insert_data returns a *Insert pre-filled with Columns & Values ins := yyDollar[6].ins @@ -3118,7 +3103,7 @@ yydefault: } case 34: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:418 +//line sql.y:419 { cols := make(Columns, 0, len(yyDollar[7].updateExprs)) vals := make(ValTuple, 0, len(yyDollar[8].updateExprs)) @@ -3130,192 +3115,192 @@ yydefault: } case 35: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:430 +//line sql.y:431 { yyVAL.str = InsertStr } case 36: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:434 +//line sql.y:435 { yyVAL.str = ReplaceStr } case 37: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:440 +//line sql.y:441 { yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, TableExprs: yyDollar[4].tableExprs, Exprs: yyDollar[6].updateExprs, Where: NewWhere(WhereStr, yyDollar[7].expr), OrderBy: yyDollar[8].orderBy, Limit: yyDollar[9].limit} } case 38: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:446 +//line sql.y:447 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Partitions: yyDollar[5].partitions, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} } case 39: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:450 +//line sql.y:451 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereStr, yyDollar[7].expr)} } case 40: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:454 +//line sql.y:455 { yyVAL.statement = &Delete{Comments: 
Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr)} } case 41: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:458 +//line sql.y:459 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr)} } case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:463 +//line sql.y:464 { } case 43: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:464 +//line sql.y:465 { } case 44: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:468 +//line sql.y:469 { yyVAL.tableNames = TableNames{yyDollar[1].tableName} } case 45: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:472 +//line sql.y:473 { yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) } case 46: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:478 +//line sql.y:479 { yyVAL.tableNames = TableNames{yyDollar[1].tableName} } case 47: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:482 +//line sql.y:483 { yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) } case 48: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:487 +//line sql.y:488 { yyVAL.partitions = nil } case 49: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:491 +//line sql.y:492 { yyVAL.partitions = yyDollar[3].partitions } case 50: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:497 +//line sql.y:498 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].setExprs} } case 51: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:501 +//line sql.y:502 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[4].setExprs} } case 52: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:505 +//line sql.y:506 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[5].setExprs} } case 53: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:509 +//line 
sql.y:510 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[4].setExprs} } case 54: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:515 +//line sql.y:516 { yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} } case 55: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:519 +//line sql.y:520 { yyVAL.setExprs = append(yyVAL.setExprs, yyDollar[3].setExpr) } case 56: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:525 +//line sql.y:526 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(yyDollar[3].str))} } case 57: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:529 +//line sql.y:530 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(TxReadWrite))} } case 58: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:533 +//line sql.y:534 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(TxReadOnly))} } case 59: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:539 +//line sql.y:540 { yyVAL.str = IsolationLevelRepeatableRead } case 60: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:543 +//line sql.y:544 { yyVAL.str = IsolationLevelReadCommitted } case 61: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:547 +//line sql.y:548 { yyVAL.str = IsolationLevelReadUncommitted } case 62: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:551 +//line sql.y:552 { yyVAL.str = IsolationLevelSerializable } case 63: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:557 +//line sql.y:558 { yyVAL.str = SessionStr } case 64: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:561 +//line sql.y:562 { yyVAL.str = GlobalStr } case 65: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:567 +//line sql.y:568 { yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec yyVAL.statement = yyDollar[1].ddl } case 66: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:572 +//line sql.y:573 { // Create table [name] like [name] yyDollar[1].ddl.OptLike = yyDollar[2].optLike @@ -3323,139 +3308,139 @@ yydefault: } case 67: yyDollar = 
yyS[yypt-8 : yypt+1] -//line sql.y:578 +//line sql.y:579 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName} } case 68: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:583 +//line sql.y:584 { yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[3].tableName.ToViewName()} } case 69: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:587 +//line sql.y:588 { yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[5].tableName.ToViewName()} } case 70: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:591 +//line sql.y:592 { yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} } case 71: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:595 +//line sql.y:596 { yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} } case 72: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:600 +//line sql.y:601 { yyVAL.colIdent = NewColIdent("") } case 73: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:604 +//line sql.y:605 { yyVAL.colIdent = yyDollar[2].colIdent } case 74: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:610 +//line sql.y:611 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 75: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:615 +//line sql.y:616 { var v []VindexParam yyVAL.vindexParams = v } case 76: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:620 +//line sql.y:621 { yyVAL.vindexParams = yyDollar[2].vindexParams } case 77: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:626 +//line sql.y:627 { yyVAL.vindexParams = make([]VindexParam, 0, 4) yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[1].vindexParam) } case 78: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:631 +//line sql.y:632 { yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[3].vindexParam) } case 79: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:637 +//line sql.y:638 { yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} } case 80: yyDollar = yyS[yypt-4 : yypt+1] 
-//line sql.y:643 +//line sql.y:644 { yyVAL.ddl = &DDL{Action: CreateStr, Table: yyDollar[4].tableName} setDDL(yylex, yyVAL.ddl) } case 81: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:650 +//line sql.y:651 { yyVAL.TableSpec = yyDollar[2].TableSpec yyVAL.TableSpec.Options = yyDollar[4].str } case 82: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:657 +//line sql.y:658 { yyVAL.optLike = &OptLike{LikeTable: yyDollar[2].tableName} } case 83: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:661 +//line sql.y:662 { yyVAL.optLike = &OptLike{LikeTable: yyDollar[3].tableName} } case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:667 +//line sql.y:668 { yyVAL.TableSpec = &TableSpec{} yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) } case 85: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:672 +//line sql.y:673 { yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) } case 86: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:676 +//line sql.y:677 { yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) } case 87: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:680 +//line sql.y:681 { yyVAL.TableSpec.AddConstraint(yyDollar[3].constraintDefinition) } case 88: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:686 +//line sql.y:687 { yyDollar[2].columnType.NotNull = yyDollar[3].boolVal yyDollar[2].columnType.Default = yyDollar[4].optVal @@ -3467,7 +3452,7 @@ yydefault: } case 89: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:697 +//line sql.y:698 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].boolVal @@ -3475,74 +3460,74 @@ yydefault: } case 93: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:708 +//line sql.y:709 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Length = yyDollar[2].sqlVal } case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:713 +//line sql.y:714 { yyVAL.columnType = yyDollar[1].columnType } case 95: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:719 +//line sql.y:720 { yyVAL.columnType = ColumnType{Type: 
string(yyDollar[1].bytes)} } case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:723 +//line sql.y:724 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 97: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:727 +//line sql.y:728 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 98: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:731 +//line sql.y:732 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 99: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:735 +//line sql.y:736 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 100: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:739 +//line sql.y:740 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 101: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:743 +//line sql.y:744 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 102: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:747 +//line sql.y:748 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 103: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:751 +//line sql.y:752 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 104: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:757 +//line sql.y:758 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3550,7 +3535,7 @@ yydefault: } case 105: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:763 +//line sql.y:764 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3558,7 +3543,7 @@ yydefault: } case 106: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:769 +//line sql.y:770 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3566,7 +3551,7 @@ yydefault: } case 107: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:775 +//line sql.y:776 { 
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3574,7 +3559,7 @@ yydefault: } case 108: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:781 +//line sql.y:782 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3582,206 +3567,206 @@ yydefault: } case 109: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:789 +//line sql.y:790 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 110: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:793 +//line sql.y:794 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 111: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:797 +//line sql.y:798 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 112: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:801 +//line sql.y:802 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 113: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:805 +//line sql.y:806 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 114: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:811 +//line sql.y:812 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } case 115: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:815 +//line sql.y:816 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } case 116: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:819 +//line sql.y:820 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 117: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:823 +//line sql.y:824 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: 
yyDollar[2].sqlVal} } case 118: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:827 +//line sql.y:828 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 119: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:831 +//line sql.y:832 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 120: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:835 +//line sql.y:836 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 121: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:839 +//line sql.y:840 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 122: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:843 +//line sql.y:844 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 123: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:847 +//line sql.y:848 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:851 +//line sql.y:852 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 125: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:855 +//line sql.y:856 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 126: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:859 +//line sql.y:860 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 127: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:863 +//line sql.y:864 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } case 128: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:868 +//line sql.y:869 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } case 129: 
yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:874 +//line sql.y:875 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 130: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:878 +//line sql.y:879 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 131: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:882 +//line sql.y:883 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 132: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:886 +//line sql.y:887 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 133: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:890 +//line sql.y:891 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 134: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:894 +//line sql.y:895 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 135: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:898 +//line sql.y:899 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 136: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:902 +//line sql.y:903 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 137: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:908 +//line sql.y:909 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") } case 138: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:913 +//line sql.y:914 { yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") } case 139: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:918 +//line sql.y:919 { yyVAL.sqlVal = nil } case 140: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:922 +//line sql.y:923 { yyVAL.sqlVal = NewIntVal(yyDollar[2].bytes) } case 141: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:927 +//line sql.y:928 { yyVAL.LengthScaleOption = LengthScaleOption{} } case 142: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:931 +//line sql.y:932 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: 
NewIntVal(yyDollar[2].bytes), @@ -3790,13 +3775,13 @@ yydefault: } case 143: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:939 +//line sql.y:940 { yyVAL.LengthScaleOption = LengthScaleOption{} } case 144: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:943 +//line sql.y:944 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), @@ -3804,7 +3789,7 @@ yydefault: } case 145: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:949 +//line sql.y:950 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), @@ -3813,466 +3798,466 @@ yydefault: } case 146: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:957 +//line sql.y:958 { yyVAL.boolVal = BoolVal(false) } case 147: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:961 +//line sql.y:962 { yyVAL.boolVal = BoolVal(true) } case 148: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:966 +//line sql.y:967 { yyVAL.boolVal = BoolVal(false) } case 149: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:970 +//line sql.y:971 { yyVAL.boolVal = BoolVal(true) } case 150: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:976 +//line sql.y:977 { yyVAL.boolVal = BoolVal(false) } case 151: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:980 +//line sql.y:981 { yyVAL.boolVal = BoolVal(false) } case 152: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:984 +//line sql.y:985 { yyVAL.boolVal = BoolVal(true) } case 153: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:989 +//line sql.y:990 { yyVAL.optVal = nil } case 154: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:993 +//line sql.y:994 { yyVAL.optVal = yyDollar[2].expr } case 155: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:998 +//line sql.y:999 { yyVAL.optVal = nil } case 156: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1002 +//line sql.y:1003 { yyVAL.optVal = yyDollar[3].expr } case 157: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1007 +//line sql.y:1008 { yyVAL.boolVal = BoolVal(false) } case 158: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1011 +//line 
sql.y:1012 { yyVAL.boolVal = BoolVal(true) } case 159: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1016 +//line sql.y:1017 { yyVAL.str = "" } case 160: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1020 +//line sql.y:1021 { yyVAL.str = string(yyDollar[3].bytes) } case 161: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1024 +//line sql.y:1025 { yyVAL.str = string(yyDollar[3].bytes) } case 162: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1029 +//line sql.y:1030 { yyVAL.str = "" } case 163: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1033 +//line sql.y:1034 { yyVAL.str = string(yyDollar[2].bytes) } case 164: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1037 +//line sql.y:1038 { yyVAL.str = string(yyDollar[2].bytes) } case 165: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1042 +//line sql.y:1043 { yyVAL.colKeyOpt = colKeyNone } case 166: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1046 +//line sql.y:1047 { yyVAL.colKeyOpt = colKeyPrimary } case 167: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1050 +//line sql.y:1051 { yyVAL.colKeyOpt = colKey } case 168: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1054 +//line sql.y:1055 { yyVAL.colKeyOpt = colKeyUniqueKey } case 169: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1058 +//line sql.y:1059 { yyVAL.colKeyOpt = colKeyUnique } case 170: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1063 +//line sql.y:1064 { yyVAL.sqlVal = nil } case 171: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1067 +//line sql.y:1068 { yyVAL.sqlVal = NewStrVal(yyDollar[2].bytes) } case 172: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1073 +//line sql.y:1074 { yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Options: yyDollar[5].indexOptions} } case 173: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1077 +//line sql.y:1078 { yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns} } case 174: yyDollar = yyS[yypt-1 : yypt+1] -//line 
sql.y:1083 +//line sql.y:1084 { yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} } case 175: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1087 +//line sql.y:1088 { yyVAL.indexOptions = append(yyVAL.indexOptions, yyDollar[2].indexOption) } case 176: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1093 +//line sql.y:1094 { yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Using: string(yyDollar[2].bytes)} } case 177: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1097 +//line sql.y:1098 { // should not be string yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewIntVal(yyDollar[3].bytes)} } case 178: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1102 +//line sql.y:1103 { yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewStrVal(yyDollar[2].bytes)} } case 179: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1108 +//line sql.y:1109 { yyVAL.str = "" } case 180: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1112 +//line sql.y:1113 { yyVAL.str = string(yyDollar[1].bytes) } case 181: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1118 +//line sql.y:1119 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} } case 182: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1122 +//line sql.y:1123 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Spatial: true, Unique: false} } case 183: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1126 +//line sql.y:1127 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Unique: true} } case 184: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1130 +//line sql.y:1131 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(yyDollar[2].str), Unique: true} } case 185: yyDollar = yyS[yypt-2 : 
yypt+1] -//line sql.y:1134 +//line sql.y:1135 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(yyDollar[2].str), Unique: false} } case 186: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1140 +//line sql.y:1141 { yyVAL.str = string(yyDollar[1].bytes) } case 187: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1144 +//line sql.y:1145 { yyVAL.str = string(yyDollar[1].bytes) } case 188: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1149 +//line sql.y:1150 { yyVAL.str = "" } case 189: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1153 +//line sql.y:1154 { yyVAL.str = string(yyDollar[1].bytes) } case 190: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1159 +//line sql.y:1160 { yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} } case 191: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1163 +//line sql.y:1164 { yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) } case 192: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1169 +//line sql.y:1170 { yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].sqlVal} } case 193: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1175 +//line sql.y:1176 { yyVAL.constraintDefinition = &ConstraintDefinition{Name: string(yyDollar[2].bytes), Details: yyDollar[3].constraintInfo} } case 194: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1179 +//line sql.y:1180 { yyVAL.constraintDefinition = &ConstraintDefinition{Details: yyDollar[1].constraintInfo} } case 195: yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:1186 +//line sql.y:1187 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns} } case 196: yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1190 +//line sql.y:1191 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction} } 
case 197: yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1194 +//line sql.y:1195 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnUpdate: yyDollar[11].ReferenceAction} } case 198: yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:1198 +//line sql.y:1199 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction, OnUpdate: yyDollar[12].ReferenceAction} } case 199: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1204 +//line sql.y:1205 { yyVAL.ReferenceAction = yyDollar[3].ReferenceAction } case 200: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1210 +//line sql.y:1211 { yyVAL.ReferenceAction = yyDollar[3].ReferenceAction } case 201: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1216 +//line sql.y:1217 { yyVAL.ReferenceAction = Restrict } case 202: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1220 +//line sql.y:1221 { yyVAL.ReferenceAction = Cascade } case 203: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1224 +//line sql.y:1225 { yyVAL.ReferenceAction = NoAction } case 204: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1228 +//line sql.y:1229 { yyVAL.ReferenceAction = SetDefault } case 205: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1232 +//line sql.y:1233 { yyVAL.ReferenceAction = SetNull } case 206: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1237 +//line sql.y:1238 { yyVAL.str = "" } case 207: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1241 +//line sql.y:1242 { yyVAL.str = " " + string(yyDollar[1].str) } case 208: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1245 +//line sql.y:1246 { yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str) } case 209: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1253 +//line sql.y:1254 { yyVAL.str = yyDollar[1].str } case 210: yyDollar = yyS[yypt-2 : yypt+1] -//line 
sql.y:1257 +//line sql.y:1258 { yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str } case 211: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1261 +//line sql.y:1262 { yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str } case 212: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1267 +//line sql.y:1268 { yyVAL.str = yyDollar[1].colIdent.String() } case 213: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1271 +//line sql.y:1272 { yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" } case 214: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1275 +//line sql.y:1276 { yyVAL.str = string(yyDollar[1].bytes) } case 215: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1281 +//line sql.y:1282 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } case 216: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1285 +//line sql.y:1286 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } case 217: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1289 +//line sql.y:1290 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } case 218: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1293 +//line sql.y:1294 { // Change this to a rename statement yyVAL.statement = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[4].tableName}, ToTables: TableNames{yyDollar[7].tableName}} } case 219: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1298 +//line sql.y:1299 { // Rename an index can just be an alter yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } case 220: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1303 +//line sql.y:1304 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName()} } case 221: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1307 +//line sql.y:1308 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, PartitionSpec: yyDollar[5].partSpec} } case 222: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1311 +//line sql.y:1312 { yyVAL.statement = &DDL{Action: 
CreateVindexStr, VindexSpec: &VindexSpec{ Name: yyDollar[5].colIdent, @@ -4282,7 +4267,7 @@ yydefault: } case 223: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1319 +//line sql.y:1320 { yyVAL.statement = &DDL{Action: DropVindexStr, VindexSpec: &VindexSpec{ Name: yyDollar[5].colIdent, @@ -4290,19 +4275,19 @@ yydefault: } case 224: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1325 +//line sql.y:1326 { yyVAL.statement = &DDL{Action: AddVschemaTableStr, Table: yyDollar[5].tableName} } case 225: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1329 +//line sql.y:1330 { yyVAL.statement = &DDL{Action: DropVschemaTableStr, Table: yyDollar[5].tableName} } case 226: yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:1333 +//line sql.y:1334 { yyVAL.statement = &DDL{ Action: AddColVindexStr, @@ -4317,7 +4302,7 @@ yydefault: } case 227: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1346 +//line sql.y:1347 { yyVAL.statement = &DDL{ Action: DropColVindexStr, @@ -4327,59 +4312,78 @@ yydefault: }, } } - case 239: + case 228: + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:1357 + { + yyVAL.statement = &DDL{Action: AddSequenceStr, Table: yyDollar[5].tableName} + } + case 229: + yyDollar = yyS[yypt-9 : yypt+1] +//line sql.y:1361 + { + yyVAL.statement = &DDL{ + Action: AddAutoIncStr, + Table: yyDollar[4].tableName, + AutoIncSpec: &AutoIncSpec{ + Column: yyDollar[7].colIdent, + Sequence: yyDollar[9].tableName, + }, + } + } + case 241: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1371 +//line sql.y:1387 { yyVAL.partSpec = &PartitionSpec{Action: ReorganizeStr, Name: yyDollar[3].colIdent, Definitions: yyDollar[6].partDefs} } - case 240: + case 242: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1377 +//line sql.y:1393 { yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} } - case 241: + case 243: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1381 +//line sql.y:1397 { yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) } - case 242: + case 244: yyDollar = yyS[yypt-8 : 
yypt+1] -//line sql.y:1387 +//line sql.y:1403 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} } - case 243: + case 245: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:1391 +//line sql.y:1407 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} } - case 244: + case 246: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1397 +//line sql.y:1413 { yyVAL.statement = yyDollar[3].ddl } - case 245: + case 247: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1403 +//line sql.y:1419 { yyVAL.ddl = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[1].tableName}, ToTables: TableNames{yyDollar[3].tableName}} } - case 246: + case 248: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1407 +//line sql.y:1423 { yyVAL.ddl = yyDollar[1].ddl yyVAL.ddl.FromTables = append(yyVAL.ddl.FromTables, yyDollar[3].tableName) yyVAL.ddl.ToTables = append(yyVAL.ddl.ToTables, yyDollar[5].tableName) } - case 247: + case 249: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1415 +//line sql.y:1431 { var exists bool if yyDollar[3].byt != 0 { @@ -4387,16 +4391,16 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, FromTables: yyDollar[4].tableNames, IfExists: exists} } - case 248: + case 250: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1423 +//line sql.y:1439 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName} } - case 249: + case 251: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1428 +//line sql.y:1444 { var exists bool if yyDollar[3].byt != 0 { @@ -4404,154 +4408,148 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, FromTables: TableNames{yyDollar[4].tableName.ToViewName()}, IfExists: exists} } - case 250: + case 252: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1436 +//line sql.y:1452 { yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} } - case 251: + case 253: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1440 +//line sql.y:1456 { 
yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} } - case 252: + case 254: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1446 +//line sql.y:1462 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[3].tableName} } - case 253: + case 255: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1450 +//line sql.y:1466 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[2].tableName} } - case 254: + case 256: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1455 +//line sql.y:1471 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName} } - case 255: + case 257: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1461 +//line sql.y:1477 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 256: + case 258: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1466 +//line sql.y:1482 { yyVAL.statement = &Show{Type: CharsetStr} } - case 257: + case 259: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1470 +//line sql.y:1486 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 258: + case 260: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1474 +//line sql.y:1490 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 259: + case 261: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1479 +//line sql.y:1495 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 260: + case 262: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1483 +//line sql.y:1499 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 261: + case 263: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1487 +//line sql.y:1503 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName} } - case 262: + case 264: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1491 +//line sql.y:1507 { yyVAL.statement = &Show{Type: 
string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 263: + case 265: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1495 +//line sql.y:1511 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 264: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1499 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 265: + case 266: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1503 +//line sql.y:1515 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 266: + case 267: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1507 +//line sql.y:1519 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 267: + case 268: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1511 +//line sql.y:1523 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 268: + case 269: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1515 +//line sql.y:1527 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 269: + case 270: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1519 +//line sql.y:1531 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 270: + case 271: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1523 +//line sql.y:1535 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 271: + case 272: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1527 +//line sql.y:1539 { yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } - case 272: + case 273: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1531 +//line sql.y:1543 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 273: + case 274: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1535 +//line sql.y:1547 { showTablesOpt := &ShowTablesOpt{Full: yyDollar[2].str, DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter} yyVAL.statement = &Show{Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt, OnTable: yyDollar[5].tableName} } - case 274: + case 275: 
yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1540 +//line sql.y:1552 { // this is ugly, but I couldn't find a better way for now if yyDollar[3].str == "processlist" { @@ -4561,448 +4559,424 @@ yydefault: yyVAL.statement = &Show{Type: yyDollar[3].str, ShowTablesOpt: showTablesOpt} } } - case 275: + case 276: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1550 +//line sql.y:1562 { yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } - case 276: + case 277: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1554 +//line sql.y:1566 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 277: + case 278: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1558 +//line sql.y:1570 { // Cannot dereference $4 directly, or else the parser stackcannot be pooled. See yyParsePooled showCollationFilterOpt := yyDollar[4].expr yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowCollationFilterOpt: &showCollationFilterOpt} } - case 278: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1564 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } case 279: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1568 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 280: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1572 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 281: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1576 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 282: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1580 +//line sql.y:1576 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 283: + case 280: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1584 +//line sql.y:1580 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 284: + case 281: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1588 +//line sql.y:1584 { yyVAL.statement = &Show{Type: 
string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName} } - case 285: + case 282: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1592 +//line sql.y:1588 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 286: + case 283: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1602 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 287: + case 284: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1608 { yyVAL.str = string(yyDollar[1].bytes) } - case 288: + case 285: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1612 { yyVAL.str = string(yyDollar[1].bytes) } - case 289: + case 286: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1618 { yyVAL.str = "" } - case 290: + case 287: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1622 { yyVAL.str = "full " } - case 291: + case 288: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1628 { yyVAL.str = string(yyDollar[1].bytes) } - case 292: + case 289: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1632 { yyVAL.str = string(yyDollar[1].bytes) } - case 293: + case 290: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1638 { yyVAL.str = "" } - case 294: + case 291: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1642 { yyVAL.str = yyDollar[2].tableIdent.v } - case 295: + case 292: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1646 { yyVAL.str = yyDollar[2].tableIdent.v } - case 296: + case 293: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1652 { yyVAL.showFilter = nil } - case 297: + case 294: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1656 { yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} } - case 298: + case 295: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1660 { yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} } - case 299: + case 296: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1666 { yyVAL.str = "" } - case 300: + case 297: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1670 { yyVAL.str = SessionStr } - case 301: + case 298: yyDollar = yyS[yypt-1 : yypt+1] //line 
sql.y:1674 { yyVAL.str = GlobalStr } - case 302: + case 299: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1680 { yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} } - case 303: + case 300: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1684 { yyVAL.statement = &Use{DBName: TableIdent{v: ""}} } - case 304: + case 301: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1690 { yyVAL.statement = &Begin{} } - case 305: + case 302: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1694 { yyVAL.statement = &Begin{} } - case 306: + case 303: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1700 { yyVAL.statement = &Commit{} } - case 307: + case 304: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1706 { yyVAL.statement = &Rollback{} } - case 308: + case 305: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1712 { yyVAL.statement = &OtherRead{} } - case 309: + case 306: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1716 { yyVAL.statement = &OtherRead{} } - case 310: + case 307: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1720 { yyVAL.statement = &OtherRead{} } - case 311: + case 308: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1724 { yyVAL.statement = &OtherAdmin{} } - case 312: + case 309: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1728 { yyVAL.statement = &OtherAdmin{} } - case 313: + case 310: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1732 { yyVAL.statement = &OtherAdmin{} } - case 314: + case 311: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1736 { yyVAL.statement = &OtherAdmin{} } - case 315: + case 312: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1742 { yyVAL.statement = &DDL{Action: FlushStr} } - case 316: + case 313: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1746 { setAllowComments(yylex, true) } - case 317: + case 314: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1750 { yyVAL.bytes2 = yyDollar[2].bytes2 setAllowComments(yylex, false) } - case 318: + case 315: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1756 { yyVAL.bytes2 = nil } - case 319: + case 316: yyDollar = 
yyS[yypt-2 : yypt+1] //line sql.y:1760 { yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) } - case 320: + case 317: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1766 { yyVAL.str = UnionStr } - case 321: + case 318: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1770 { yyVAL.str = UnionAllStr } - case 322: + case 319: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1774 { yyVAL.str = UnionDistinctStr } - case 323: + case 320: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1779 { yyVAL.str = "" } - case 324: + case 321: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1783 { yyVAL.str = SQLNoCacheStr } - case 325: + case 322: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1787 { yyVAL.str = SQLCacheStr } - case 326: + case 323: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1792 { yyVAL.str = "" } - case 327: + case 324: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1796 { yyVAL.str = DistinctStr } - case 328: + case 325: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1801 { yyVAL.str = "" } - case 329: + case 326: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1805 { yyVAL.str = StraightJoinHint } - case 330: + case 327: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1810 { yyVAL.selectExprs = nil } - case 331: + case 328: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1814 { yyVAL.selectExprs = yyDollar[1].selectExprs } - case 332: + case 329: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1820 { yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} } - case 333: + case 330: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1824 { yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) } - case 334: + case 331: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1830 { yyVAL.selectExpr = &StarExpr{} } - case 335: + case 332: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1834 { yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} } - case 336: + case 333: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1838 { yyVAL.selectExpr = &StarExpr{TableName: 
TableName{Name: yyDollar[1].tableIdent}} } - case 337: + case 334: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:1842 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} } - case 338: + case 335: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1847 { yyVAL.colIdent = ColIdent{} } - case 339: + case 336: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1851 { yyVAL.colIdent = yyDollar[1].colIdent } - case 340: + case 337: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1855 { yyVAL.colIdent = yyDollar[2].colIdent } - case 342: + case 339: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1862 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 343: + case 340: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1867 { yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} } - case 344: + case 341: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1871 { yyVAL.tableExprs = yyDollar[2].tableExprs } - case 345: + case 342: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1877 { yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} } - case 346: + case 343: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1881 { yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) } - case 349: + case 346: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1891 { yyVAL.tableExpr = yyDollar[1].aliasedTableName } - case 350: + case 347: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1895 { yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} } - case 351: + case 348: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1899 { @@ -5010,199 +4984,199 @@ yydefault: yylex.Error("Every derived table must have its own alias") return 1 } - case 352: + case 349: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1905 { yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} } - case 353: + case 350: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1911 { yyVAL.aliasedTableName = 
&AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} } - case 354: + case 351: yyDollar = yyS[yypt-7 : yypt+1] //line sql.y:1915 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} } - case 355: + case 352: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1921 { yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 356: + case 353: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1925 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } - case 357: + case 354: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1931 { yyVAL.partitions = Partitions{yyDollar[1].colIdent} } - case 358: + case 355: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1935 { yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) } - case 359: + case 356: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1948 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 360: + case 357: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1952 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 361: + case 358: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1956 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 362: + case 359: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1960 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} } - case 363: + case 360: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1966 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } - case 364: + case 361: yyDollar = yyS[yypt-4 : yypt+1] 
//line sql.y:1968 { yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} } - case 365: + case 362: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1972 { yyVAL.joinCondition = JoinCondition{} } - case 366: + case 363: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1974 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 367: + case 364: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1978 { yyVAL.joinCondition = JoinCondition{} } - case 368: + case 365: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1980 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } - case 369: + case 366: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1983 { yyVAL.empty = struct{}{} } - case 370: + case 367: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1985 { yyVAL.empty = struct{}{} } - case 371: + case 368: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:1988 { yyVAL.tableIdent = NewTableIdent("") } - case 372: + case 369: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1992 { yyVAL.tableIdent = yyDollar[1].tableIdent } - case 373: + case 370: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:1996 { yyVAL.tableIdent = yyDollar[2].tableIdent } - case 375: + case 372: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2003 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 376: + case 373: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2009 { yyVAL.str = JoinStr } - case 377: + case 374: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2013 { yyVAL.str = JoinStr } - case 378: + case 375: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2017 { yyVAL.str = JoinStr } - case 379: + case 376: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2023 { yyVAL.str = StraightJoinStr } - case 380: + case 377: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2029 { yyVAL.str = LeftJoinStr } - case 381: + case 378: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2033 { yyVAL.str = LeftJoinStr } - case 382: + case 379: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2037 { yyVAL.str = RightJoinStr } - case 383: + 
case 380: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2041 { yyVAL.str = RightJoinStr } - case 384: + case 381: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2047 { yyVAL.str = NaturalJoinStr } - case 385: + case 382: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2051 { @@ -5212,463 +5186,463 @@ yydefault: yyVAL.str = NaturalRightJoinStr } } - case 386: + case 383: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2061 { yyVAL.tableName = yyDollar[2].tableName } - case 387: + case 384: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2065 { yyVAL.tableName = yyDollar[1].tableName } - case 388: + case 385: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2071 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 389: + case 386: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2075 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} } - case 390: + case 387: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2081 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 391: + case 388: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2086 { yyVAL.indexHints = nil } - case 392: + case 389: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2090 { yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} } - case 393: + case 390: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2094 { yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} } - case 394: + case 391: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2098 { yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} } - case 395: + case 392: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2103 { yyVAL.expr = nil } - case 396: + case 393: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2107 { yyVAL.expr = yyDollar[2].expr } - case 397: + case 394: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2113 { yyVAL.expr = yyDollar[1].expr } - case 398: + case 395: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2117 { yyVAL.expr = 
&AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } - case 399: + case 396: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2121 { yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } - case 400: + case 397: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2125 { yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} } - case 401: + case 398: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2129 { yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} } - case 402: + case 399: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2133 { yyVAL.expr = yyDollar[1].expr } - case 403: + case 400: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2137 { yyVAL.expr = &Default{ColName: yyDollar[2].str} } - case 404: + case 401: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2143 { yyVAL.str = "" } - case 405: + case 402: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2147 { yyVAL.str = string(yyDollar[2].bytes) } - case 406: + case 403: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2153 { yyVAL.boolVal = BoolVal(true) } - case 407: + case 404: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2157 { yyVAL.boolVal = BoolVal(false) } - case 408: + case 405: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2163 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} } - case 409: + case 406: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2167 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} } - case 410: + case 407: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2171 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} } - case 411: + case 408: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2175 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} } - case 412: + case 409: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2179 { yyVAL.expr = &ComparisonExpr{Left: 
yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} } - case 413: + case 410: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2183 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} } - case 414: + case 411: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2187 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} } - case 415: + case 412: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2191 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} } - case 416: + case 413: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2195 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} } - case 417: + case 414: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2199 { yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} } - case 418: + case 415: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2205 { yyVAL.str = IsNullStr } - case 419: + case 416: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2209 { yyVAL.str = IsNotNullStr } - case 420: + case 417: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2213 { yyVAL.str = IsTrueStr } - case 421: + case 418: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2217 { yyVAL.str = IsNotTrueStr } - case 422: + case 419: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2221 { yyVAL.str = IsFalseStr } - case 423: + case 420: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2225 { yyVAL.str = IsNotFalseStr } - case 424: + case 421: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2231 { yyVAL.str = EqualStr } - case 425: + case 422: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2235 { yyVAL.str = LessThanStr } - case 426: + case 423: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2239 { yyVAL.str = GreaterThanStr } - case 427: + case 424: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2243 { yyVAL.str = LessEqualStr } - 
case 428: + case 425: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2247 { yyVAL.str = GreaterEqualStr } - case 429: + case 426: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2251 { yyVAL.str = NotEqualStr } - case 430: + case 427: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2255 { yyVAL.str = NullSafeEqualStr } - case 431: + case 428: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2260 { yyVAL.expr = nil } - case 432: + case 429: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2264 { yyVAL.expr = yyDollar[2].expr } - case 433: + case 430: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2270 { yyVAL.colTuple = yyDollar[1].valTuple } - case 434: + case 431: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2274 { yyVAL.colTuple = yyDollar[1].subquery } - case 435: + case 432: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2278 { yyVAL.colTuple = ListArg(yyDollar[1].bytes) } - case 436: + case 433: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2284 { yyVAL.subquery = &Subquery{yyDollar[2].selStmt} } - case 437: + case 434: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2290 { yyVAL.exprs = Exprs{yyDollar[1].expr} } - case 438: + case 435: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2294 { yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) } - case 439: + case 436: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2300 { yyVAL.expr = yyDollar[1].expr } - case 440: + case 437: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2304 { yyVAL.expr = yyDollar[1].boolVal } - case 441: + case 438: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2308 { yyVAL.expr = yyDollar[1].colName } - case 442: + case 439: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2312 { yyVAL.expr = yyDollar[1].expr } - case 443: + case 440: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2316 { yyVAL.expr = yyDollar[1].subquery } - case 444: + case 441: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2320 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} } - case 445: + case 442: 
yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2324 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} } - case 446: + case 443: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2328 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} } - case 447: + case 444: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2332 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} } - case 448: + case 445: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2336 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} } - case 449: + case 446: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2340 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} } - case 450: + case 447: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2344 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} } - case 451: + case 448: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2348 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} } - case 452: + case 449: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2352 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 453: + case 450: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2356 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 454: + case 451: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2360 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} } - case 455: + case 452: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2364 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} } - case 456: + case 453: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2368 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, 
Operator: JSONExtractOp, Right: yyDollar[3].expr} } - case 457: + case 454: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2372 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} } - case 458: + case 455: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2376 { yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} } - case 459: + case 456: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2380 { yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} } - case 460: + case 457: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2384 { yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} } - case 461: + case 458: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2388 { yyVAL.expr = &UnaryExpr{Operator: Utf8mb4Str, Expr: yyDollar[2].expr} } - case 462: + case 459: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2392 { @@ -5678,7 +5652,7 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} } } - case 463: + case 460: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2400 { @@ -5694,19 +5668,19 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} } } - case 464: + case 461: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2414 { yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} } - case 465: + case 462: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2418 { yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} } - case 466: + case 463: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2422 { @@ -5716,313 +5690,313 @@ yydefault: // will be non-trivial because of grammar conflicts. 
yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} } - case 471: + case 468: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2440 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} } - case 472: + case 469: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2444 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} } - case 473: + case 470: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2448 { yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} } - case 474: + case 471: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2458 { yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} } - case 475: + case 472: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2462 { yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} } - case 476: + case 473: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2466 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 477: + case 474: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2470 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 478: + case 475: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2474 { yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} } - case 479: + case 476: yyDollar = yyS[yypt-8 : yypt+1] //line sql.y:2478 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } - case 480: + case 477: yyDollar = yyS[yypt-8 : yypt+1] //line sql.y:2482 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } - case 481: + case 478: yyDollar = yyS[yypt-8 : yypt+1] //line sql.y:2486 { yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } - case 482: + case 479: yyDollar = 
yyS[yypt-8 : yypt+1] //line sql.y:2490 { yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } - case 483: + case 480: yyDollar = yyS[yypt-9 : yypt+1] //line sql.y:2494 { yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} } - case 484: + case 481: yyDollar = yyS[yypt-7 : yypt+1] //line sql.y:2498 { yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} } - case 485: + case 482: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2502 { yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} } - case 486: + case 483: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2506 { yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} } - case 487: + case 484: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2516 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} } - case 488: + case 485: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2520 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} } - case 489: + case 486: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2524 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} } - case 490: + case 487: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2529 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} } - case 491: + case 488: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2534 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} } - case 492: + case 489: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2539 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} } - case 493: + case 490: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2545 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} } - case 494: + case 491: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2550 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} } - case 495: + case 492: yyDollar = 
yyS[yypt-2 : yypt+1] //line sql.y:2555 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_timestamp"), Fsp: yyDollar[2].expr} } - case 496: + case 493: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2559 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_timestamp"), Fsp: yyDollar[2].expr} } - case 497: + case 494: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2563 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_time"), Fsp: yyDollar[2].expr} } - case 498: + case 495: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2568 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtime"), Fsp: yyDollar[2].expr} } - case 499: + case 496: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2573 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtimestamp"), Fsp: yyDollar[2].expr} } - case 500: + case 497: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2578 { yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_time"), Fsp: yyDollar[2].expr} } - case 501: + case 498: yyDollar = yyS[yypt-8 : yypt+1] //line sql.y:2582 { yyVAL.expr = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} } - case 502: + case 499: yyDollar = yyS[yypt-8 : yypt+1] //line sql.y:2586 { yyVAL.expr = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} } - case 505: + case 502: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2596 { yyVAL.expr = yyDollar[2].expr } - case 506: + case 503: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2606 { yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} } - case 507: + case 504: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2610 { yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} } - case 508: + case 505: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2614 { yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: 
yyDollar[3].selectExprs} } - case 509: + case 506: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2618 { yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} } - case 510: + case 507: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2622 { yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} } - case 511: + case 508: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2626 { yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} } - case 512: + case 509: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2632 { yyVAL.str = "" } - case 513: + case 510: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2636 { yyVAL.str = BooleanModeStr } - case 514: + case 511: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2640 { yyVAL.str = NaturalLanguageModeStr } - case 515: + case 512: yyDollar = yyS[yypt-7 : yypt+1] //line sql.y:2644 { yyVAL.str = NaturalLanguageModeWithQueryExpansionStr } - case 516: + case 513: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2648 { yyVAL.str = QueryExpansionStr } - case 517: + case 514: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2654 { yyVAL.str = string(yyDollar[1].bytes) } - case 518: + case 515: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2658 { yyVAL.str = string(yyDollar[1].bytes) } - case 519: + case 516: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2664 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } - case 520: + case 517: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2668 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} } - case 521: + case 518: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2672 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: string(yyDollar[3].bytes)} } - case 522: + case 519: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2676 { yyVAL.convertType = 
&ConvertType{Type: string(yyDollar[1].bytes)} } - case 523: + case 520: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2680 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } - case 524: + case 521: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2684 { @@ -6030,169 +6004,169 @@ yydefault: yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 525: + case 522: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2690 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 526: + case 523: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2694 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } - case 527: + case 524: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2698 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 528: + case 525: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2702 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 529: + case 526: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2706 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } - case 530: + case 527: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2710 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 531: + case 528: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2714 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 532: + case 529: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2719 { yyVAL.expr = nil } - case 533: + case 530: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2723 { yyVAL.expr = yyDollar[1].expr } - case 534: + case 531: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2728 { yyVAL.str = string("") } - case 535: + case 532: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2732 { yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" } - case 536: + case 533: 
yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2738 { yyVAL.whens = []*When{yyDollar[1].when} } - case 537: + case 534: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2742 { yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) } - case 538: + case 535: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2748 { yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} } - case 539: + case 536: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2753 { yyVAL.expr = nil } - case 540: + case 537: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2757 { yyVAL.expr = yyDollar[2].expr } - case 541: + case 538: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2763 { yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} } - case 542: + case 539: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2767 { yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} } - case 543: + case 540: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2771 { yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} } - case 544: + case 541: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2777 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } - case 545: + case 542: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2781 { yyVAL.expr = NewHexVal(yyDollar[1].bytes) } - case 546: + case 543: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2785 { yyVAL.expr = NewBitVal(yyDollar[1].bytes) } - case 547: + case 544: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2789 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 548: + case 545: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2793 { yyVAL.expr = NewFloatVal(yyDollar[1].bytes) } - case 549: + case 546: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2797 { yyVAL.expr = NewHexNum(yyDollar[1].bytes) } - case 550: + case 547: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2801 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 551: + case 548: yyDollar = yyS[yypt-1 : 
yypt+1] //line sql.y:2805 { yyVAL.expr = &NullVal{} } - case 552: + case 549: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2811 { @@ -6203,237 +6177,237 @@ yydefault: } yyVAL.expr = NewIntVal([]byte("1")) } - case 553: + case 550: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2820 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 554: + case 551: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2824 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 555: + case 552: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2829 { yyVAL.exprs = nil } - case 556: + case 553: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2833 { yyVAL.exprs = yyDollar[3].exprs } - case 557: + case 554: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2838 { yyVAL.expr = nil } - case 558: + case 555: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2842 { yyVAL.expr = yyDollar[2].expr } - case 559: + case 556: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2847 { yyVAL.orderBy = nil } - case 560: + case 557: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2851 { yyVAL.orderBy = yyDollar[3].orderBy } - case 561: + case 558: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2857 { yyVAL.orderBy = OrderBy{yyDollar[1].order} } - case 562: + case 559: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2861 { yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) } - case 563: + case 560: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2867 { yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} } - case 564: + case 561: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2872 { yyVAL.str = AscScr } - case 565: + case 562: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2876 { yyVAL.str = AscScr } - case 566: + case 563: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2880 { yyVAL.str = DescScr } - case 567: + case 564: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2885 { yyVAL.limit = nil } - case 568: + case 565: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2889 { yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} } - 
case 569: + case 566: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2893 { yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} } - case 570: + case 567: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2897 { yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} } - case 571: + case 568: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2902 { yyVAL.str = "" } - case 572: + case 569: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2906 { yyVAL.str = ForUpdateStr } - case 573: + case 570: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2910 { yyVAL.str = ShareModeStr } - case 574: + case 571: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2923 { yyVAL.ins = &Insert{Rows: yyDollar[2].values} } - case 575: + case 572: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2927 { yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} } - case 576: + case 573: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2931 { // Drop the redundant parenthesis. yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} } - case 577: + case 574: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2936 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} } - case 578: + case 575: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:2940 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} } - case 579: + case 576: yyDollar = yyS[yypt-6 : yypt+1] //line sql.y:2944 { // Drop the redundant parenthesis. 
yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} } - case 580: + case 577: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2951 { yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 581: + case 578: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2955 { yyVAL.columns = Columns{yyDollar[3].colIdent} } - case 582: + case 579: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2959 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } - case 583: + case 580: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2963 { yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) } - case 584: + case 581: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:2968 { yyVAL.updateExprs = nil } - case 585: + case 582: yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:2972 { yyVAL.updateExprs = yyDollar[5].updateExprs } - case 586: + case 583: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2978 { yyVAL.values = Values{yyDollar[1].valTuple} } - case 587: + case 584: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2982 { yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) } - case 588: + case 585: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2988 { yyVAL.valTuple = yyDollar[1].valTuple } - case 589: + case 586: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:2992 { yyVAL.valTuple = ValTuple{} } - case 590: + case 587: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2998 { yyVAL.valTuple = ValTuple(yyDollar[2].exprs) } - case 591: + case 588: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3004 { @@ -6443,312 +6417,312 @@ yydefault: yyVAL.expr = yyDollar[1].valTuple } } - case 592: + case 589: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3014 { yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} } - case 593: + case 590: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3018 { yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) } - case 594: + case 591: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3024 { yyVAL.updateExpr = &UpdateExpr{Name: 
yyDollar[1].colName, Expr: yyDollar[3].expr} } - case 595: + case 592: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3030 { yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} } - case 596: + case 593: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3034 { yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) } - case 597: + case 594: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3040 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} } - case 598: + case 595: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3044 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("off"))} } - case 599: + case 596: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3048 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} } - case 600: + case 597: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3052 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Expr: yyDollar[2].expr} } - case 602: + case 599: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:3059 { yyVAL.bytes = []byte("charset") } - case 604: + case 601: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3066 { yyVAL.expr = NewStrVal([]byte(yyDollar[1].colIdent.String())) } - case 605: + case 602: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3070 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } - case 606: + case 603: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3074 { yyVAL.expr = &Default{} } - case 609: + case 606: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3083 { yyVAL.byt = 0 } - case 610: + case 607: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:3085 { yyVAL.byt = 1 } - case 611: + case 608: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3088 { yyVAL.empty = struct{}{} } - case 612: + case 609: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:3090 { yyVAL.empty = struct{}{} } - case 613: + case 610: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3093 { yyVAL.str = "" } - case 614: + case 611: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3095 
{ yyVAL.str = IgnoreStr } - case 615: + case 612: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3099 { yyVAL.empty = struct{}{} } - case 616: + case 613: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3101 { yyVAL.empty = struct{}{} } - case 617: + case 614: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3103 { yyVAL.empty = struct{}{} } - case 618: + case 615: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3105 { yyVAL.empty = struct{}{} } - case 619: + case 616: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3107 { yyVAL.empty = struct{}{} } - case 620: + case 617: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3109 { yyVAL.empty = struct{}{} } - case 621: + case 618: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3111 { yyVAL.empty = struct{}{} } - case 622: + case 619: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3113 { yyVAL.empty = struct{}{} } - case 623: + case 620: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3115 { yyVAL.empty = struct{}{} } - case 624: + case 621: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3117 { yyVAL.empty = struct{}{} } - case 625: + case 622: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3120 { yyVAL.empty = struct{}{} } - case 626: + case 623: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3122 { yyVAL.empty = struct{}{} } - case 627: + case 624: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3124 { yyVAL.empty = struct{}{} } - case 628: + case 625: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3128 { yyVAL.empty = struct{}{} } - case 629: + case 626: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3130 { yyVAL.empty = struct{}{} } - case 630: + case 627: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3133 { yyVAL.empty = struct{}{} } - case 631: + case 628: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3135 { yyVAL.empty = struct{}{} } - case 632: + case 629: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3137 { yyVAL.empty = struct{}{} } - case 633: + case 630: yyDollar = yyS[yypt-0 : yypt+1] //line sql.y:3140 { yyVAL.colIdent = ColIdent{} } - case 634: + 
case 631: yyDollar = yyS[yypt-2 : yypt+1] //line sql.y:3142 { yyVAL.colIdent = yyDollar[2].colIdent } - case 635: + case 632: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3146 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 636: + case 633: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3150 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 638: + case 635: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3157 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 639: + case 636: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3163 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 640: + case 637: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3167 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 642: + case 639: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3174 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 853: + case 845: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3410 +//line sql.y:3405 { if incNesting(yylex) { yylex.Error("max nesting level reached") return 1 } } - case 854: + case 846: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3419 +//line sql.y:3414 { decNesting(yylex) } - case 855: + case 847: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3424 +//line sql.y:3419 { skipToEnd(yylex) } - case 856: + case 848: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3429 +//line sql.y:3424 { skipToEnd(yylex) } - case 857: + case 849: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3433 +//line sql.y:3428 { skipToEnd(yylex) } - case 858: + case 850: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3437 +//line sql.y:3432 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index 422bbc234a5..ed5fda40263 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -164,6 +164,7 @@ func skipToEnd(yylex interface{}) { %token MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER %token VINDEX VINDEXES %token STATUS VARIABLES WARNINGS 
+%token SEQUENCE // Transaction Tokens %token BEGIN START TRANSACTION COMMIT ROLLBACK @@ -181,7 +182,7 @@ func skipToEnd(yylex interface{}) { %token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL // Supported SHOW tokens -%token COLLATION DATABASES SCHEMAS TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA VSCHEMA_TABLES VITESS_TARGET FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS +%token COLLATION DATABASES TABLES VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS // SET tokens %token NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE @@ -1352,6 +1353,21 @@ alter_statement: }, } } +| ALTER VSCHEMA ADD SEQUENCE table_name + { + $$ = &DDL{Action: AddSequenceStr, Table: $5} + } +| ALTER VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name + { + $$ = &DDL{ + Action: AddAutoIncStr, + Table: $4, + AutoIncSpec: &AutoIncSpec{ + Column: $7, + Sequence: $9, + }, + } + } alter_object_type: COLUMN @@ -1499,10 +1515,6 @@ show_statement: { $$ = &Show{Type: string($2)} } -| SHOW SCHEMAS ddl_skip_to_end - { - $$ = &Show{Type: string($2)} - } | SHOW ENGINES { $$ = &Show{Type: string($2)} @@ -1560,22 +1572,6 @@ show_statement: showCollationFilterOpt := $4 $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} } -| SHOW VITESS_KEYSPACES - { - $$ = &Show{Type: string($2)} - } -| SHOW VITESS_SHARDS - { - $$ = &Show{Type: string($2)} - } -| SHOW VITESS_TABLETS - { - $$ = &Show{Type: string($2)} - } -| SHOW VITESS_TARGET - { - $$ = &Show{Type: string($2)} - } | SHOW VSCHEMA TABLES { $$ = &Show{Type: string($2) + " " + string($3)} @@ -1597,6 +1593,10 @@ show_statement: * * SHOW BINARY LOGS * SHOW INVALID + * SHOW VITESS_KEYSPACES + * SHOW VITESS_TABLETS + * SHOW VITESS_SHARDS + * SHOW VITESS_TARGET */ | SHOW ID ddl_skip_to_end { @@ -3364,7 +3364,7 @@ non_reserved_keyword: | REPEATABLE | RESTRICT | ROLLBACK -| SCHEMAS +| SEQUENCE | SESSION | SERIALIZABLE | SHARE @@ -3393,12 
+3393,7 @@ non_reserved_keyword: | VIEW | VINDEX | VINDEXES -| VITESS_KEYSPACES -| VITESS_SHARDS -| VITESS_TABLETS | VSCHEMA -| VSCHEMA_TABLES -| VITESS_TARGET | WARNINGS | WITH | WRITE diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index bc6a38adf37..0247cbf3cc6 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -318,11 +318,11 @@ var keywords = map[string]int{ "rlike": REGEXP, "rollback": ROLLBACK, "schema": SCHEMA, - "schemas": SCHEMAS, "second_microsecond": UNUSED, "select": SELECT, "sensitive": UNUSED, "separator": SEPARATOR, + "sequence": SEQUENCE, "serializable": SERIALIZABLE, "session": SESSION, "set": SET, @@ -391,12 +391,7 @@ var keywords = map[string]int{ "vindex": VINDEX, "vindexes": VINDEXES, "view": VIEW, - "vitess_keyspaces": VITESS_KEYSPACES, - "vitess_shards": VITESS_SHARDS, - "vitess_tablets": VITESS_TABLETS, - "vitess_target": VITESS_TARGET, "vschema": VSCHEMA, - "vschema_tables": VSCHEMA_TABLES, "warnings": WARNINGS, "when": WHEN, "where": WHERE, diff --git a/go/vt/tableacl/acl/acl.go b/go/vt/tableacl/acl/acl.go index 26e22a38433..cae56fa2c78 100644 --- a/go/vt/tableacl/acl/acl.go +++ b/go/vt/tableacl/acl/acl.go @@ -32,7 +32,7 @@ type Factory interface { New(entries []string) (ACL, error) } -// DenyAllACL implements ACL interface and alway deny access request. +// DenyAllACL implements ACL interface and always deny access request. type DenyAllACL struct{} // IsMember implements ACL.IsMember and always return false. @@ -40,7 +40,7 @@ func (acl DenyAllACL) IsMember(principal *querypb.VTGateCallerID) bool { return false } -// AcceptAllACL implements ACL interface and alway accept access request. +// AcceptAllACL implements ACL interface and always accept access request. type AcceptAllACL struct{} // IsMember implements ACL.IsMember and always return true. 
diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index 7cab0bf5681..8881f59d302 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -485,7 +485,7 @@ func (m *MaxReplicationLagModule) increaseRate(r *result, now time.Time, lagReco return } - // Calculate new rate based on the previous (preferrably highest good) rate. + // Calculate new rate based on the previous (preferably highest good) rate. highestGood := m.memory.highestGood() previousRateSource := "highest known good rate" previousRate := float64(highestGood) diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index e5537e64f24..4d2eef4263c 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -127,7 +127,7 @@ func TestMaxReplicationLagModule_InitialStateAndWait(t *testing.T) { } } -// TestMaxReplicationLagModule_Increase tests only the continous increase of the +// TestMaxReplicationLagModule_Increase tests only the continuous increase of the // rate and assumes that we are well below the replica capacity. func TestMaxReplicationLagModule_Increase(t *testing.T) { tf, err := newTestFixtureWithMaxReplicationLag(5) diff --git a/go/vt/throttler/replication_lag_cache_test.go b/go/vt/throttler/replication_lag_cache_test.go index 7e00366674d..e3e06896aa7 100644 --- a/go/vt/throttler/replication_lag_cache_test.go +++ b/go/vt/throttler/replication_lag_cache_test.go @@ -32,7 +32,7 @@ func TestReplicationLagCache(t *testing.T) { // If there is no entry yet, a zero struct is returned. zeroEntry := c.atOrAfter(r1Key, sinceZero(0*time.Second)) if !zeroEntry.isZero() { - t.Fatalf("atOrAfter() should have returned a zero entry but did not: %v", zeroEntry) + t.Fatalf("atOrAfter() should have returned a zero entry but did not: %v", zeroEntry) } // First entry at 1s. 
diff --git a/go/vt/throttler/throttlerclient/throttlerclient.go b/go/vt/throttler/throttlerclient/throttlerclient.go index 4abe73b5538..cfcbd05ef8e 100644 --- a/go/vt/throttler/throttlerclient/throttlerclient.go +++ b/go/vt/throttler/throttlerclient/throttlerclient.go @@ -29,7 +29,7 @@ import ( throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) -// protocol specifices which RPC client implementation should be used. +// protocol specifics which RPC client implementation should be used. var protocol = flag.String("throttler_client_protocol", "grpc", "the protocol to use to talk to the integrated throttler service") // Client defines the generic RPC interface for the throttler service. diff --git a/go/vt/throttler/throttlerlogz_test.go b/go/vt/throttler/throttlerlogz_test.go index d94fa86321c..a7651f883fd 100644 --- a/go/vt/throttler/throttlerlogz_test.go +++ b/go/vt/throttler/throttlerlogz_test.go @@ -42,7 +42,7 @@ func TestThrottlerlogzHandler_NonExistantThrottler(t *testing.T) { throttlerlogzHandler(response, request, newManager()) if got, want := response.Body.String(), `throttler: t1 does not exist`; !strings.Contains(got, want) { - t.Fatalf("/throttlerlogz page for non-existant t1 should not succeed. got = %v, want = %v", got, want) + t.Fatalf("/throttlerlogz page for non-existent t1 should not succeed. got = %v, want = %v", got, want) } } diff --git a/go/vt/topo/cells_aliases.go b/go/vt/topo/cells_aliases.go index 6488a095aba..c751bd1da84 100644 --- a/go/vt/topo/cells_aliases.go +++ b/go/vt/topo/cells_aliases.go @@ -89,9 +89,8 @@ func (ts *Server) CreateCellsAlias(ctx context.Context, alias string, cellsAlias return err } - if overlappingAliases(currentAliases, cellsAlias) { - return fmt.Errorf("unsupported: you can't over overlapping aliases. 
Cells alias: %v, has an overlap with existent aliases", cellsAlias) - + if err := validateAlias(currentAliases, alias, cellsAlias); err != nil { + return fmt.Errorf("cells alias %v is not valid: %v", alias, err) } ts.clearCellAliasesCache() @@ -114,13 +113,13 @@ func (ts *Server) UpdateCellsAlias(ctx context.Context, alias string, update fun filePath := pathForCellsAlias(alias) for { - ca := &topodatapb.CellsAlias{} + cellsAlias := &topodatapb.CellsAlias{} // Read the file, unpack the contents. contents, version, err := ts.globalCell.Get(ctx, filePath) switch { case err == nil: - if err := proto.Unmarshal(contents, ca); err != nil { + if err := proto.Unmarshal(contents, cellsAlias); err != nil { return err } case IsErrType(err, NoNode): @@ -130,7 +129,7 @@ func (ts *Server) UpdateCellsAlias(ctx context.Context, alias string, update fun } // Call update method. - if err = update(ca); err != nil { + if err = update(cellsAlias); err != nil { if IsErrType(err, NoUpdateNeeded) { return nil } @@ -142,13 +141,12 @@ func (ts *Server) UpdateCellsAlias(ctx context.Context, alias string, update fun return err } - if overlappingAliases(currentAliases, ca) { - return fmt.Errorf("unsupported: you can't over overlapping aliases. Cells alias: %v, has an overlap with existent aliases", ca) - + if err := validateAlias(currentAliases, alias, cellsAlias); err != nil { + return fmt.Errorf("cells alias %v is not valid: %v", alias, err) } // Pack and save. 
- contents, err = proto.Marshal(ca) + contents, err = proto.Marshal(cellsAlias) if err != nil { return err } @@ -159,16 +157,21 @@ func (ts *Server) UpdateCellsAlias(ctx context.Context, alias string, update fun } } -func overlappingAliases(currentAliases map[string]*topodatapb.CellsAlias, newAlias *topodatapb.CellsAlias) bool { - for _, cellsAlias := range currentAliases { - for _, cell := range cellsAlias.Cells { - for _, newCell := range newAlias.Cells { - if cell == newCell { - return true - } - } +// validateAlias checks whether the given alias is allowed. +// If the alias overlaps with any existing alias other than itself, this returns +// a non-nil error. +func validateAlias(currentAliases map[string]*topodatapb.CellsAlias, newAliasName string, newAlias *topodatapb.CellsAlias) error { + for name, alias := range currentAliases { + // Skip the alias we're checking against. It's allowed to overlap with itself. + if name == newAliasName { + continue } + for _, cell := range alias.Cells { + if InCellList(cell, newAlias.Cells) { + return fmt.Errorf("cell set overlaps with existing alias %v", name) + } + } } - return false + return nil } diff --git a/go/vt/topo/cells_aliases_test.go b/go/vt/topo/cells_aliases_test.go new file mode 100644 index 00000000000..fe500f1d344 --- /dev/null +++ b/go/vt/topo/cells_aliases_test.go @@ -0,0 +1,78 @@ +package topo + +import ( + "strings" + "testing" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestValidateAlias(t *testing.T) { + table := []struct { + currentAliases map[string][]string + newAliasName string + newAlias []string + wantErrMsg string + }{ + { + currentAliases: map[string][]string{ + "alias1": {"cell_a", "cell_b"}, + "alias2": {"cell_c", "cell_d"}, + }, + newAliasName: "overlaps_alias1", + newAlias: []string{"cell_x", "cell_b"}, + wantErrMsg: "alias1", + }, + { + currentAliases: map[string][]string{ + "alias1": {"cell_a", "cell_b"}, + "alias2": {"cell_c", "cell_d"}, + }, + newAliasName: 
"overlaps_alias2", + newAlias: []string{"cell_x", "cell_c"}, + wantErrMsg: "alias2", + }, + { + currentAliases: map[string][]string{ + "alias1": {"cell_a", "cell_b"}, + "alias2": {"cell_c", "cell_d"}, + }, + newAliasName: "no_overlap", + newAlias: []string{"cell_x", "cell_y"}, + wantErrMsg: "", + }, + { + currentAliases: map[string][]string{ + "overlaps_self": {"cell_a", "cell_b"}, + "alias2": {"cell_c", "cell_d"}, + }, + newAliasName: "overlaps_self", + newAlias: []string{"cell_a", "cell_b", "cell_x"}, + wantErrMsg: "", + }, + } + + for _, test := range table { + currentAliases := map[string]*topodatapb.CellsAlias{} + for name, cells := range test.currentAliases { + currentAliases[name] = &topodatapb.CellsAlias{Cells: cells} + } + newAlias := &topodatapb.CellsAlias{Cells: test.newAlias} + + gotErr := validateAlias(currentAliases, test.newAliasName, newAlias) + if test.wantErrMsg == "" { + // Expect success. + if gotErr != nil { + t.Errorf("validateAlias(%v) error = %q; want nil", test.newAliasName, gotErr.Error()) + } + } else { + // Expect failure. 
+ if gotErr == nil { + t.Errorf("validateAlias(%v) error = nil; want non-nil", test.newAliasName) + } + if got, want := gotErr.Error(), test.wantErrMsg; !strings.Contains(got, want) { + t.Errorf("validateAlias(%v) error = %q; want *%q*", test.newAliasName, got, want) + } + } + } +} diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index d68320a668a..7c0361cc1ff 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -82,7 +82,7 @@ func (ki *KeyspaceInfo) CheckServedFromMigration(tabletType topodatapb.TabletTyp // check the keyspace is consistent in any case for _, ksf := range ki.ServedFroms { if ksf.Keyspace != keyspace { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "inconsistent keypace specified in migration: %v != %v for type %v", keyspace, ksf.Keyspace, ksf.TabletType) + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "inconsistent keyspace specified in migration: %v != %v for type %v", keyspace, ksf.Keyspace, ksf.TabletType) } } diff --git a/go/vt/topo/keyspace_test.go b/go/vt/topo/keyspace_test.go index ac9ed109c05..e8883ec7baf 100644 --- a/go/vt/topo/keyspace_test.go +++ b/go/vt/topo/keyspace_test.go @@ -95,7 +95,7 @@ func TestUpdateServedFromMap(t *testing.T) { } // couple error cases - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"second"}, "othersource", true, allCells); err == nil || (err.Error() != "inconsistent keypace specified in migration: othersource != source for type MASTER" && err.Error() != "inconsistent keypace specified in migration: othersource != source for type RDONLY") { + if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"second"}, "othersource", true, allCells); err == nil || (err.Error() != "inconsistent keyspace specified in migration: othersource != source for type MASTER" && err.Error() != "inconsistent keyspace specified in migration: othersource != source for type RDONLY") { t.Fatalf("different keyspace should fail: %v", err) } if err := 
ki.UpdateServedFromMap(topodatapb.TabletType_MASTER, nil, "source", true, allCells); err == nil || err.Error() != "cannot migrate master into ks until everything else is migrated" { diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index e6d2a877a13..22803bc3002 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -83,7 +83,7 @@ func IsRunningQueryService(tt topodatapb.TabletType) bool { // them as fast as possible. // // Replica and rdonly will use lameduck when going from healthy to -// unhealhty (either because health check fails, or they're shutting down). +// unhealthy (either because health check fails, or they're shutting down). // // Other types are probably not serving user visible traffic, so they // need to transition as fast as possible too. diff --git a/go/vt/topo/vschema.go b/go/vt/topo/vschema.go index d4c9e0b7192..fcd7990b007 100644 --- a/go/vt/topo/vschema.go +++ b/go/vt/topo/vschema.go @@ -67,7 +67,7 @@ func (ts *Server) GetVSchema(ctx context.Context, keyspace string) (*vschemapb.K return &vs, nil } -// EnsureVschema makes sure that a vschema is present for this keyspace are creates a blank one if it is missing +// EnsureVSchema makes sure that a vschema is present for this keyspace or creates a blank one if it is missing func (ts *Server) EnsureVSchema(ctx context.Context, keyspace string) error { vschema, err := ts.GetVSchema(ctx, keyspace) if vschema == nil || IsErrType(err, NoNode) { @@ -99,7 +99,10 @@ func (ts *Server) SaveRoutingRules(ctx context.Context, routingRules *vschemapb. if len(data) == 0 { // No vschema, remove it. So we can remove the keyspace. 
- return ts.globalCell.Delete(ctx, RoutingRulesFile, nil) + if err := ts.globalCell.Delete(ctx, RoutingRulesFile, nil); err != nil && !IsErrType(err, NoNode) { + return err + } + return nil } _, err = ts.globalCell.Update(ctx, RoutingRulesFile, data, nil) diff --git a/go/vt/topotools/vschema_ddl.go b/go/vt/topotools/vschema_ddl.go index f6ccdbc99bc..d9b1330021a 100644 --- a/go/vt/topotools/vschema_ddl.go +++ b/go/vt/topotools/vschema_ddl.go @@ -17,6 +17,7 @@ limitations under the License. package topotools import ( + "fmt" "reflect" "vitess.io/vitess/go/vt/sqlparser" @@ -180,6 +181,44 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl *sqlparser.DDL) } } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex %s not defined in table %s.%s", name, ksName, tableName) + + case sqlparser.AddSequenceStr: + if ks.Sharded { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "add sequence table: unsupported on sharded keyspace %s", ksName) + } + + name := ddl.Table.Name.String() + if _, ok := ks.Tables[name]; ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains sequence %s in keyspace %s", name, ksName) + } + + ks.Tables[name] = &vschemapb.Table{Type: "sequence"} + + return ks, nil + + case sqlparser.AddAutoIncStr: + name := ddl.Table.Name.String() + table := ks.Tables[name] + if table == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain table %s in keyspace %s", name, ksName) + } + + if table.AutoIncrement != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains auto inc %v on table %s in keyspace %s", table.AutoIncrement, name, ksName) + } + + sequence := ddl.AutoIncSpec.Sequence + sequenceFqn := sequence.Name.String() + if sequence.Qualifier.String() != "" { + sequenceFqn = fmt.Sprintf("%s.%s", sequence.Qualifier.String(), sequenceFqn) + } + + table.AutoIncrement = &vschemapb.AutoIncrement{ + Column: ddl.AutoIncSpec.Column.String(), + Sequence: 
sequenceFqn, + } + + return ks, nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected vindex ddl operation %s", ddl.Action) diff --git a/go/vt/vtctl/cells_aliases.go b/go/vt/vtctl/cells_aliases.go index 4ffd67b14bd..64ab3c9c31d 100644 --- a/go/vt/vtctl/cells_aliases.go +++ b/go/vt/vtctl/cells_aliases.go @@ -39,7 +39,7 @@ func init() { "AddCellsAlias", commandAddCellsAlias, "[-cells ] ", - "Registers a local topology service in a new cell by creating the CellsAlias with the provided parameters. An alis provides a group cells that replica/rdonly can route. By default, vitess won't route traffic cross cells for replica/rdonly tablets. Aliases provide a way to create groups where this is allowed."}) + "Defines a group of cells within which replica/rdonly traffic can be routed across cells. Between cells that are not in the same group (alias), only master traffic can be routed."}) addCommand(cellsAliasesGroupName, command{ "UpdateCellsAlias", @@ -61,7 +61,7 @@ func init() { } func commandAddCellsAlias(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cellsString := subFlags.String("cells", "", "The address the topology server is using for that cell.") + cellsString := subFlags.String("cells", "", "The list of cell names that are members of this alias.") if err := subFlags.Parse(args); err != nil { return err } @@ -82,7 +82,7 @@ func commandAddCellsAlias(ctx context.Context, wr *wrangler.Wrangler, subFlags * } func commandUpdateCellsAlias(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cellsString := subFlags.String("cells", "", "The address the topology server is using for that cell.") + cellsString := subFlags.String("cells", "", "The list of cell names that are members of this alias.") if err := subFlags.Parse(args); err != nil { return err } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 018e007fdb5..94cb8744a61 100644 --- a/go/vt/vtctl/vtctl.go +++ 
b/go/vt/vtctl/vtctl.go @@ -320,6 +320,12 @@ var commands = []commandGroup{ {"MigrateServedFrom", commandMigrateServedFrom, "[-cells=c1,c2,...] [-reverse] ", "Makes the serve the given type. This command also rebuilds the serving graph."}, + {"MigrateReads", commandMigrateReads, + "[-cells=c1,c2,...] [-reverse] -workflow=workflow ", + "Migrate read traffic for the specified workflow."}, + {"MigrateWrites", commandMigrateWrites, + "[-filtered_replication_wait_time=30s] -workflow=workflow ", + "Migrate write traffic for the specified workflow."}, {"CancelResharding", commandCancelResharding, "", "Permanently cancels a resharding in progress. All resharding related metadata will be deleted."}, @@ -331,7 +337,7 @@ var commands = []commandGroup{ "Displays all of the shards in the specified keyspace."}, {"WaitForDrain", commandWaitForDrain, "[-timeout ] [-retry_delay ] [-initial_wait ] ", - "Blocks until no new queries were observed on all tablets with the given tablet type in the specifed keyspace. " + + "Blocks until no new queries were observed on all tablets with the given tablet type in the specified keyspace. " + " This can be used as sanity check to ensure that the tablets were drained after running vtctl MigrateServedTypes " + " and vtgate is no longer using them. If -timeout is set, it fails when the timeout is reached."}, }, @@ -406,7 +412,7 @@ var commands = []commandGroup{ "", "Displays the VSchema routing rules."}, {"ApplyRoutingRules", commandApplyRoutingRules, - "{-rules= || -rules_file=} [-cells=c1,c2,...] [-skip_rebuild] [-dry-run]", + "{-rules= || -rules_file=} [-cells=c1,c2,...] 
[-skip_rebuild] [-dry-run]", "Applies the VSchema routing rules."}, {"RebuildVSchemaGraph", commandRebuildVSchemaGraph, "[-cells=c1,c2,...]", @@ -1760,9 +1766,9 @@ func commandVerticalSplitClone(ctx context.Context, wr *wrangler.Wrangler, subFl func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") skipReFreshState := subFlags.Bool("skip-refresh-state", false, "Skips refreshing the state of the source tablets after the migration, meaning that the refresh will need to be done manually, replica and rdonly only)") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") reverseReplication := subFlags.Bool("reverse_replication", false, "For master migration, enabling this flag reverses replication which allows you to rollback") if err := subFlags.Parse(args); err != nil { return err @@ -1790,9 +1796,9 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl } func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") if err := subFlags.Parse(args); err != nil { return err } @@ -1815,6 +1821,58 @@ func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFla return wr.MigrateServedFrom(ctx, keyspace, shard, servedType, cells, *reverse, *filteredReplicationWaitTime) } +func commandMigrateReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + workflow := subFlags.String("workflow", "", "Specifies the workflow name") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 2 { + return fmt.Errorf("the and arguments are required for the MigrateReads command") + } + + keyspace := subFlags.Arg(0) + servedType, err := parseTabletType(subFlags.Arg(1), []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}) + if err != nil { + return err + } + var cells []string + if *cellsStr != "" { + cells = strings.Split(*cellsStr, ",") + } + direction := wrangler.DirectionForward + if *reverse { + direction = wrangler.DirectionBackward + } + if *workflow == "" { + return fmt.Errorf("a 
-workflow=workflow argument is required") + } + return wr.MigrateReads(ctx, keyspace, *workflow, servedType, cells, direction) +} + +func commandMigrateWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") + workflow := subFlags.String("workflow", "", "Specifies the workflow name") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 1 { + return fmt.Errorf("the argument is required for the MigrateWrites command") + } + + keyspace := subFlags.Arg(0) + if *workflow == "" { + return fmt.Errorf("a -workflow=workflow argument is required") + } + journalID, err := wr.MigrateWrites(ctx, keyspace, *workflow, *filteredReplicationWaitTime) + if err != nil { + return err + } + wr.Logger().Infof("Migration Journal ID: %v", journalID) + return nil +} + func commandCancelResharding(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { if err := subFlags.Parse(args); err != nil { return err @@ -2333,7 +2391,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f func commandApplyRoutingRules(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { routingRules := subFlags.String("rules", "", "Specify rules as a string") - routingRulesFile := subFlags.String("vschema_file", "", "Specify rules in a file") + routingRulesFile := subFlags.String("rules_file", "", "Specify rules in a file") skipRebuild := subFlags.Bool("skip_rebuild", false, "If set, do no rebuild the SrvSchema objects.") var cells flagutil.StringListValue subFlags.Var(&cells, "cells", "If specified, limits the rebuild to the cells, after upload. 
Ignored if skipRebuild is set.") diff --git a/go/vt/vtctl/vtctlclient/interface.go b/go/vt/vtctl/vtctlclient/interface.go index 634879eac38..1626505e9a7 100644 --- a/go/vt/vtctl/vtctlclient/interface.go +++ b/go/vt/vtctl/vtctlclient/interface.go @@ -28,7 +28,7 @@ import ( "vitess.io/vitess/go/vt/logutil" ) -// vtctlClientProtocol specifices which RPC client implementation should be used. +// vtctlClientProtocol specifies which RPC client implementation should be used. var vtctlClientProtocol = flag.String("vtctl_client_protocol", "grpc", "the protocol to use to talk to the vtctl server") // VtctlClient defines the interface used to send remote vtctl commands diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index ca9e9eeb399..9967e1b701c 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -77,7 +77,7 @@ type Buffer struct { // shards is a set of keyspace/shard entries to which buffering is limited. // If empty (and *enabled==true), buffering is enabled for all shards. shards map[string]bool - // now returns the current time. Overriden in tests. + // now returns the current time. Overridden in tests. now func() time.Time // bufferSizeSema limits how many requests can be buffered @@ -134,7 +134,7 @@ func newWithNow(now func() time.Time) *Buffer { limited = header + limited dryRunOverride := "" if *enabledDryRun { - dryRunOverride = " Dry-run mode is overriden for these entries and actual buffering will take place." + dryRunOverride = " Dry-run mode is overridden for these entries and actual buffering will take place." 
} log.Infof("%v.%v", limited, dryRunOverride) } diff --git a/go/vt/vtgate/endtoend/deletetest/delete_test.go b/go/vt/vtgate/endtoend/deletetest/delete_test.go index 647556cb151..9f0cb6ceb79 100644 --- a/go/vt/vtgate/endtoend/deletetest/delete_test.go +++ b/go/vt/vtgate/endtoend/deletetest/delete_test.go @@ -32,10 +32,11 @@ import ( ) var ( - cluster *vttest.LocalCluster - vtParams mysql.ConnParams - mysqlParams mysql.ConnParams - grpcAddress string + cluster *vttest.LocalCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + grpcAddress string + tabletHostName = flag.String("tablet_hostname", "", "the tablet hostname") schema = ` create table t1( @@ -134,6 +135,8 @@ func TestMain(m *testing.M) { } defer os.RemoveAll(cfg.SchemaDir) + cfg.TabletHostName = *tabletHostName + cluster = &vttest.LocalCluster{ Config: cfg, } diff --git a/go/vt/vtgate/endtoend/main_test.go b/go/vt/vtgate/endtoend/main_test.go index bfc17878101..da52636dcec 100644 --- a/go/vt/vtgate/endtoend/main_test.go +++ b/go/vt/vtgate/endtoend/main_test.go @@ -32,10 +32,11 @@ import ( ) var ( - cluster *vttest.LocalCluster - vtParams mysql.ConnParams - mysqlParams mysql.ConnParams - grpcAddress string + cluster *vttest.LocalCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + grpcAddress string + tabletHostName = flag.String("tablet_hostname", "", "the tablet hostname") schema = ` create table t1( @@ -178,6 +179,8 @@ func TestMain(m *testing.M) { } defer os.RemoveAll(cfg.SchemaDir) + cfg.TabletHostName = *tabletHostName + cluster = &vttest.LocalCluster{ Config: cfg, } diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index ed00cc5c622..72dc3dedbc8 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -52,13 +52,13 @@ type Insert struct { Query string // VindexValues specifies values for all the vindex columns. 
- // This is a three-dimensonal data structure: + // This is a three-dimensional data structure: // Insert.Values[i] represents the values to be inserted for the i'th colvindex (i < len(Insert.Table.ColumnVindexes)) // Insert.Values[i].Values[j] represents values for the j'th column of the given colVindex (j < len(colVindex[i].Columns) // Insert.Values[i].Values[j].Values[k] represents the value pulled from row k for that column: (k < len(ins.rows)) VindexValues []sqltypes.PlanValue - // Table sepcifies the table for the insert. + // Table specifies the table for the insert. Table *vindexes.Table // Generate is only set for inserts where a sequence must be generated. @@ -161,7 +161,7 @@ type Generate struct { } // InsertOpcode is a number representing the opcode -// for the Insert primitve. +// for the Insert primitive. type InsertOpcode int const ( @@ -464,7 +464,7 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q return rss, queries, nil } -// processPrimary maps the primary vindex values to the kesypace ids. +// processPrimary maps the primary vindex values to the keyspace ids. func (ins *Insert) processPrimary(vcursor VCursor, vindexKeys [][]sqltypes.Value, colVindex *vindexes.ColumnVindex, bv map[string]*querypb.BindVariable) ([][]byte, error) { var flattenedVindexKeys []sqltypes.Value // TODO: @rafael - this will change once vindex Primary keys also support multicolumns diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index 99a53a3f7a2..9ef8dfbfd9d 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -266,7 +266,7 @@ func TestMergeSortResultFailures(t *testing.T) { } func TestMergeSortDataFailures(t *testing.T) { - // The first row being bad fails in a differnt code path than + // The first row being bad fails in a different code path than // the case of subsequent rows. So, test the two cases separately. 
idColFields := sqltypes.MakeTestFields("id|col", "int32|varchar") vc := &streamVCursor{ @@ -329,7 +329,7 @@ type streamVCursor struct { } // StreamExecuteMulti streams a result from the specified shard. -// The shard is specifed by the only entry in shardVars. At the +// The shard is specified by the only entry in shardVars. At the // end of a stream, if sendErr is set, that error is returned. func (t *streamVCursor) StreamExecuteMulti(query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, callback func(reply *sqltypes.Result) error) error { shard := rss[0].Target.Shard diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index 93b013047cc..9e652727ec8 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -19,6 +19,9 @@ package engine import ( "fmt" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -179,6 +182,17 @@ func (oa *OrderedAggregate) execute(vcursor VCursor, bindVars map[string]*queryp out.Rows = append(out.Rows, current) current, curDistinct = oa.convertRow(row) } + + if len(result.Rows) == 0 && len(oa.Keys) == 0 { + // When doing aggregation without grouping keys, we need to produce a single row containing zero-value for the + // different aggregation functions + row, err := oa.createEmptyRow() + if err != nil { + return nil, err + } + out.Rows = append(out.Rows, row) + } + if current != nil { out.Rows = append(out.Rows, current) } @@ -327,15 +341,15 @@ func (oa *OrderedAggregate) merge(fields []*querypb.Field, row1, row2 []sqltypes var err error switch aggr.Opcode { case AggregateCount, AggregateSum: - result[aggr.Col], err = sqltypes.NullsafeAdd(row1[aggr.Col], row2[aggr.Col], fields[aggr.Col].Type) + result[aggr.Col] = sqltypes.NullsafeAdd(row1[aggr.Col], row2[aggr.Col], fields[aggr.Col].Type) case AggregateMin: 
result[aggr.Col], err = sqltypes.Min(row1[aggr.Col], row2[aggr.Col]) case AggregateMax: result[aggr.Col], err = sqltypes.Max(row1[aggr.Col], row2[aggr.Col]) case AggregateCountDistinct: - result[aggr.Col], err = sqltypes.NullsafeAdd(row1[aggr.Col], countOne, opcodeType[aggr.Opcode]) + result[aggr.Col] = sqltypes.NullsafeAdd(row1[aggr.Col], countOne, opcodeType[aggr.Opcode]) case AggregateSumDistinct: - result[aggr.Col], err = sqltypes.NullsafeAdd(row1[aggr.Col], row2[aggr.Col], opcodeType[aggr.Opcode]) + result[aggr.Col] = sqltypes.NullsafeAdd(row1[aggr.Col], row2[aggr.Col], opcodeType[aggr.Opcode]) default: return nil, sqltypes.NULL, fmt.Errorf("BUG: Unexpected opcode: %v", aggr.Opcode) } @@ -345,3 +359,26 @@ func (oa *OrderedAggregate) merge(fields []*querypb.Field, row1, row2 []sqltypes } return result, curDistinct, nil } + +// creates the empty row for the case when we are missing grouping keys and have empty input table +func (oa *OrderedAggregate) createEmptyRow() ([]sqltypes.Value, error) { + out := make([]sqltypes.Value, len(oa.Aggregates)) + for i, aggr := range oa.Aggregates { + value, err := createEmptyValueFor(aggr.Opcode) + if err != nil { + return nil, err + } + out[i] = value + } + return out, nil +} + +func createEmptyValueFor(opcode AggregateOpcode) (sqltypes.Value, error) { + switch opcode { + case AggregateCountDistinct: + return countZero, nil + case AggregateSumDistinct: + return sqltypes.NULL, nil + } + return sqltypes.NULL, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "unknown aggregation %v", opcode) +} diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index d9f7df1e508..d4201fad66c 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -21,10 +21,13 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/assert" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) func 
TestOrderedAggregateExecute(t *testing.T) { + assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -50,9 +53,7 @@ func TestOrderedAggregateExecute(t *testing.T) { } result, err := oa.Execute(nil, nil, false) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( fields, @@ -60,12 +61,11 @@ func TestOrderedAggregateExecute(t *testing.T) { "b|2", "c|7", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + assert.Equal(wantResult, result) } func TestOrderedAggregateExecuteTruncate(t *testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -91,9 +91,7 @@ func TestOrderedAggregateExecuteTruncate(t *testing.T) { } result, err := oa.Execute(nil, nil, false) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -104,12 +102,11 @@ func TestOrderedAggregateExecuteTruncate(t *testing.T) { "b|2", "C|7", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + assert.Equal(wantResult, result) } func TestOrderedAggregateStreamExecute(t *testing.T) { + assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -139,9 +136,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { results = append(results, qr) return nil }) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResults := sqltypes.MakeTestStreamingResults( fields, @@ -151,12 +146,11 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { "---", "c|7", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("oa.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults)) - } + assert.Equal(wantResults, results) } func TestOrderedAggregateStreamExecuteTruncate(t 
*testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -186,9 +180,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { results = append(results, qr) return nil }) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -201,32 +193,28 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { "---", "C|7", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("oa.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults)) - } + assert.Equal(wantResults, results) } func TestOrderedAggregateGetFields(t *testing.T) { - result := sqltypes.MakeTestResult( + assert := assert.New(t) + input := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", ), ) - fp := &fakePrimitive{results: []*sqltypes.Result{result}} + fp := &fakePrimitive{results: []*sqltypes.Result{input}} oa := &OrderedAggregate{Input: fp} got, err := oa.GetFields(nil, nil) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(got, result) { - t.Errorf("oa.GetFields:\n%v, want\n%v", got, result) - } + assert.NoError(err) + assert.Equal(got, input) } func TestOrderedAggregateGetFieldsTruncate(t *testing.T) { + assert := assert.New(t) result := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)|weight_string(col)", @@ -241,18 +229,14 @@ func TestOrderedAggregateGetFieldsTruncate(t *testing.T) { } got, err := oa.GetFields(nil, nil) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", ), ) - if !reflect.DeepEqual(got, wantResult) { - t.Errorf("oa.GetFields:\n%v, want\n%v", got, wantResult) - } + assert.Equal(wantResult, got) } func TestOrderedAggregateInputFail(t *testing.T) { @@ -277,6 +261,7 @@ func 
TestOrderedAggregateInputFail(t *testing.T) { } func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -331,9 +316,7 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { } result, err := oa.Execute(nil, nil, false) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -350,12 +333,11 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { "h|3|4", "i|2|2", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + assert.Equal(wantResult, result) } func TestOrderedAggregateStreamCountDistinct(t *testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -414,9 +396,7 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { results = append(results, qr) return nil }) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -441,12 +421,11 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { "-----", "i|2|2", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("oa.Execute:\n%v, want\n%v", results, wantResults) - } + assert.Equal(wantResults, results) } func TestOrderedAggregateSumDistinctGood(t *testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -501,9 +480,7 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { } result, err := oa.Execute(nil, nil, false) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -520,12 +497,11 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { "h|6|4", "i|7|2", ) - if !reflect.DeepEqual(result, 
wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + assert.Equal(wantResult, result) } func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { + assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -550,9 +526,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { } result, err := oa.Execute(nil, nil, false) - if err != nil { - t.Error(err) - } + assert.NoError(err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -561,9 +535,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { ), "a|1", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + assert.Equal(wantResult, result) } func TestOrderedAggregateKeysFail(t *testing.T) { @@ -621,18 +593,44 @@ func TestOrderedAggregateMergeFail(t *testing.T) { Input: fp, } - want := "could not parse value: 'b'" - if _, err := oa.Execute(nil, nil, false); err == nil || err.Error() != want { - t.Errorf("oa.Execute(): %v, want %s", err, want) + result := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "col", + Type: querypb.Type_VARBINARY, + }, + { + Name: "count(*)", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARBINARY, []byte("a")), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("1")), + }, + }, + RowsAffected: 1, + } + + res, err := oa.Execute(nil, nil, false) + if err != nil { + t.Errorf("oa.Execute() failed: %v", err) + } + + if !reflect.DeepEqual(res, result) { + t.Fatalf("Found mismatched values: want %v, got %v", result, res) } fp.rewind() - if err := oa.StreamExecute(nil, nil, false, func(_ *sqltypes.Result) error { return nil }); err == nil || err.Error() != want { - t.Errorf("oa.StreamExecute(): %v, want %s", err, want) + if err := oa.StreamExecute(nil, nil, false, func(_ *sqltypes.Result) error { return nil }); err != nil 
{ + t.Errorf("oa.StreamExecute(): %v", err) } + } func TestMerge(t *testing.T) { + assert := assert.New(t) oa := &OrderedAggregate{ Aggregates: []AggregateParams{{ Opcode: AggregateCount, @@ -658,20 +656,52 @@ func TestMerge(t *testing.T) { ) merged, _, err := oa.merge(fields, r.Rows[0], r.Rows[1], sqltypes.NULL) - if err != nil { - t.Error(err) - } + assert.NoError(err) want := sqltypes.MakeTestResult(fields, "1|5|6|2|bc").Rows[0] - if !reflect.DeepEqual(merged, want) { - t.Errorf("oa.merge(row1, row2): %v, want %v", merged, want) - } + assert.Equal(want, merged) // swap and retry merged, _, err = oa.merge(fields, r.Rows[1], r.Rows[0], sqltypes.NULL) - if err != nil { - t.Error(err) + assert.NoError(err) + assert.Equal(want, merged) +} + +func TestNoInputAndNoGroupingKeys(t *testing.T) { + assert := assert.New(t) + fp := &fakePrimitive{ + results: []*sqltypes.Result{sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col1|col2", + "int64|int64", + ), + // Empty input table + )}, } - if !reflect.DeepEqual(merged, want) { - t.Errorf("oa.merge(row1, row2): %v, want %v", merged, want) + + oa := &OrderedAggregate{ + HasDistinct: true, + Aggregates: []AggregateParams{{ + Opcode: AggregateCountDistinct, + Col: 0, + Alias: "count(distinct col2)", + }, { + Opcode: AggregateSumDistinct, + Col: 1, + Alias: "sum(distinct col2)", + }}, + Keys: []int{}, + Input: fp, } + + result, err := oa.Execute(nil, nil, false) + assert.NoError(err) + + wantResult := sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct col2)|sum(distinct col2)", + "int64|decimal", + ), + "0|null", + ) + assert.Equal(wantResult, result) } diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 7e3df65cb7b..cf38a9dec55 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -344,7 +344,9 @@ func (e *Executor) handleDDL(ctx context.Context, safeSession *SafeSession, sql sqlparser.AddVschemaTableStr, sqlparser.DropVschemaTableStr, sqlparser.AddColVindexStr, - 
sqlparser.DropColVindexStr: + sqlparser.DropColVindexStr, + sqlparser.AddSequenceStr, + sqlparser.AddAutoIncStr: err := e.handleVSchemaDDL(ctx, safeSession, dest, destKeyspace, destTabletType, ddl, logStats) logStats.ExecuteTime = time.Since(execStart) @@ -676,7 +678,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql execStart := time.Now() defer func() { logStats.ExecuteTime = time.Since(execStart) }() - switch show.Type { + switch strings.ToLower(show.Type) { case sqlparser.KeywordString(sqlparser.COLLATION), sqlparser.KeywordString(sqlparser.VARIABLES): if destKeyspace == "" { keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx) @@ -776,7 +778,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql show.ShowTablesOpt.DbName = "" } sql = sqlparser.String(show) - case sqlparser.KeywordString(sqlparser.DATABASES), sqlparser.KeywordString(sqlparser.SCHEMAS), sqlparser.KeywordString(sqlparser.VITESS_KEYSPACES): + case sqlparser.KeywordString(sqlparser.DATABASES), "vitess_keyspaces", "keyspaces": keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx) if err != nil { return nil, err @@ -792,7 +794,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql Rows: rows, RowsAffected: uint64(len(rows)), }, nil - case sqlparser.KeywordString(sqlparser.VITESS_SHARDS): + case "vitess_shards": keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx) if err != nil { return nil, err @@ -817,7 +819,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql Rows: rows, RowsAffected: uint64(len(rows)), }, nil - case sqlparser.KeywordString(sqlparser.VITESS_TABLETS): + case "vitess_tablets": var rows [][]sqltypes.Value stats := e.scatterConn.healthCheck.CacheStatus() for _, s := range stats { @@ -842,7 +844,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql Rows: rows, RowsAffected: uint64(len(rows)), }, nil - case 
sqlparser.KeywordString(sqlparser.VITESS_TARGET): + case "vitess_target": var rows [][]sqltypes.Value rows = append(rows, buildVarCharRow(safeSession.TargetString)) return &sqltypes.Result{ @@ -1417,3 +1419,100 @@ func buildVarCharRow(values ...string) []sqltypes.Value { } return row } + +// Prepare executes a prepare statements. +func (e *Executor) Prepare(ctx context.Context, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (fld []*querypb.Field, err error) { + logStats := NewLogStats(ctx, method, sql, bindVars) + fld, err = e.prepare(ctx, safeSession, sql, bindVars, logStats) + logStats.Error = err + + // The mysql plugin runs an implicit rollback whenever a connection closes. + // To avoid spamming the log with no-op rollback records, ignore it if + // it was a no-op record (i.e. didn't issue any queries) + if !(logStats.StmtType == "ROLLBACK" && logStats.ShardQueries == 0) { + logStats.Send() + } + return fld, err +} + +func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *LogStats) ([]*querypb.Field, error) { + // Start an implicit transaction if necessary. 
+ if !safeSession.Autocommit && !safeSession.InTransaction() { + if err := e.txConn.Begin(ctx, safeSession); err != nil { + return nil, err + } + } + + destKeyspace, destTabletType, dest, err := e.ParseDestinationTarget(safeSession.TargetString) + if err != nil { + return nil, err + } + + if safeSession.InTransaction() && destTabletType != topodatapb.TabletType_MASTER { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "transactions are supported only for master tablet types, current type: %v", destTabletType) + } + if bindVars == nil { + bindVars = make(map[string]*querypb.BindVariable) + } + + stmtType := sqlparser.Preview(sql) + logStats.StmtType = sqlparser.StmtType(stmtType) + + // Mysql warnings are scoped to the current session, but are + // cleared when a "non-diagnostic statement" is executed: + // https://dev.mysql.com/doc/refman/8.0/en/show-warnings.html + // + // To emulate this behavior, clear warnings from the session + // for all statements _except_ SHOW, so that SHOW WARNINGS + // can actually return them. 
+ if stmtType != sqlparser.StmtShow { + safeSession.ClearWarnings() + } + + switch stmtType { + case sqlparser.StmtSelect: + return e.handlePrepare(ctx, safeSession, sql, bindVars, destKeyspace, destTabletType, logStats) + case sqlparser.StmtDDL, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback, sqlparser.StmtSet, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete, + sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment: + return nil, nil + case sqlparser.StmtShow: + res, err := e.handleShow(ctx, safeSession, sql, bindVars, dest, destKeyspace, destTabletType, logStats) + return res.Fields, err + } + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unrecognized statement: %s", sql) +} + +func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, destKeyspace string, destTabletType topodatapb.TabletType, logStats *LogStats) ([]*querypb.Field, error) { + // V3 mode. 
+ query, comments := sqlparser.SplitMarginComments(sql) + vcursor := newVCursorImpl(ctx, safeSession, destKeyspace, destTabletType, comments, e, logStats) + plan, err := e.getPlan( + vcursor, + query, + comments, + bindVars, + skipQueryPlanCache(safeSession), + logStats, + ) + execStart := time.Now() + logStats.PlanTime = execStart.Sub(logStats.StartTime) + + if err != nil { + logStats.Error = err + return nil, err + } + + qr, err := plan.Instructions.GetFields(vcursor, bindVars) + logStats.ExecuteTime = time.Since(execStart) + var errCount uint64 + if err != nil { + logStats.Error = err + errCount = 1 + } else { + logStats.RowsAffected = qr.RowsAffected + } + + plan.AddStats(1, time.Since(logStats.StartTime), uint64(logStats.ShardQueries), logStats.RowsAffected, errCount) + + return qr.Fields, err +} diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index da3d5720aca..b6f266b7bc8 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -1755,3 +1755,79 @@ func TestKeyShardDestQuery(t *testing.T) { sbc2.Queries = nil masterSession.TargetString = "" } + +// Prepared statement tests +func TestUpdateEqualWithPrepare(t *testing.T) { + executor, sbc1, sbc2, sbclookup := createExecutorEnv() + + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + _, err := executorPrepare(executor, "update music set a = :a0 where id = :id0", map[string]*querypb.BindVariable{ + "a0": sqltypes.Int64BindVariable(3), + "id0": sqltypes.Int64BindVariable(2), + }) + if err != nil { + t.Error(err) + } + + var wantQueries []*querypb.BoundQuery + + if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { + t.Errorf("sbclookup.Queries: %+v, want %+v\n", sbclookup.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } + if sbc1.Queries != nil { + t.Errorf("sbc1.Queries: %+v, want nil\n", sbc1.Queries) + } +} +func TestInsertShardedWithPrepare(t 
*testing.T) { + executor, sbc1, sbc2, sbclookup := createExecutorEnv() + + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + _, err := executorPrepare(executor, "insert into user(id, v, name) values (:_Id0, 2, ':_name0')", map[string]*querypb.BindVariable{ + "_Id0": sqltypes.Int64BindVariable(1), + "_name0": sqltypes.BytesBindVariable([]byte("myname")), + "__seq0": sqltypes.Int64BindVariable(1), + }) + if err != nil { + t.Error(err) + } + + var wantQueries []*querypb.BoundQuery + + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc1.Queries:\n%+v, want\n%+v\n", sbc1.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } + + if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { + t.Errorf("sbclookup.Queries: \n%+v, want \n%+v", sbclookup.Queries, wantQueries) + } +} + +func TestDeleteEqualWithPrepare(t *testing.T) { + executor, sbc, _, sbclookup := createExecutorEnv() + _, err := executorPrepare(executor, "delete from user where id = :id0", map[string]*querypb.BindVariable{ + "id0": sqltypes.Int64BindVariable(1), + }) + if err != nil { + t.Error(err) + } + + var wantQueries []*querypb.BoundQuery + + if !reflect.DeepEqual(sbc.Queries, wantQueries) { + t.Errorf("sbc.Queries:\n%+v, want\n%+v\n", sbc.Queries, wantQueries) + } + + if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { + t.Errorf("sbclookup.Queries:\n%+v, want\n%+v\n", sbclookup.Queries, wantQueries) + } +} diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index d2b3c3f1297..88073f32f69 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -392,6 +392,15 @@ func executorExec(executor *Executor, sql string, bv map[string]*querypb.BindVar bv) } +func executorPrepare(executor *Executor, sql string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { + return executor.Prepare( + 
context.Background(), + "TestExecute", + NewSafeSession(masterSession), + sql, + bv) +} + func executorStream(executor *Executor, sql string) (qr *sqltypes.Result, err error) { results := make(chan *sqltypes.Result, 100) err = executor.StreamExecute( diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 00d7a637268..8edf443fee9 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -2034,3 +2034,28 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { t.Errorf("result: %+v, want %+v", result, wantResult) } } + +func TestSelectBindvarswithPrepare(t *testing.T) { + executor, sbc1, sbc2, _ := createExecutorEnv() + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + sql := "select id from user where id = :id" + _, err := executorPrepare(executor, sql, map[string]*querypb.BindVariable{ + "id": sqltypes.Int64BindVariable(1), + }) + if err != nil { + t.Error(err) + } + + wantQueries := []*querypb.BoundQuery{{ + Sql: "select id from user where 1 != 1", + BindVariables: map[string]*querypb.BindVariable{"id": sqltypes.Int64BindVariable(1)}, + }} + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc1.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } +} diff --git a/go/vt/vtgate/executor_stats.go b/go/vt/vtgate/executor_stats.go index 7b9d45f9040..6fd6170ca5c 100644 --- a/go/vt/vtgate/executor_stats.go +++ b/go/vt/vtgate/executor_stats.go @@ -94,7 +94,7 @@ function drawQPSChart() { var idx = qps[planTypes[j]].length - i - 1; datum.push(+qps[planTypes[j]][idx].toFixed(2)); } else { - // Assume 0.0 QPS for older, non-existant data points. + // Assume 0.0 QPS for older, non-existent data points. 
datum.push(0); } } diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 5b59c62e48a..f66ed617d90 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -468,7 +468,6 @@ func TestExecutorSet(t *testing.T) { } } } - func TestExecutorAutocommit(t *testing.T) { executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@master"}) @@ -607,7 +606,7 @@ func TestExecutorShow(t *testing.T) { executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@master"}) - for _, query := range []string{"show databases", "show schemas", "show vitess_keyspaces"} { + for _, query := range []string{"show databases", "show vitess_keyspaces", "show keyspaces", "show DATABASES"} { qr, err := executor.Execute(context.Background(), "TestExecute", session, query, nil) if err != nil { t.Error(err) @@ -668,7 +667,7 @@ func TestExecutorShow(t *testing.T) { } if len(sbclookup.Queries) != 1 { - t.Errorf("Tablet should have recieved one 'show' query. Instead received: %v", sbclookup.Queries) + t.Errorf("Tablet should have received one 'show' query. 
Instead received: %v", sbclookup.Queries) } else { lastQuery := sbclookup.Queries[len(sbclookup.Queries)-1].Sql want := "show tables" @@ -1425,7 +1424,7 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { } _ = waitForVschemaTables(t, ks, append(vschemaTables, []string{"test_table", "test_table2"}...), executor) - // Should fail on a sharded keyspace + // Should fail adding a table on a sharded keyspace session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) stmt = "alter vschema add table test_table" _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) @@ -1446,6 +1445,74 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { } } +func TestExecutorAddSequenceDDL(t *testing.T) { + *vschemaacl.AuthorizedDDLUsers = "%" + defer func() { + *vschemaacl.AuthorizedDDLUsers = "" + }() + executor, _, _, _ := createExecutorEnv() + ks := KsTestUnsharded + + vschema := executor.vm.GetCurrentSrvVschema() + + var vschemaTables []string + for t := range vschema.Keyspaces[ks].Tables { + vschemaTables = append(vschemaTables, t) + } + + session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) + stmt := "alter vschema add sequence test_seq" + _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + if err != nil { + t.Error(err) + } + _ = waitForVschemaTables(t, ks, append(vschemaTables, []string{"test_seq"}...), executor) + vschema = executor.vm.GetCurrentSrvVschema() + table := vschema.Keyspaces[ks].Tables["test_seq"] + wantType := "sequence" + if table.Type != wantType { + t.Errorf("want table type sequence got %v", table) + } + + // Should fail adding a table on a sharded keyspace + ksSharded := "TestExecutor" + vschemaTables = []string{} + vschema = executor.vm.GetCurrentSrvVschema() + for t := range vschema.Keyspaces[ksSharded].Tables { + vschemaTables = append(vschemaTables, t) + } + + session = NewSafeSession(&vtgatepb.Session{TargetString: ksSharded}) + + stmt = "alter vschema add 
sequence sequence_table" + _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + + wantErr := "add sequence table: unsupported on sharded keyspace TestExecutor" + if err == nil || err.Error() != wantErr { + t.Errorf("want error %v got %v", wantErr, err) + } + + // Should be able to add autoincrement to table in sharded keyspace + stmt = "alter vschema on test_table add vindex hash_index (id)" + if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil { + t.Error(err) + } + time.Sleep(10 * time.Millisecond) + + stmt = "alter vschema on test_table add auto_increment id using test_seq" + if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil { + t.Error(err) + } + time.Sleep(10 * time.Millisecond) + + wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "test_seq"} + gotAutoInc := executor.vm.GetCurrentSrvVschema().Keyspaces[ksSharded].Tables["test_table"].AutoIncrement + + if !reflect.DeepEqual(wantAutoInc, gotAutoInc) { + t.Errorf("want autoinc %v, got autoinc %v", wantAutoInc, gotAutoInc) + } +} + func TestExecutorAddDropVindexDDL(t *testing.T) { *vschemaacl.AuthorizedDDLUsers = "%" defer func() { diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index bc847bac866..569ebaea699 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -158,14 +158,21 @@ func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table) (*engi for colIdx, col := range colVindex.Columns { routeValues[vIdx].Values[colIdx].Values = make([]sqltypes.PlanValue, len(rows)) colNum := findOrAddColumn(ins, col) - // swap bind variables - baseName := ":_" + col.CompliantName() for rowNum, row := range rows { innerpv, err := sqlparser.NewPlanValue(row[colNum]) if err != nil { return nil, vterrors.Wrapf(err, "could not compute value for vindex or auto-inc column") } 
routeValues[vIdx].Values[colIdx].Values[rowNum] = innerpv + } + } + } + for _, colVindex := range eins.Table.ColumnVindexes { + for _, col := range colVindex.Columns { + colNum := findOrAddColumn(ins, col) + // swap bind variables + baseName := ":_" + col.CompliantName() + for rowNum, row := range rows { row[colNum] = sqlparser.NewValArg([]byte(baseName + strconv.Itoa(rowNum))) } } diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index e75685d6117..f1ec12fba2f 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -269,7 +269,7 @@ func (pb *primitiveBuilder) expandStar(inrcs []*resultColumn, expr *sqlparser.St for _, col := range t.columnNames { var expr *sqlparser.AliasedExpr if singleTable { - // If there's only one table, we use unqualifed column names. + // If there's only one table, we use unqualified column names. expr = &sqlparser.AliasedExpr{ Expr: &sqlparser.ColName{ Metadata: t.columns[col.Lowered()], diff --git a/go/vt/vtgate/planbuilder/symtab.go b/go/vt/vtgate/planbuilder/symtab.go index 0127661ffbe..bc2744bb8ac 100644 --- a/go/vt/vtgate/planbuilder/symtab.go +++ b/go/vt/vtgate/planbuilder/symtab.go @@ -40,7 +40,7 @@ var errNoTable = errors.New("no table info") // vindex column names. These names can be resolved without the // need to qualify them by their table names. If there are // duplicates during a merge, those columns are removed from -// the unique list, thereby disallowing unqualifed references +// the unique list, thereby disallowing unqualified references // to such columns. 
// // After a select expression is analyzed, the diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt index 9a9945abed9..7d43fb8f9f0 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt @@ -1359,6 +1359,41 @@ } } +# insert for overlapped vindex columns +"insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)" +{ + "Original": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)", + "Instructions": { + "Opcode": "InsertSharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid0, :_column_a0, 3)", + "Values": [ + [ + [ + 1 + ] + ], + [ + [ + 2 + ], + [ + 1 + ] + ] + ], + "Table": "overlap_vindex", + "Prefix": "insert into overlap_vindex(kid, column_a, column_b) values ", + "Mid": [ + "(:_kid0, :_column_a0, 3)" + ] + } +} + + # insert multiple rows in a multi column vindex table "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)" { diff --git a/go/vt/vtgate/planbuilder/testdata/schema_test.json b/go/vt/vtgate/planbuilder/testdata/schema_test.json index b3dac7176b0..e39c602caad 100644 --- a/go/vt/vtgate/planbuilder/testdata/schema_test.json +++ b/go/vt/vtgate/planbuilder/testdata/schema_test.json @@ -47,6 +47,10 @@ "type": "lookup_test", "owner": "multicolvin" }, + "cola_kid_map": { + "type": "lookup_test", + "owner": "overlap_vindex" + }, "name_user_map": { "type": "multi", "owner": "user" @@ -212,6 +216,18 @@ } ] }, + "overlap_vindex": { + "column_vindexes": [ + { + "column": "kid", + "name": "kid_index" + }, + { + "columns": ["column_a", "kid"], + "name": "cola_kid_map" + } + ] + }, "music_extra": { "column_vindexes": [ { diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 5f3df18dd8f..85240db4804 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ 
b/go/vt/vtgate/plugin_mysql_server.go @@ -162,6 +162,128 @@ func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sq return callback(result) } +// ComPrepare is the handler for command prepare. +func (vh *vtgateHandler) ComPrepare(c *mysql.Conn, query string) ([]*querypb.Field, error) { + var ctx context.Context + var cancel context.CancelFunc + if *mysqlQueryTimeout != 0 { + ctx, cancel = context.WithTimeout(context.Background(), *mysqlQueryTimeout) + defer cancel() + } else { + ctx = context.Background() + } + + ctx = callinfo.MysqlCallInfo(ctx, c) + + // Fill in the ImmediateCallerID with the UserData returned by + // the AuthServer plugin for that user. If nothing was + // returned, use the User. This lets the plugin map a MySQL + // user used for authentication to a Vitess User used for + // Table ACLs and Vitess authentication in general. + im := c.UserData.Get() + ef := callerid.NewEffectiveCallerID( + c.User, /* principal: who */ + c.RemoteAddr().String(), /* component: running client process */ + "VTGate MySQL Connector" /* subcomponent: part of the client */) + ctx = callerid.NewContext(ctx, ef, im) + + session, _ := c.ClientData.(*vtgatepb.Session) + if session == nil { + session = &vtgatepb.Session{ + Options: &querypb.ExecuteOptions{ + IncludedFields: querypb.ExecuteOptions_ALL, + }, + Autocommit: true, + } + if c.Capabilities&mysql.CapabilityClientFoundRows != 0 { + session.Options.ClientFoundRows = true + } + } + + if !session.InTransaction { + atomic.AddInt32(&busyConnections, 1) + } + defer func() { + if !session.InTransaction { + atomic.AddInt32(&busyConnections, -1) + } + }() + + if c.SchemaName != "" { + session.TargetString = c.SchemaName + } + + session, fld, err := vh.vtg.Prepare(ctx, session, query, make(map[string]*querypb.BindVariable)) + c.ClientData = session + err = mysql.NewSQLErrorFromError(err) + if err != nil { + return nil, err + } + return fld, nil +} + +func (vh *vtgateHandler) ComStmtExecute(c 
*mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { + var ctx context.Context + var cancel context.CancelFunc + if *mysqlQueryTimeout != 0 { + ctx, cancel = context.WithTimeout(context.Background(), *mysqlQueryTimeout) + defer cancel() + } else { + ctx = context.Background() + } + + ctx = callinfo.MysqlCallInfo(ctx, c) + + // Fill in the ImmediateCallerID with the UserData returned by + // the AuthServer plugin for that user. If nothing was + // returned, use the User. This lets the plugin map a MySQL + // user used for authentication to a Vitess User used for + // Table ACLs and Vitess authentication in general. + im := c.UserData.Get() + ef := callerid.NewEffectiveCallerID( + c.User, /* principal: who */ + c.RemoteAddr().String(), /* component: running client process */ + "VTGate MySQL Connector" /* subcomponent: part of the client */) + ctx = callerid.NewContext(ctx, ef, im) + + session, _ := c.ClientData.(*vtgatepb.Session) + if session == nil { + session = &vtgatepb.Session{ + Options: &querypb.ExecuteOptions{ + IncludedFields: querypb.ExecuteOptions_ALL, + }, + Autocommit: true, + } + if c.Capabilities&mysql.CapabilityClientFoundRows != 0 { + session.Options.ClientFoundRows = true + } + } + + if !session.InTransaction { + atomic.AddInt32(&busyConnections, 1) + } + defer func() { + if !session.InTransaction { + atomic.AddInt32(&busyConnections, -1) + } + }() + + if c.SchemaName != "" { + session.TargetString = c.SchemaName + } + if session.Options.Workload == querypb.ExecuteOptions_OLAP { + err := vh.vtg.StreamExecute(ctx, session, prepare.PrepareStmt, prepare.BindVars, callback) + return mysql.NewSQLErrorFromError(err) + } + _, qr, err := vh.vtg.Execute(ctx, session, prepare.PrepareStmt, prepare.BindVars) + if err != nil { + err = mysql.NewSQLErrorFromError(err) + return err + } + + return callback(qr) +} + func (vh *vtgateHandler) WarningCount(c *mysql.Conn) uint16 { session, _ := c.ClientData.(*vtgatepb.Session) if 
session != nil { diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index c873206e6ad..c190f053e83 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type testHandler struct { @@ -43,6 +44,14 @@ func (th *testHandler) ComQuery(c *mysql.Conn, q string, callback func(*sqltypes return nil } +func (th *testHandler) ComPrepare(c *mysql.Conn, q string) ([]*querypb.Field, error) { + return nil, nil +} + +func (th *testHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + func (th *testHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index ea8ca4f06ef..7657a9654ab 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -52,7 +52,7 @@ type SafeSession struct { // be issued if the state is autocommitable, // implying that no intermediate transactions were started. // If so, the state transitions to autocommited, which is terminal. -// If the token is succesfully issued, the caller has to perform +// If the token is successfully issued, the caller has to perform // the commit. If a token cannot be issued, then a traditional // commit has to be performed at the outermost level where // the autocommitable transition happened. diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 3d0b5c71a2d..3bc5cb796e8 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -718,7 +718,7 @@ func injectShuffleQueryPartsRandomGenerator( } // shuffleQueryParts performs an in-place shuffle of the given array. 
-// The result is a psuedo-random permutation of the array chosen uniformally +// The result is a pseudo-random permutation of the array chosen uniformally // from the space of all permutations. func shuffleQueryParts(splits []*vtgatepb.SplitQueryResponse_Part) { for i := len(splits) - 1; i >= 1; i-- { diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index bf2ab39e585..5b5094d9626 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -833,6 +833,34 @@ func (vtg *VTGate) ResolveTransaction(ctx context.Context, dtid string) error { return formatError(vtg.txConn.Resolve(ctx, dtid)) } +// Prepare supports non-streaming prepare statement query with multi shards +func (vtg *VTGate) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, fld []*querypb.Field, err error) { + // In this context, we don't care if we can't fully parse destination + destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) + statsKey := []string{"Execute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} + defer vtg.timings.Record(statsKey, time.Now()) + + if bvErr := sqltypes.ValidateBindVariables(bindVariables); bvErr != nil { + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr) + goto handleError + } + + fld, err = vtg.executor.Prepare(ctx, "Prepare", NewSafeSession(session), sql, bindVariables) + if err == nil { + vtg.rowsReturned.Add(statsKey, int64(len(fld))) + return session, fld, nil + } + +handleError: + query := map[string]interface{}{ + "Sql": sql, + "BindVariables": bindVariables, + "Session": session, + } + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) + return session, nil, err +} + // isKeyspaceRangeBasedSharded returns true if a keyspace is sharded // by range. 
This is true when there is a ShardingColumnType defined // in the SrvKeyspace (that is using the range-based sharding with the diff --git a/go/vt/vtqueryserver/plugin_mysql_server.go b/go/vt/vtqueryserver/plugin_mysql_server.go index 804cd207f8b..a900472d6bf 100644 --- a/go/vt/vtqueryserver/plugin_mysql_server.go +++ b/go/vt/vtqueryserver/plugin_mysql_server.go @@ -136,6 +136,14 @@ func (mh *proxyHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } +func (mh *proxyHandler) ComPrepare(c *mysql.Conn, query string) ([]*querypb.Field, error) { + return nil, nil +} + +func (mh *proxyHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + var mysqlListener *mysql.Listener var mysqlUnixListener *mysql.Listener diff --git a/go/vt/vtqueryserver/plugin_mysql_server_test.go b/go/vt/vtqueryserver/plugin_mysql_server_test.go index ca0f6b806cd..6c538b80a37 100644 --- a/go/vt/vtqueryserver/plugin_mysql_server_test.go +++ b/go/vt/vtqueryserver/plugin_mysql_server_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type testHandler struct { @@ -43,6 +44,14 @@ func (th *testHandler) ComQuery(c *mysql.Conn, q string, callback func(*sqltypes return nil } +func (th *testHandler) ComPrepare(c *mysql.Conn, q string) ([]*querypb.Field, error) { + return nil, nil +} + +func (th *testHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + func (th *testHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } diff --git a/go/vt/vtqueryserver/vtqueryserver.go b/go/vt/vtqueryserver/vtqueryserver.go index 722a3ce49ad..b300c463130 100644 --- a/go/vt/vtqueryserver/vtqueryserver.go +++ b/go/vt/vtqueryserver/vtqueryserver.go @@ -51,7 +51,7 @@ var ( func initProxy(dbcfgs *dbconfigs.DBConfigs) (*tabletserver.TabletServer, error) { target.Keyspace = 
*targetKeyspace - log.Infof("initalizing vtqueryserver.Proxy for target %s", target.Keyspace) + log.Infof("initializing vtqueryserver.Proxy for target %s", target.Keyspace) // creates and registers the query service qs := tabletserver.NewTabletServerWithNilTopoServer(tabletenv.Config) diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go index 7fa175faa2e..cb47068c47d 100644 --- a/go/vt/vttablet/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -36,7 +36,7 @@ func compareIntDiff(end map[string]interface{}, tag string, start map[string]int return verifyIntValue(end, tag, framework.FetchInt(start, tag)+diff) } -// verifyIntValue retuns an error if values[tag] != want. +// verifyIntValue returns an error if values[tag] != want. func verifyIntValue(values map[string]interface{}, tag string, want int) error { got := framework.FetchInt(values, tag) if got != want { diff --git a/go/vt/vttablet/endtoend/framework/eventcatcher.go b/go/vt/vttablet/endtoend/framework/eventcatcher.go index 209867325eb..3465bce8e88 100644 --- a/go/vt/vttablet/endtoend/framework/eventcatcher.go +++ b/go/vt/vttablet/endtoend/framework/eventcatcher.go @@ -58,7 +58,7 @@ type QueryCatcher struct { catcher *eventCatcher } -// NewQueryCatcher sets up the capture and retuns a QueryCatcher. +// NewQueryCatcher sets up the capture and returns a QueryCatcher. // You must call Close when done. 
func NewQueryCatcher() QueryCatcher { return QueryCatcher{catcher: newEventCatcher(tabletenv.StatsLogger)} diff --git a/go/vt/vttablet/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go index f341d8fba36..8bba8fc6db7 100644 --- a/go/vt/vttablet/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -250,6 +250,27 @@ var tableACLConfig = `{ "writers": ["dev"], "admins": ["dev"] }, + { + "name": "test_topic", + "table_names_or_prefixes": ["test_topic"], + "readers": ["dev"], + "writers": ["dev"], + "admins": ["dev"] + }, + { + "name": "vitess_topic_subscriber_1", + "table_names_or_prefixes": ["vitess_topic_subscriber_1"], + "readers": ["dev"], + "writers": ["dev"], + "admins": ["dev"] + }, + { + "name": "vitess_topic_subscriber_2", + "table_names_or_prefixes": ["vitess_topic_subscriber_2"], + "readers": ["dev"], + "writers": ["dev"], + "admins": ["dev"] + }, { "name": "vitess_acl_unmatched", "table_names_or_prefixes": ["vitess_acl_unmatched"], diff --git a/go/vt/vttablet/endtoend/message_test.go b/go/vt/vttablet/endtoend/message_test.go index 79a86227671..6a908c3582d 100644 --- a/go/vt/vttablet/endtoend/message_test.go +++ b/go/vt/vttablet/endtoend/message_test.go @@ -59,20 +59,7 @@ func TestMessage(t *testing.T) { } // Start goroutine to consume message stream. 
- go func() { - if err := client.MessageStream("vitess_message", func(qr *sqltypes.Result) error { - select { - case <-done: - return io.EOF - default: - } - ch <- qr - return nil - }); err != nil { - t.Error(err) - } - close(ch) - }() + go waitForMessage(t, client, "vitess_message", ch, done) got := <-ch want := &sqltypes.Result{ Fields: []*querypb.Field{{ @@ -248,20 +235,7 @@ func TestThreeColMessage(t *testing.T) { } defer client.Execute("drop table vitess_message3", nil) - go func() { - if err := client.MessageStream("vitess_message3", func(qr *sqltypes.Result) error { - select { - case <-done: - return io.EOF - default: - } - ch <- qr - return nil - }); err != nil { - t.Error(err) - } - close(ch) - }() + go waitForMessage(t, client, "vitess_message3", ch, done) // Verify fields. got := <-ch @@ -358,20 +332,7 @@ func TestMessageAuto(t *testing.T) { defer client.Execute("drop table vitess_message_auto", nil) // Start goroutine to consume message stream. - go func() { - if err := client.MessageStream("vitess_message_auto", func(qr *sqltypes.Result) error { - select { - case <-done: - return io.EOF - default: - } - ch <- qr - return nil - }); err != nil { - t.Error(err) - } - close(ch) - }() + go waitForMessage(t, client, "vitess_message_auto", ch, done) <-ch defer func() { close(done) }() @@ -461,3 +422,280 @@ func TestMessageAuto(t *testing.T) { t.Errorf("message received:\n%v, want\n%v", got, want) } } + +var createMessageTopic1 = `create table vitess_topic_subscriber_1( + time_scheduled bigint, + id bigint, + time_next bigint, + epoch bigint, + time_created bigint, + time_acked bigint, + message varchar(128), + primary key(time_scheduled, id), + unique index id_idx(id), + index next_idx(time_next, epoch)) +comment 'vitess_message,vt_topic=test_topic,vt_ack_wait=1,vt_purge_after=3,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=1'` + +var createMessageTopic2 = `create table vitess_topic_subscriber_2( + time_scheduled bigint, + id bigint, + time_next bigint, 
+ epoch bigint, + time_created bigint, + time_acked bigint, + message varchar(128), + primary key(time_scheduled, id), + unique index id_idx(id), + index next_idx(time_next, epoch)) +comment 'vitess_message,vt_topic=test_topic,vt_ack_wait=1,vt_purge_after=3,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=1'` + +// TestMessageTopic tests for the case where id is an auto-inc column. +func TestMessageTopic(t *testing.T) { + ch1 := make(chan *sqltypes.Result) + ch2 := make(chan *sqltypes.Result) + done := make(chan struct{}) + client := framework.NewClient() + + // + // phase 1 tests inserts into a topic going to two subscribed message tables + // + if _, err := client.Execute(createMessageTopic1, nil); err != nil { + t.Fatal(err) + } + + if _, err := client.Execute(createMessageTopic2, nil); err != nil { + t.Fatal(err) + } + + // Start goroutines to consume message stream. + go waitForMessage(t, client, "vitess_topic_subscriber_1", ch1, done) + <-ch1 + go waitForMessage(t, client, "vitess_topic_subscriber_2", ch2, done) + <-ch2 + defer func() { close(done) }() + + // Create message. + err := client.Begin(false) + if err != nil { + t.Error(err) + return + } + // This insert should cause the engine to make a best-effort guess at generated ids. + // It will expedite the first two rows with null values, and the third row, and will + // give up on the last row, which should eventually be picked up by the poller. 
+ _, err = client.Execute("insert into test_topic(id, message) values(1, 'msg1'), (2, 'msg2'), (3, 'msg3')", nil) + if err != nil { + t.Error(err) + return + } + + err = client.Commit() + if err != nil { + t.Error(err) + return + } + + // Only three messages should be queued on the first subscription table + if got, want := framework.FetchInt(framework.DebugVars(), "Messages/vitess_topic_subscriber_1.Queued"), 3; got != want { + t.Errorf("Messages/vitess_topic_subscriber_1.Queued: %d, want %d", got, want) + } + + // Only three messages should be queued on the second subscription table + if got, want := framework.FetchInt(framework.DebugVars(), "Messages/vitess_topic_subscriber_2.Queued"), 3; got != want { + t.Errorf("Messages/vitess_topic_subscriber_2.Queued: %d, want %d", got, want) + } + + wantResults := []*sqltypes.Result{{ + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(1), + sqltypes.NULL, + sqltypes.NewVarChar("msg1"), + }}, + }, { + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(2), + sqltypes.NULL, + sqltypes.NewVarChar("msg2"), + }}, + }, { + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(3), + sqltypes.NULL, + sqltypes.NewVarChar("msg3"), + }}, + }} + + // Consume first three messages + // and ensure they were received promptly. + start := time.Now() + for i := 0; i < 3; i++ { + // make sure the first message table received all three messages + got1 := <-ch1 + got1.Rows[0][1] = sqltypes.NULL + + // Results can come in any order. + found := false + for _, want := range wantResults { + if reflect.DeepEqual(got1, want) { + found = true + } + } + if !found { + t.Errorf("message fetch 1: %v not found in expected list: %v", got1, wantResults) + } + + // make sure the second message table received all three messages + got2 := <-ch2 + got2.Rows[0][1] = sqltypes.NULL + + // Results can come in any order. 
+ found = false + for _, want := range wantResults { + if reflect.DeepEqual(got2, want) { + found = true + } + } + if !found { + t.Errorf("message fetch 2: %v not found in expected list: %v", got2, wantResults) + } + } + if d := time.Since(start); d > 1*time.Second { + t.Errorf("messages were delayed: %v", d) + } + + // ack the first subscriber + _, err = client.MessageAck("vitess_topic_subscriber_1", []string{"1, 2, 3"}) + if err != nil { + t.Error(err) + } + + // ack the second subscriber + _, err = client.MessageAck("vitess_topic_subscriber_2", []string{"1, 2, 3"}) + if err != nil { + t.Error(err) + } + + // + // phase 2 tests deleting one of the subscribers and making sure + // that inserts into a topic go to one subscribed message table + // + + client.Execute("drop table vitess_topic_subscriber_1", nil) + + // Create message. + err = client.Begin(false) + if err != nil { + t.Error(err) + return + } + // This insert should cause the engine to make a best-effort guess at generated ids. + // It will expedite the first two rows with null values, and the third row, and will + // give up on the last row, which should eventually be picked up by the poller. 
+ _, err = client.Execute("insert into test_topic(id, message) values(4, 'msg4'), (5, 'msg5'), (6, 'msg6')", nil) + if err != nil { + t.Error(err) + return + } + + err = client.Commit() + if err != nil { + t.Error(err) + return + } + + // no messages should be queued on the first subscription table + if got, want := framework.FetchInt(framework.DebugVars(), "Messages/vitess_topic_subscriber_1.Queued"), 3; got != want { + t.Errorf("Messages/vitess_topic_subscriber_1.Queued: %d, want %d", got, want) + } + + // Only three messages should be queued on the second subscription table + if got, want := framework.FetchInt(framework.DebugVars(), "Messages/vitess_topic_subscriber_2.Queued"), 6; got != want { + t.Errorf("Messages/vitess_topic_subscriber_2.Queued: %d, want %d", got, want) + } + + wantResults = []*sqltypes.Result{{ + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(4), + sqltypes.NULL, + sqltypes.NewVarChar("msg4"), + }}, + }, { + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(5), + sqltypes.NULL, + sqltypes.NewVarChar("msg5"), + }}, + }, { + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(6), + sqltypes.NULL, + sqltypes.NewVarChar("msg6"), + }}, + }} + + // Consume first three messages + // and ensure they were received promptly. + start = time.Now() + for i := 0; i < 3; i++ { + // make sure the second message table received all three messages + got2 := <-ch2 + got2.Rows[0][1] = sqltypes.NULL + + // Results can come in any order. 
+ found := false + for _, want := range wantResults { + if reflect.DeepEqual(got2, want) { + found = true + } + } + if !found { + t.Errorf("message fetch 2: %v not found in expected list: %v", got2, wantResults) + } + } + if d := time.Since(start); d > 1*time.Second { + t.Errorf("messages were delayed: %v", d) + } + + // ack the second subscriber + _, err = client.MessageAck("vitess_topic_subscriber_2", []string{"4, 5, 6"}) + if err != nil { + t.Error(err) + } + + // + // phase 3 tests deleting the last subscriber and making sure + // that inserts into a topic error out with table not found + // + + // remove the second subscriber which should remove the topic + if _, err := client.Execute("drop table vitess_topic_subscriber_2", nil); err != nil { + t.Fatal(err) + } + + // this should fail because the topic doesn't exist. Any other outcome fails the test + _, err = client.Execute("insert into test_topic(id, message) values(4, 'msg4'), (5, 'msg5'), (6, 'msg6')", nil) + switch { + case err == nil: + t.Error("test_topic shouldn't have existed for inserts to succeed") + + case err.Error() == "table test_topic not found in schema (CallerID: dev)": + + default: + t.Error(err) + } +} + +func waitForMessage(t *testing.T, client *framework.QueryClient, tableName string, ch chan *sqltypes.Result, done chan struct{}) { + if err := client.MessageStream(tableName, func(qr *sqltypes.Result) error { + select { + case <-done: + return io.EOF + default: + } + ch <- qr + return nil + }); err != nil { + t.Error(err) + } + close(ch) +} diff --git a/go/vt/vttablet/tabletmanager/action_agent.go b/go/vt/vttablet/tabletmanager/action_agent.go index f84931db386..7fe05a7b5b3 100644 --- a/go/vt/vttablet/tabletmanager/action_agent.go +++ b/go/vt/vttablet/tabletmanager/action_agent.go @@ -104,11 +104,15 @@ type ActionAgent struct { statsTabletType *stats.String // statsTabletTypeCount exposes the current tablet type as a label, - // with the value counting the occurances of the respective 
tablet type. + // with the value counting the occurrences of the respective tablet type. // Useful for Prometheus which doesn't support exporting strings as stat values // only used if exportStats is true. statsTabletTypeCount *stats.CountersWithSingleLabel + // statsBackupIsRunning is set to 1 (true) if a backup is running + // only used if exportStats is true + statsBackupIsRunning *stats.GaugesWithMultiLabels + // batchCtx is given to the agent by its creator, and should be used for // any background tasks spawned by the agent. batchCtx context.Context @@ -209,8 +213,8 @@ type ActionAgent struct { // _lockTablesConnection is used to get and release the table read locks to pause replication _lockTablesConnection *dbconnpool.DBConnection _lockTablesTimer *time.Timer - // unused - //_lockTablesTimeout *time.Duration + // _isBackupRunning tells us whether there is a backup that is currently running + _isBackupRunning bool } // NewActionAgent creates a new ActionAgent and registers all the @@ -262,6 +266,7 @@ func NewActionAgent( agent.exportStats = true agent.statsTabletType = stats.NewString("TabletType") agent.statsTabletTypeCount = stats.NewCountersWithSingleLabel("TabletTypeCount", "Number of times the tablet changed to the labeled type", "type") + agent.statsBackupIsRunning = stats.NewGaugesWithMultiLabels("BackupIsRunning", "Whether a backup is running", []string{"mode"}) var mysqlHost string var mysqlPort int32 diff --git a/go/vt/vttablet/tabletmanager/healthcheck.go b/go/vt/vttablet/tabletmanager/healthcheck.go index cf187767741..d3a4343e2d6 100644 --- a/go/vt/vttablet/tabletmanager/healthcheck.go +++ b/go/vt/vttablet/tabletmanager/healthcheck.go @@ -180,6 +180,7 @@ func (agent *ActionAgent) runHealthCheck() { } func (agent *ActionAgent) runHealthCheckLocked() { + agent.checkLock() // read the current tablet record and tablet control agent.mutex.Lock() tablet := proto.Clone(agent._tablet).(*topodatapb.Tablet) @@ -284,7 +285,7 @@ func (agent *ActionAgent) 
runHealthCheckLocked() { } // All master tablets have to run the VReplication engine. - // There is no guarantee that VREngine was succesfully started when tabletmanager + // There is no guarantee that VREngine was successfully started when tabletmanager // came up. This is because the mysql could have been in read-only mode, etc. // So, start the engine if it's not already running. if tablet.Type == topodatapb.TabletType_MASTER && !agent.VREngine.IsOpen() { diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index c4b9b06fca4..d7387b7c941 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -30,13 +30,13 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +const ( + backupModeOnline = "online" + backupModeOffline = "offline" +) + // Backup takes a db backup and sends it to the BackupStorage func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger logutil.Logger, allowMaster bool) error { - if err := agent.lock(ctx); err != nil { - return err - } - defer agent.unlock() - if agent.Cnf == nil { return fmt.Errorf("cannot perform backup without my.cnf, please restart vttablet with a my.cnf file specified") } @@ -49,7 +49,11 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo if !allowMaster && currentTablet.Type == topodatapb.TabletType_MASTER { return fmt.Errorf("type MASTER cannot take backup. 
if you really need to do this, rerun the backup command with -allow_master") } - + engine, err := mysqlctl.GetBackupEngine() + if err != nil { + return vterrors.Wrap(err, "failed to find backup engine") + } + // get Tablet info from topo so that it is up to date tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err @@ -57,14 +61,28 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo if !allowMaster && tablet.Type == topodatapb.TabletType_MASTER { return fmt.Errorf("type MASTER cannot take backup. if you really need to do this, rerun the backup command with -allow_master") } - originalType := tablet.Type - engine, err := mysqlctl.GetBackupEngine() - if err != nil { - return vterrors.Wrap(err, "failed to find backup engine") + // prevent concurrent backups, and record stats + backupMode := backupModeOnline + if engine.ShouldDrainForBackup() { + backupMode = backupModeOffline + } + if err := agent.beginBackup(backupMode); err != nil { + return err } - builtin, _ := engine.(*mysqlctl.BuiltinBackupEngine) - if builtin != nil { + defer agent.endBackup(backupMode) + + var originalType topodatapb.TabletType + if engine.ShouldDrainForBackup() { + if err := agent.lock(ctx); err != nil { + return err + } + defer agent.unlock() + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) + if err != nil { + return err + } + originalType = tablet.Type // update our type to BACKUP if _, err := topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topodatapb.TabletType_BACKUP); err != nil { return err @@ -83,8 +101,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo name := fmt.Sprintf("%v.%v", time.Now().UTC().Format("2006-01-02.150405"), topoproto.TabletAliasString(tablet.Alias)) returnErr := mysqlctl.Backup(ctx, agent.Cnf, agent.MysqlDaemon, l, dir, name, concurrency, agent.hookExtraEnv()) - if builtin != nil { - + if engine.ShouldDrainForBackup() { bgCtx := 
context.Background() // Starting from here we won't be able to recover if we get stopped by a cancelled // context. It is also possible that the context already timed out during the @@ -105,9 +122,10 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo if err := agent.refreshTablet(bgCtx, "after backup"); err != nil { return err } + // and re-run health check to be sure to capture any replication delay + // not needed for online backups because it will continue to run per schedule + agent.runHealthCheckLocked() } - // and re-run health check to be sure to capture any replication delay - agent.runHealthCheckLocked() return returnErr } @@ -138,3 +156,32 @@ func (agent *ActionAgent) RestoreFromBackup(ctx context.Context, logger logutil. return err } + +func (agent *ActionAgent) beginBackup(backupMode string) error { + agent.mutex.Lock() + defer agent.mutex.Unlock() + if agent._isBackupRunning { + return fmt.Errorf("a backup is already running on tablet: %v", agent.TabletAlias) + } + // when mode is online we don't take the action lock, so we continue to serve, + // but let's set _isBackupRunning to true + // so that we only allow one online backup at a time + // offline backups also run only one at a time because we take the action lock + // so this is not really needed in that case, however we are using it to record the state + agent._isBackupRunning = true + if agent.exportStats { + agent.statsBackupIsRunning.Set([]string{backupMode}, 1) + } + return nil +} + +func (agent *ActionAgent) endBackup(backupMode string) { + // now we set _isBackupRunning back to false + // have to take the mutex lock before writing to _ fields + agent.mutex.Lock() + defer agent.mutex.Unlock() + agent._isBackupRunning = false + if agent.exportStats { + agent.statsBackupIsRunning.Set([]string{backupMode}, 0) + } +} diff --git a/go/vt/vttablet/tabletmanager/rpc_server.go b/go/vt/vttablet/tabletmanager/rpc_server.go index 4720ae1a8b7..de903e30d75 100644 --- 
a/go/vt/vttablet/tabletmanager/rpc_server.go +++ b/go/vt/vttablet/tabletmanager/rpc_server.go @@ -52,7 +52,7 @@ func (agent *ActionAgent) lock(ctx context.Context) error { } } -// unlock is the symetrical action to lock. +// unlock is the symmetrical action to lock. func (agent *ActionAgent) unlock() { agent.actionMutexLocked = false agent.actionMutex.Unlock() } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 44a0337c2ce..41c333aa9f5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -194,6 +194,11 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { if _, err := dbClient.ExecuteFetch("set @@session.time_zone = '+00:00'", 10000); err != nil { return err } + // Tables may have varying character sets. To ship the bits without interpreting them + // we set the character set to be binary. + if _, err := dbClient.ExecuteFetch("set names binary", 10000); err != nil { + return err + } vreplicator := newVReplicator(ct.id, &ct.source, tablet, ct.blpStats, dbClient, ct.mysqld) return vreplicator.Replicate(ctx) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 1a964e7b02a..e3c73415a1f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -27,7 +27,9 @@ import ( type controllerPlan struct { opcode int query string - id int + // delCopyState is set for deletes. + delCopyState string + id int } const ( @@ -35,6 +37,7 @@ const ( updateQuery deleteQuery selectQuery + reshardingJournalQuery ) // buildControllerPlan parses the input query and returns an appropriate plan. 
@@ -58,15 +61,23 @@ func buildControllerPlan(query string) (*controllerPlan, error) { } func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { + switch sqlparser.String(ins.Table) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(ins), + }, nil + case vreplicationTableName: + // no-op + default: + return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) + } if ins.Action != sqlparser.InsertStr { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } if ins.Ignore != "" { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } - if sqlparser.String(ins.Table) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) - } if ins.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } @@ -106,7 +117,15 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { } func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { - if sqlparser.String(upd.TableExprs) != "_vt.vreplication" { + switch sqlparser.String(upd.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(upd), + }, nil + case vreplicationTableName: + // no-op + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(upd.TableExprs)) } if upd.OrderBy != nil || upd.Limit != nil { @@ -131,12 +150,20 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { + switch sqlparser.String(del.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(del), + }, nil + case vreplicationTableName: + // no-op + default: + return nil, fmt.Errorf("invalid table name: %v", 
sqlparser.String(del.TableExprs)) + } if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } - if sqlparser.String(del.TableExprs) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(del.TableExprs)) - } if del.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } @@ -150,20 +177,23 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } return &controllerPlan{ - opcode: deleteQuery, - query: sqlparser.String(del), - id: id, + opcode: deleteQuery, + query: sqlparser.String(del), + delCopyState: fmt.Sprintf("delete from %s where vrepl_id = %d", copySateTableName, id), + id: id, }, nil } func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { - if sqlparser.String(sel.From) != "_vt.vreplication" { + switch sqlparser.String(sel.From) { + case vreplicationTableName, reshardingJournalTableName, copySateTableName: + return &controllerPlan{ + opcode: selectQuery, + query: sqlparser.String(sel), + }, nil + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(sel.From)) } - return &controllerPlan{ - opcode: selectQuery, - query: sqlparser.String(sel), - }, nil } func extractID(where *sqlparser.Where) (int, error) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 533668a2955..ecdb32e6483 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -45,6 +45,12 @@ func TestControllerPlan(t *testing.T) { opcode: insertQuery, query: "insert into _vt.vreplication(workflow, id) values ('', null)", }, + }, { + in: "insert into _vt.resharding_journal values (1)", + plan: &controllerPlan{ + opcode: reshardingJournalQuery, + query: "insert into _vt.resharding_journal values (1)", + }, }, { in: "replace into 
_vt.vreplication values(null)", err: "unsupported construct: replace into _vt.vreplication values (null)", @@ -84,6 +90,12 @@ func TestControllerPlan(t *testing.T) { query: "update _vt.vreplication set state = 'Running' where id = 1", id: 1, }, + }, { + in: "update _vt.resharding_journal set col = 1", + plan: &controllerPlan{ + opcode: reshardingJournalQuery, + query: "update _vt.resharding_journal set col = 1", + }, }, { in: "update a set state='Running' where id = 1", err: "invalid table name: a", @@ -116,16 +128,23 @@ func TestControllerPlan(t *testing.T) { }, { in: "delete from _vt.vreplication where id = 1", plan: &controllerPlan{ - opcode: deleteQuery, - query: "delete from _vt.vreplication where id = 1", - id: 1, + opcode: deleteQuery, + query: "delete from _vt.vreplication where id = 1", + delCopyState: "delete from _vt.copy_state where vrepl_id = 1", + id: 1, + }, + }, { + in: "delete from _vt.resharding_journal where id = 1", + plan: &controllerPlan{ + opcode: reshardingJournalQuery, + query: "delete from _vt.resharding_journal where id = 1", }, }, { in: "delete from a where id = 1", err: "invalid table name: a", }, { - in: "delete a, b from a where id = 1", - err: "unsupported construct: delete a, b from a where id = 1", + in: "delete a, b from _vt.vreplication where id = 1", + err: "unsupported construct: delete a, b from _vt.vreplication where id = 1", }, { in: "delete from _vt.vreplication where id = 1 order by id", err: "unsupported construct: delete from _vt.vreplication where id = 1 order by id asc", @@ -153,10 +172,22 @@ func TestControllerPlan(t *testing.T) { // Select }, { - in: "select * from _vt.vreplication where id = 1", + in: "select * from _vt.vreplication", + plan: &controllerPlan{ + opcode: selectQuery, + query: "select * from _vt.vreplication", + }, + }, { + in: "select * from _vt.resharding_journal", + plan: &controllerPlan{ + opcode: selectQuery, + query: "select * from _vt.resharding_journal", + }, + }, { + in: "select * from 
_vt.copy_state", plan: &controllerPlan{ opcode: selectQuery, - query: "select * from _vt.vreplication where id = 1", + query: "select * from _vt.copy_state", }, }, { in: "select * from a", diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index bbe760f46d4..9a08a974696 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -32,6 +32,25 @@ import ( "vitess.io/vitess/go/vt/topo" ) +const ( + reshardingJournalTableName = "_vt.resharding_journal" + vreplicationTableName = "_vt.vreplication" + copySateTableName = "_vt.copy_state" + + createReshardingJournalTable = `create table if not exists _vt.resharding_journal( + id bigint, + db_name varbinary(255), + val blob, + primary key (id) +)` + + createCopyState = `create table if not exists _vt.copy_state ( + vrepl_id int, + table_name varbinary(128), + lastpk varbinary(2000), + primary key (vrepl_id, table_name))` +) + var tabletTypesStr = flag.String("vreplication_tablet_type", "REPLICA", "comma separated list of tablet types used as a source") // waitRetryTime can be changed to a smaller value for tests. @@ -102,7 +121,7 @@ func (vre *Engine) Open(ctx context.Context) error { // executeFetchMaybeCreateTable calls DBClient.ExecuteFetch and does one retry if // there's a failure due to mysql.ERNoSuchTable or mysql.ERBadDb which can be fixed -// by re-creating the _vt.vreplication table. +// by re-creating the vreplication tables. func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, query string, maxrows int) (qr *sqltypes.Result, err error) { qr, err = dbClient.ExecuteFetch(query, maxrows) @@ -110,29 +129,33 @@ func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, return } - // If it's a bad table or db, it could be because _vt.vreplication wasn't created. - // In that case we can try creating it again. 
+ // If it's a bad table or db, it could be because the vreplication tables weren't created. + // In that case we can try creating them again. merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb || merr.Num == mysql.ERBadFieldError) { return qr, err } - log.Info("Looks like _vt.vreplication table may not exist. Trying to recreate... ") + log.Info("Looks like the vreplication tables may not exist. Trying to recreate... ") if merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb { for _, query := range binlogplayer.CreateVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { - log.Warningf("Failed to ensure _vt.vreplication table exists: %v", merr) + log.Warningf("Failed to ensure %s exists: %v", vreplicationTableName, merr) return nil, err } } + if _, merr := dbClient.ExecuteFetch(createReshardingJournalTable, 0); merr != nil { + log.Warningf("Failed to ensure %s exists: %v", reshardingJournalTableName, merr) + return nil, err + } } if merr.Num == mysql.ERBadFieldError { - log.Info("Adding column to table _vt.vreplication") + log.Infof("Adding column to table %s", vreplicationTableName) for _, query := range binlogplayer.AlterVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERDupFieldName) { - log.Warningf("Failed to alter _vt.vreplication table: %v", merr) + log.Warningf("Failed to alter %s table: %v", vreplicationTableName, merr) return nil, err } } @@ -286,9 +309,26 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { ct.Stop() delete(vre.controllers, plan.id) } - return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) - case selectQuery: - // select queries are passed through. 
+ if err := dbClient.Begin(); err != nil { + return nil, err + } + qr, err := dbClient.ExecuteFetch(plan.query, 10000) + if err != nil { + return nil, err + } + if _, err := dbClient.ExecuteFetch(plan.delCopyState, 10000); err != nil { + // Legacy vreplication won't create this table. So, ignore table not found error. + merr, isSQLErr := err.(*mysql.SQLError) + if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable) { + return nil, err + } + } + if err := dbClient.Commit(); err != nil { + return nil, err + } + return qr, nil + case selectQuery, reshardingJournalQuery: + // select and resharding journal queries are passed through. return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 10000) } panic("unreachable") diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index a4ac882dd10..27f2d31d676 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -69,7 +69,7 @@ func TestEngineOpen(t *testing.T) { // Verify stats if !reflect.DeepEqual(globalStats.controllers, vre.controllers) { - t.Errorf("stats are mismatched: %v, wnat %v", globalStats.controllers, vre.controllers) + t.Errorf("stats are mismatched: %v, want %v", globalStats.controllers, vre.controllers) } ct := vre.controllers[1] @@ -130,7 +130,7 @@ func TestEngineExec(t *testing.T) { // Verify stats if !reflect.DeepEqual(globalStats.controllers, vre.controllers) { - t.Errorf("stats are mismatched: %v, wnat %v", globalStats.controllers, vre.controllers) + t.Errorf("stats are mismatched: %v, want %v", globalStats.controllers, vre.controllers) } // Test Update @@ -172,14 +172,17 @@ func TestEngineExec(t *testing.T) { // Verify stats if !reflect.DeepEqual(globalStats.controllers, vre.controllers) { - t.Errorf("stats are mismatched: %v, wnat %v", globalStats.controllers, vre.controllers) + t.Errorf("stats are mismatched: %v, want %v", globalStats.controllers, 
vre.controllers) } // Test Delete dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) delQuery := "delete from _vt.vreplication where id = 1" + dbClient.ExpectRequest("begin", nil, nil) dbClient.ExpectRequest(delQuery, testDMLResponse, nil) + dbClient.ExpectRequest("delete from _vt.copy_state where vrepl_id = 1", nil, nil) + dbClient.ExpectRequest("commit", nil, nil) qr, err = vre.Exec(delQuery) if err != nil { @@ -230,7 +233,7 @@ func TestEngineBadInsert(t *testing.T) { // Verify stats if !reflect.DeepEqual(globalStats.controllers, vre.controllers) { - t.Errorf("stats are mismatched: %v, wnat %v", globalStats.controllers, vre.controllers) + t.Errorf("stats are mismatched: %v, want %v", globalStats.controllers, vre.controllers) } } @@ -412,6 +415,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) // Non-recoverable error. 
@@ -425,6 +429,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("insert into _vt.vreplication values (null)", &sqltypes.Result{InsertID: 1}, nil) diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index ba030413247..825e6e3ea63 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -108,7 +108,7 @@ func TestMain(m *testing.M) { return 1 } - if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), CreateCopyState); err != nil { + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), createCopyState); err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 } @@ -368,6 +368,16 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu return qr, err } +func expectDeleteQueries(t *testing.T) { + t.Helper() + expectDBClientQueries(t, []string{ + "begin", + "/delete from _vt.vreplication", + "/delete from _vt.copy_state", + "commit", + }) +} + func expectDBClientQueries(t *testing.T, queries []string) { t.Helper() failed := false @@ -454,9 +464,13 @@ func expectNontxQueries(t *testing.T, queries []string) { } } } - func expectData(t *testing.T, table string, values [][]string) { t.Helper() + customExpectData(t, table, values, env.Mysqld.FetchSuperQuery) +} + +func customExpectData(t *testing.T, table string, values [][]string, exec func(ctx context.Context, query string) (*sqltypes.Result, error)) { + t.Helper() var query string if len(strings.Split(table, ".")) == 1 { 
@@ -464,7 +478,7 @@ func expectData(t *testing.T, table string, values [][]string) { } else { query = fmt.Sprintf("select * from %s", table) } - qr, err := env.Mysqld.FetchSuperQuery(context.Background(), query) + qr, err := exec(context.Background(), query) if err != nil { t.Error(err) return diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index 7df66d1ce78..3c8d3bc27a4 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -114,6 +114,7 @@ nextTable: tablePlan := &TablePlan{ TargetName: tableName, SendRule: sendRule, + Lastpk: lastpk, } plan.TargetTables[tableName] = tablePlan plan.TablePlans[tableName] = tablePlan diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index ca52264c7ca..a22adca9590 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -75,9 +75,7 @@ func TestPlayerCopyTables(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - expectDBClientQueries(t, []string{ - "/delete", - }) + expectDeleteQueries(t) }() expectDBClientQueries(t, []string{ @@ -193,9 +191,7 @@ func TestPlayerCopyBigTable(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - expectDBClientQueries(t, []string{ - "/delete", - }) + expectDeleteQueries(t) }() expectDBClientQueries(t, []string{ @@ -246,6 +242,137 @@ func TestPlayerCopyBigTable(t *testing.T) { }) } +// TestPlayerCopyWildcardRule ensures the copy-catchup back-and-forth loop works correctly +// when the filter uses a wildcard rule +func TestPlayerCopyWildcardRule(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + savedPacketSize := *vstreamer.PacketSize + // 
PacketSize of 1 byte will send at most one row at a time. + *vstreamer.PacketSize = 1 + defer func() { *vstreamer.PacketSize = savedPacketSize }() + + savedCopyTimeout := copyTimeout + // copyTimeout should be low enough to have time to send one row. + copyTimeout = 500 * time.Millisecond + defer func() { copyTimeout = savedCopyTimeout }() + + savedWaitRetryTime := waitRetryTime + // waitRetry time should be very low to cause the wait loop to execute multiple times. + waitRetryTime = 10 * time.Millisecond + defer func() { waitRetryTime = savedWaitRetryTime }() + + execStatements(t, []string{ + "create table src(id int, val varbinary(128), primary key(id))", + "insert into src values(1, 'aaa'), (2, 'bbb')", + fmt.Sprintf("create table %s.src(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src", + fmt.Sprintf("drop table %s.src", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + count := 0 + vstreamRowsSendHook = func(ctx context.Context) { + defer func() { count++ }() + // Allow the first two calls to go through: field info and one row. + if count <= 1 { + return + } + // Insert a statement to test that catchup gets new events. + execStatements(t, []string{ + "insert into src values(3, 'ccc')", + }) + // Wait for context to expire and then send the row. + // This will cause the copier to abort and go back to catchup mode. + <-ctx.Done() + // Do this no more than once. + vstreamRowsSendHook = nil + } + + vstreamRowsHook = func(context.Context) { + // Sleeping 50ms guarantees that the catchup wait loop executes multiple times. + // This is because waitRetryTime is set to 10ms. + time.Sleep(50 * time.Millisecond) + // Do this no more than once. 
+ vstreamRowsHook = nil + } + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + } + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDeleteQueries(t) + }() + + expectDBClientQueries(t, []string{ + "/insert into _vt.vreplication", + // Create the list of tables to copy and transition to Copying state. + "begin", + "/insert into _vt.copy_state", + "/update _vt.vreplication set state='Copying'", + "commit", + "rollback", + // The first fast-forward has no starting point. So, it just saves the current position. + "/update _vt.vreplication set pos=", + "begin", + "insert into src(id,val) values (1,'aaa')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "commit", + "rollback", + // The next catchup executes the new row insert, but will be a no-op. + "begin", + "insert into src(id,val) select 3, 'ccc' from dual where (3) <= (1)", + "/update _vt.vreplication set pos=", + "commit", + // fastForward has nothing to add. Just saves position. + "begin", + "/update _vt.vreplication set pos=", + "commit", + // Second row gets copied. + "begin", + "insert into src(id,val) values (2,'bbb')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "commit", + // Third row copied without going back to catchup state. 
+ "begin", + "insert into src(id,val) values (3,'ccc')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "commit", + "/delete from _vt.copy_state.*src", + // rollback is a no-op because the delete is autocommitted. + "rollback", + // Copy is done. Go into running state. + "/update _vt.vreplication set state='Running'", + // All tables copied. Final catch up followed by Running state. + }) + expectData(t, "src", [][]string{ + {"1", "aaa"}, + {"2", "bbb"}, + {"3", "ccc"}, + }) +} + // TestPlayerCopyTableContinuation tests the copy workflow where tables have been partially copied. func TestPlayerCopyTableContinuation(t *testing.T) { defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) @@ -349,11 +476,7 @@ func TestPlayerCopyTableContinuation(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - for q := range globalDBQueries { - if strings.HasPrefix(q, "delete from _vt.vreplication") { - break - } - } + expectDeleteQueries(t) }() for q := range globalDBQueries { @@ -390,6 +513,11 @@ func TestPlayerCopyTableContinuation(t *testing.T) { "/delete from _vt.copy_state.*not_copied", "rollback", }) + // Explicitly eat the Running state query. You can't make expectNontxQueries + wait for it because it ignores _vt.vreplication events. + expectDBClientQueries(t, []string{ + "/update _vt.vreplication set state='Running'", + }) expectData(t, "dst1", [][]string{ {"1", "insert in"}, {"2", "no change"}, @@ -469,11 +597,7 @@ func TestPlayerCopyWildcardTableContinuation(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - for q := range globalDBQueries { - if strings.HasPrefix(q, "delete from _vt.vreplication") { - break - } - } + expectDeleteQueries(t) }() expectNontxQueries(t, []string{ @@ -485,6 +609,11 @@ func TestPlayerCopyWildcardTableContinuation(t *testing.T) { "/delete from _vt.copy_state.*dst", "rollback", }) + // Explicitly eat the Running state query. 
You can't make expectNontxQueries + wait for it because it ignores _vt.vreplication events. + expectDBClientQueries(t, []string{ + "/update _vt.vreplication set state='Running'", + }) expectData(t, "dst", [][]string{ {"2", "copied"}, {"3", "uncopied"}, @@ -518,9 +647,7 @@ func TestPlayerCopyTablesNone(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - expectDBClientQueries(t, []string{ - "/delete", - }) + expectDeleteQueries(t) }() expectDBClientQueries(t, []string{ @@ -580,9 +707,7 @@ func TestPlayerCopyTableCancel(t *testing.T) { if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - expectDBClientQueries(t, []string{ - "/delete", - }) + expectDeleteQueries(t) }() // Make sure rows get copied in spite of the early context cancel. @@ -607,7 +732,7 @@ func TestPlayerCopyTableCancel(t *testing.T) { // copy of dst1 is done: delete from copy_state. "/delete from _vt.copy_state.*dst1", "rollback", - // All tables copied. Final catch up followed by Running state. + // All tables copied. Go into running state. 
"/update _vt.vreplication set state='Running'", }) expectData(t, "dst1", [][]string{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go index bc42d6f2f4c..e2c0e80c9b1 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/context" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -446,6 +447,72 @@ func TestPlayerKeywordNames(t *testing.T) { } } } +func TestUnicode(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table src1(id int, val varchar(128) COLLATE utf8_unicode_ci, primary key(id))", + fmt.Sprintf("create table %s.dst1(id int, val varchar(128) COLLATE utf8_unicode_ci, primary key(id)) DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + testcases := []struct { + input string + output []string + table string + data [][]string + }{{ + // insert with insertNormal + input: "insert into src1 values(1, '👍')", + output: []string{ + "begin", + // We should expect the "Mojibaked" version. + "insert into dst1(id,val) values (1,'ðŸ‘\u008d')", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst1", + data: [][]string{ + {"1", "👍"}, + }, + }} + + // We need a latin1 connection. 
+ conn, err := env.Mysqld.GetDbaConnection() + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + if _, err := conn.ExecuteFetch("set names latin1", 10000, false); err != nil { + t.Fatal(err) + } + + for _, tcases := range testcases { + if _, err := conn.ExecuteFetch(tcases.input, 10000, false); err != nil { + t.Error(err) + } + expectDBClientQueries(t, tcases.output) + if tcases.table != "" { + customExpectData(t, tcases.table, tcases.data, func(ctx context.Context, query string) (*sqltypes.Result, error) { + return conn.ExecuteFetch(query, 10000, true) + }) + } + } +} func TestPlayerUpdates(t *testing.T) { defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) @@ -1471,9 +1538,7 @@ func startVReplication(t *testing.T, filter *binlogdatapb.Filter, onddl binlogda if _, err := playerEngine.Exec(query); err != nil { t.Fatal(err) } - expectDBClientQueries(t, []string{ - "/delete", - }) + expectDeleteQueries(t) }, int(qr.InsertID) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index b83ddde3060..d067191c279 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -43,15 +43,6 @@ var ( relayLogMaxItems = 1000 copyTimeout = 1 * time.Hour replicaLagTolerance = 10 * time.Second - - // CreateCopyState is the list of statements to execute for creating - // the _vt.copy_state table - CreateCopyState = []string{ - `create table if not exists _vt.copy_state ( - vrepl_id int, - table_name varbinary(128), - lastpk varbinary(2000), - primary key (vrepl_id, table_name))`} ) type vreplicator struct { @@ -142,11 +133,9 @@ func (vr *vreplicator) readSettings(ctx context.Context) (settings binlogplayer. return settings, numTablesToCopy, err } log.Info("Looks like _vt.copy_state table may not exist. Trying to create... 
") - for _, query := range CreateCopyState { - if _, merr := vr.dbClient.Execute(query); merr != nil { - log.Errorf("Failed to ensure _vt.copy_state table exists: %v", merr) - return settings, numTablesToCopy, err - } + if _, merr := vr.dbClient.Execute(createCopyState); merr != nil { + log.Errorf("Failed to ensure _vt.copy_state table exists: %v", merr) + return settings, numTablesToCopy, err } // Redo the read. qr, err = vr.dbClient.Execute(query) diff --git a/go/vt/vttablet/tabletserver/messager/engine.go b/go/vt/vttablet/tabletserver/messager/engine.go index 8305115f1be..f22d0243479 100644 --- a/go/vt/vttablet/tabletserver/messager/engine.go +++ b/go/vt/vttablet/tabletserver/messager/engine.go @@ -269,7 +269,7 @@ func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altere } if me.managers[name] != nil { tabletenv.InternalErrors.Add("Messages", 1) - log.Errorf("Newly created table alread exists in messages: %s", name) + log.Errorf("Newly created table already exists in messages: %s", name) continue } mm := newMessageManager(me.tsv, t, me.conns, me.postponeSema) diff --git a/go/vt/vttablet/tabletserver/planbuilder/dml.go b/go/vt/vttablet/tabletserver/planbuilder/dml.go index 5e67140fa72..3b5f35c155c 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/dml.go +++ b/go/vt/vttablet/tabletserver/planbuilder/dml.go @@ -50,6 +50,11 @@ func analyzeUpdate(upd *sqlparser.Update, tables map[string]*schema.Table) (plan } table, tableErr := plan.setTable(tableName, tables) + // Updates aren't supported on topics + if tableErr == nil && table.IsTopic() { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "updates not allowed on topics") + } + // In passthrough dml mode, allow the operation even if the // table is unknown in the schema. 
if PassthroughDMLs { @@ -118,6 +123,11 @@ func analyzeDelete(del *sqlparser.Delete, tables map[string]*schema.Table) (plan } table, tableErr := plan.setTable(tableName, tables) + // Deletes aren't supported on topics + if tableErr == nil && table.IsTopic() { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "deletes not allowed on topics") + } + // In passthrough dml mode, allow the operation even if the // table is unknown in the schema. if PassthroughDMLs { @@ -202,6 +212,11 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan return nil, err } + // Selects aren't supported on topics + if table.IsTopic() { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "selects not allowed on topics") + } + if sel.Where != nil { comp, ok := sel.Where.Expr.(*sqlparser.ComparisonExpr) if ok && comp.IsImpossible() { @@ -328,6 +343,11 @@ func analyzeInsert(ins *sqlparser.Insert, tables map[string]*schema.Table) (plan // message inserts need to continue being strict, even in passthrough dml mode, // because field defaults are set here + case tableErr == nil && table.IsTopic(): + plan.PlanID = PlanInsertTopic + plan.Reason = ReasonTopic + return plan, nil + case PassthroughDMLs: // In passthrough dml mode, allow the operation even if the // table is unknown in the schema. diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index 65aa41c71a1..b332c67dcf6 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -69,6 +69,8 @@ const ( PlanInsertSubquery // PlanUpsertPK is for insert ... on duplicate key constructs. PlanUpsertPK + // PlanInsertTopic is for inserting into message topics. + PlanInsertTopic // PlanInsertMessage is for inserting into message tables. PlanInsertMessage // PlanSet is for SET statements. 
@@ -100,6 +102,7 @@ var planName = [NumPlans]string{ "INSERT_PK", "INSERT_SUBQUERY", "UPSERT_PK", + "INSERT_TOPIC", "INSERT_MESSAGE", "SET", "DDL", @@ -153,6 +156,7 @@ const ( ReasonUpsertMultiRow ReasonReplace ReasonMultiTable + ReasonTopic NumReasons ) @@ -167,6 +171,7 @@ var reasonName = [NumReasons]string{ "UPSERT_MULTI_ROW", "REPLACE", "MULTI_TABLE", + "TOPIC", } // String returns a string representation of a ReasonType. diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index b23e26fa529..0090a3ebcdd 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -958,6 +958,29 @@ options:PassthroughDMLs "PKValues": [[1, 3], [2, 4]] } +# topic insert with time_scheduled specified +"insert into test_topic(time_scheduled, id, message) values(1, 2, 'aa')" +{ + "PlanID": "INSERT_TOPIC", + "Reason": "TOPIC", + "TableName": "test_topic", + "Permissions": [ + { + "TableName": "test_topic", + "Role": 1 + } + ], + "FullQuery": "insert into test_topic(time_scheduled, id, message) values (1, 2, 'aa')" +} + +# topic update +"update test_topic set time_next = 1 where id = 1" +"updates not allowed on topics" + +# topic delete +"delete from test_topic where id = 1" +"deletes not allowed on topics" + # message insert with time_scheduled specified "insert into msg(time_scheduled, id, message) values(1, 2, 'aa')" { diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json b/go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json index 1a23e93eea4..6441dd36eea 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json @@ -355,6 +355,65 @@ ], "Type": 2 }, + { + "Name": "msg1_with_topic", + "Columns": [ + { + "Name": "time_scheduled" + }, + { + "Name": "id" + }, + { + "Name": 
"time_next" + }, + { + "Name": "epoch" + }, + { + "Name": "time_created" + }, + { + "Name": "time_acked" + }, + { + "Name": "message" + } + ], + "Indexes": [ + { + "Name": "PRIMARY", + "Unique": true, + "Columns": [ + "time_scheduled", + "id" + ], + "Cardinality": [ + 1 + ], + "DataColumns": [ + ] + } + ], + "PKColumns": [ + 0, + 1 + ], + "Type": 2, + "MessageInfo": { + "Topic": "test_topic" + } + }, + { + "Name": "test_topic", + "TopicInfo": { + "Subscribers": [ + { + "Name": "msg1_with_topic" + } + ] + } + }, { "Name": "dual", "Type": 0 diff --git a/go/vt/vttablet/tabletserver/rules/map_test.go b/go/vt/vttablet/tabletserver/rules/map_test.go index 1e788e228fb..b47715612cc 100644 --- a/go/vt/vttablet/tabletserver/rules/map_test.go +++ b/go/vt/vttablet/tabletserver/rules/map_test.go @@ -83,7 +83,7 @@ func TestMapSetRulesWithNil(t *testing.T) { t.Errorf("GetRules failed to retrieve blacklistQueryRules that has been set: %s", err) } if !reflect.DeepEqual(qrs, blacklistRules) { - t.Errorf("blacklistQueryRules retrived is %v, but the expected value should be %v", qrs, blacklistQueryRules) + t.Errorf("blacklistQueryRules retrieved is %v, but the expected value should be %v", qrs, blacklistQueryRules) } qri.SetRules(blacklistQueryRules, nil) @@ -93,7 +93,7 @@ func TestMapSetRulesWithNil(t *testing.T) { t.Errorf("GetRules failed to retrieve blacklistQueryRules that has been set: %s", err) } if !reflect.DeepEqual(qrs, New()) { - t.Errorf("blacklistQueryRules retrived is %v, but the expected value should be %v", qrs, blacklistQueryRules) + t.Errorf("blacklistQueryRules retrieved is %v, but the expected value should be %v", qrs, blacklistQueryRules) } } @@ -142,7 +142,7 @@ func TestMapGetSetQueryRules(t *testing.T) { t.Errorf("GetRules failed to retrieve blacklistQueryRules that has been set: %s", err) } if !reflect.DeepEqual(qrs, blacklistRules) { - t.Errorf("blacklistQueryRules retrived is %v, but the expected value should be %v", qrs, blacklistRules) + 
t.Errorf("blacklistQueryRules retrieved is %v, but the expected value should be %v", qrs, blacklistRules) } qrs, err = qri.Get(blacklistQueryRules) @@ -150,7 +150,7 @@ func TestMapGetSetQueryRules(t *testing.T) { t.Errorf("GetRules failed to retrieve blacklistQueryRules that has been set: %s", err) } if !reflect.DeepEqual(qrs, blacklistRules) { - t.Errorf("blacklistQueryRules retrived is %v, but the expected value should be %v", qrs, blacklistRules) + t.Errorf("blacklistQueryRules retrieved is %v, but the expected value should be %v", qrs, blacklistRules) } qrs, err = qri.Get(customQueryRules) @@ -158,7 +158,7 @@ func TestMapGetSetQueryRules(t *testing.T) { t.Errorf("GetRules failed to retrieve customQueryRules that has been set: %s", err) } if !reflect.DeepEqual(qrs, otherRules) { - t.Errorf("customQueryRules retrived is %v, but the expected value should be %v", qrs, customQueryRules) + t.Errorf("customQueryRules retrieved is %v, but the expected value should be %v", qrs, customQueryRules) } } diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 65ad2dc0961..2afa16c8f53 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -180,6 +180,11 @@ func (se *Engine) Open() error { } se.tables = tables se.lastChange = curTime + + // register message topics on the engine if necessary + // must run after se.tables is set + se.registerTopics() + se.ticks.Start(func() { if err := se.Reload(ctx); err != nil { log.Errorf("periodic schema reload failed: %v", err) @@ -257,6 +262,7 @@ func (se *Engine) Reload(ctx context.Context) error { // The following section requires us to hold mu. 
rec := concurrency.AllErrorRecorder{} curTables := map[string]bool{"dual": true} + var created, altered []string for _, row := range tableData.Rows { tableName := row[0].ToString() curTables[tableName] = true @@ -264,7 +270,13 @@ func (se *Engine) Reload(ctx context.Context) error { // Check if we know about the table or it has been recreated. if _, ok := se.tables[tableName]; !ok || createTime >= se.lastChange { log.Infof("Reloading schema for table: %s", tableName) - rec.RecordError(se.tableWasCreatedOrAltered(ctx, tableName)) + wasCreated, err := se.tableWasCreatedOrAltered(ctx, tableName) + rec.RecordError(err) + if wasCreated { + created = append(created, tableName) + } else { + altered = append(altered, tableName) + } } else { // Only update table_rows, data_length, index_length, max_data_length se.tables[tableName].SetMysqlStats(row[4], row[5], row[6], row[7], row[8]) @@ -278,14 +290,19 @@ func (se *Engine) Reload(ctx context.Context) error { if curTables[tableName] { continue } - delete(se.tables, tableName) - dropped = append(dropped, tableName) - } - // We only need to broadcast dropped tables because - // tableWasCreatedOrAltered will broadcast the other changes. - if len(dropped) > 0 { - se.broadcast(nil, nil, dropped) + + // only keep track of non-topic table drops + if !se.tables[tableName].IsTopic() { + dropped = append(dropped, tableName) + delete(se.tables, tableName) + } } + + // register message topics on the engine if necessary + // must run after se.tables is set + se.registerTopics() + + se.broadcast(created, altered, dropped) return rec.Error() } @@ -306,24 +323,24 @@ func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, // tableWasCreatedOrAltered must be called if a DDL was applied to that table. 
// the se.mu mutex _must_ be locked before entering this method -func (se *Engine) tableWasCreatedOrAltered(ctx context.Context, tableName string) error { +func (se *Engine) tableWasCreatedOrAltered(ctx context.Context, tableName string) (bool, error) { if !se.isOpen { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "DDL called on closed schema") + return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "DDL called on closed schema") } conn, err := se.conns.Get(ctx) if err != nil { - return err + return false, err } defer conn.Recycle() tableData, err := conn.Exec(ctx, mysql.BaseShowTablesForTable(tableName), 1, false) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "tableWasCreatedOrAltered: information_schema query failed for table %s: %v", tableName, err) + return false, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "tableWasCreatedOrAltered: information_schema query failed for table %s: %v", tableName, err) } if len(tableData.Rows) != 1 { // This can happen if DDLs race with each other. - return nil + return false, nil } row := tableData.Rows[0] table, err := LoadTable( @@ -334,25 +351,69 @@ func (se *Engine) tableWasCreatedOrAltered(ctx context.Context, tableName string ) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "tableWasCreatedOrAltered: failed to load table %s: %v", tableName, err) + return false, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "tableWasCreatedOrAltered: failed to load table %s: %v", tableName, err) } + // table_rows, data_length, index_length, max_data_length table.SetMysqlStats(row[4], row[5], row[6], row[7], row[8]) - var created, altered []string + wasCreated := true if _, ok := se.tables[tableName]; ok { // If the table already exists, we overwrite it with the latest info. // This also means that the query cache needs to be cleared. // Otherwise, the query plans may not be in sync with the schema. 
log.Infof("Updating table %s", tableName) - altered = append(altered, tableName) - } else { - created = append(created, tableName) + wasCreated = false } se.tables[tableName] = table log.Infof("Initialized table: %s, type: %s", tableName, TypeNames[table.Type]) - se.broadcast(created, altered, nil) - return nil + return wasCreated, nil +} + +// registerTopics optionally connects the vt_topic metadata on a message table +// to a map of topic strings. A table can belong to only one topic. +func (se *Engine) registerTopics() { + // first drop all topics + for tableName, table := range se.tables { + if table.IsTopic() { + delete(se.tables, tableName) + } + } + + // then register all the topics from scratch + for _, table := range se.tables { + se.registerTopic(table) + } +} + +func (se *Engine) registerTopic(ta *Table) { + if ta.MessageInfo == nil || ta.MessageInfo.Topic == "" { + return + } + + topicName := ta.MessageInfo.Topic + topicTable, ok := se.tables[topicName] + if !ok { + // initialize topic table if necessary + topicTable = NewTable(topicName) + topicTable.TopicInfo = &TopicInfo{ + Subscribers: make([]*Table, 0, 1), + } + se.tables[topicName] = topicTable + log.Infof("creating topic table '%s'", topicName) + } else { + // check to see if this table is already registered to the topic + // so we don't double register + for _, t := range topicTable.TopicInfo.Subscribers { + if t.Name == ta.Name { + return + } + } + } + + // append this table to the list of subscribed tables to the topic + log.Infof("subscribing message table '%s' to topic '%s'", ta.Name.String(), topicName) + topicTable.TopicInfo.Subscribers = append(topicTable.TopicInfo.Subscribers, ta) } // RegisterNotifier registers the function for schema change notification. 
diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 09ab7089379..dcf3c4d8fc8 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -21,7 +21,6 @@ import ( "fmt" "net/http" "net/http/httptest" - "reflect" "strings" "testing" "time" @@ -267,29 +266,13 @@ func TestCreateOrUpdateTable(t *testing.T) { mysql.BaseShowTablesRow(existingTable, false, ""), }, }) - i := 0 - se.RegisterNotifier("test", func(schema map[string]*Table, created, altered, dropped []string) { - switch i { - case 0: - if len(created) != 5 { - t.Errorf("callback 0: %v, want len of 5\n", created) - } - case 1: - want := []string{"test_table_01"} - if !reflect.DeepEqual(altered, want) { - t.Errorf("callback 0: %v, want %v\n", created, want) - } - default: - t.Fatal("unexpected") - } - i++ - }) - defer se.UnregisterNotifier("test") - if err := se.tableWasCreatedOrAltered(context.Background(), "test_table_01"); err != nil { + + wasCreated, err := se.tableWasCreatedOrAltered(context.Background(), existingTable) + if err != nil { t.Fatal(err) } - if i < 2 { - t.Error("Notifier did not get called") + if wasCreated { + t.Error("wanted wasCreated == false") } } diff --git a/go/vt/vttablet/tabletserver/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go index 3257af030d0..af57966d6ab 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table.go +++ b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -157,6 +157,8 @@ func loadMessageInfo(ta *Table, comment string) error { keyvals[kv[0]] = kv[1] } var err error + ta.MessageInfo.Topic = getTopic(keyvals) + if ta.MessageInfo.AckWaitDuration, err = getDuration(keyvals, "vt_ack_wait"); err != nil { return err } @@ -239,3 +241,7 @@ func getNum(in map[string]string, key string) (int, error) { } return v, nil } + +func getTopic(in map[string]string) string { + return in["vt_topic"] +} diff --git 
a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index e184c417149..7d1a583defb 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -200,6 +200,83 @@ func TestLoadTableMessage(t *testing.T) { } } +func TestLoadTableMessageTopic(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + for query, result := range getMessageTableQueries() { + db.AddQuery(query, result) + } + table, err := newTestLoadTable("USER_TABLE", "vitess_message,vt_topic=test_topic,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30", db) + if err != nil { + t.Fatal(err) + } + want := &Table{ + Name: sqlparser.NewTableIdent("test_table"), + Type: Message, + MessageInfo: &MessageInfo{ + IDPKIndex: 1, + Fields: []*querypb.Field{{ + Name: "id", + Type: sqltypes.Int64, + }, { + Name: "time_scheduled", + Type: sqltypes.Int64, + }, { + Name: "message", + Type: sqltypes.VarBinary, + }}, + AckWaitDuration: 30 * time.Second, + PurgeAfterDuration: 120 * time.Second, + BatchSize: 1, + CacheSize: 10, + PollInterval: 30 * time.Second, + Topic: "test_topic", + }, + } + table.Columns = nil + table.Indexes = nil + table.PKColumns = nil + if !reflect.DeepEqual(table, want) { + t.Errorf("Table:\n%+v, want\n%+v", table, want) + t.Errorf("Table:\n%+v, want\n%+v", table.MessageInfo, want.MessageInfo) + } + + // Missing property + _, err = newTestLoadTable("USER_TABLE", "vitess_message,vt_topic=test_topic,vt_ack_wait=30", db) + wanterr := "not specified for message table" + if err == nil || !strings.Contains(err.Error(), wanterr) { + t.Errorf("newTestLoadTable: %v, want %s", err, wanterr) + } + + // id column must be part of primary key. 
+ for query, result := range getMessageTableQueries() { + db.AddQuery(query, result) + } + db.AddQuery( + "show index from test_table", + &sqltypes.Result{ + Fields: mysql.ShowIndexFromTableFields, + RowsAffected: 1, + Rows: [][]sqltypes.Value{ + mysql.ShowIndexFromTableRow("test_table", true, "PRIMARY", 1, "time_scheduled", false), + }, + }) + _, err = newTestLoadTable("USER_TABLE", "vitess_message,vt_topic=test_topic,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30", db) + wanterr = "id column is not part of the primary key for message table: test_table" + if err == nil || err.Error() != wanterr { + t.Errorf("newTestLoadTable: %v, want %s", err, wanterr) + } + + for query, result := range getTestLoadTableQueries() { + db.AddQuery(query, result) + } + _, err = newTestLoadTable("USER_TABLE", "vitess_message,vt_topic=test_topic,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30", db) + wanterr = "missing from message table: test_table" + if err == nil || !strings.Contains(err.Error(), wanterr) { + t.Errorf("newTestLoadTable: %v, must contain %s", err, wanterr) + } +} + func TestLoadTableWithBitColumn(t *testing.T) { db := fakesqldb.New(t) defer db.Close() diff --git a/go/vt/vttablet/tabletserver/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go index 5e11e33ceab..6748545af4c 100644 --- a/go/vt/vttablet/tabletserver/schema/schema.go +++ b/go/vt/vttablet/tabletserver/schema/schema.go @@ -68,6 +68,9 @@ type Table struct { // MessageInfo contains info for message tables. MessageInfo *MessageInfo + // TopicInfo contains info for message topics. + TopicInfo *TopicInfo + // These vars can be accessed concurrently. TableRows sync2.AtomicInt64 DataLength sync2.AtomicInt64 @@ -87,6 +90,13 @@ type SequenceInfo struct { LastVal int64 } +// TopicInfo contains info specific to message topics. 
+type TopicInfo struct { + // Subscribers links to all the message tables + // subscribed to this topic + Subscribers []*Table +} + // MessageInfo contains info specific to message tables. type MessageInfo struct { // IDPKIndex is the index of the ID column @@ -99,6 +109,10 @@ type MessageInfo struct { // returned for subscribers. Fields []*querypb.Field + // Optional topic to subscribe to. Any messages + // published to the topic will be added to this table. + Topic string + // AckWaitDuration specifies how long to wait after // the message was first sent. The back-off doubles // every attempt. @@ -201,6 +215,14 @@ func (ta *Table) HasPrimary() bool { return len(ta.Indexes) != 0 && ta.Indexes[0].Name.EqualString("primary") } +// IsTopic returns true if TopicInfo is not nil. +func (ta *Table) IsTopic() bool { + if ta.TopicInfo == nil { + return false + } + return true +} + // UniqueIndexes returns the number of unique indexes on the table func (ta *Table) UniqueIndexes() int { unique := 0 diff --git a/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go index d543b49a6be..eadf3f3c812 100644 --- a/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go +++ b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go @@ -38,7 +38,7 @@ import ( // // The algorithm works by issuing a query to the database to find the minimum and maximum // elements of the split column in the table referenced by the given SQL query. Denote these -// by min and max, respecitvely. The algorithm then "splits" the interval [min, max] into +// by min and max, respectively. The algorithm then "splits" the interval [min, max] into // SplitParams.split_count sub-intervals of equal length: // [a_1, a_2], [a_2, a_3],..., [a_{split_count}, a_{split_count+1}], // where min=a_1 < a_2 < a_3 < ... < a_split_count < a_{split_count+1}=max. 
diff --git a/go/vt/vttablet/tabletserver/status.go b/go/vt/vttablet/tabletserver/status.go index ad8eaf7112b..5bea9a5e5c1 100644 --- a/go/vt/vttablet/tabletserver/status.go +++ b/go/vt/vttablet/tabletserver/status.go @@ -100,7 +100,7 @@ function drawQPSChart() { var idx = qps[planTypes[j]].length - i - 1; datum.push(+qps[planTypes[j]][idx].toFixed(2)); } else { - // Assume 0.0 QPS for older, non-existant data points. + // Assume 0.0 QPS for older, non-existent data points. datum.push(0); } } diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 4c62eca4b10..64711af9b83 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -1010,30 +1010,58 @@ func (tsv *TabletServer) Execute(ctx context.Context, target *querypb.Target, sq if err != nil { return err } - qre := &QueryExecutor{ - query: query, - marginComments: comments, - bindVars: bindVariables, - transactionID: transactionID, - options: options, - plan: plan, - ctx: ctx, - logStats: logStats, - tsv: tsv, - } - extras := tsv.watcher.ComputeExtras(options) - result, err = qre.Execute() - if err != nil { - return err + if plan.PlanID == planbuilder.PlanInsertTopic { + result, err = tsv.topicExecute(ctx, query, comments, bindVariables, transactionID, options, plan, logStats) + } else { + result, err = tsv.qreExecute(ctx, query, comments, bindVariables, transactionID, options, plan, logStats) } - result.Extras = extras - result = result.StripMetadata(sqltypes.IncludeFieldsOrDefault(options)) - return nil + + return err }, ) return result, err } +func (tsv *TabletServer) topicExecute(ctx context.Context, query string, comments sqlparser.MarginComments, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, plan *TabletPlan, logStats *tabletenv.LogStats) (result *sqltypes.Result, err error) { + for _, subscriber := range plan.Table.TopicInfo.Subscribers { + // 
replace the topic name with the subscribed message table name + newQuery := strings.Replace(query, plan.Table.Name.String(), subscriber.Name.String(), -1) + var newPlan *TabletPlan + newPlan, err = tsv.qe.GetPlan(ctx, logStats, newQuery, skipQueryPlanCache(options)) + if err != nil { + return nil, err + } + + // because there isn't an option to return multiple results, only the last + // message table result is returned + result, err = tsv.qreExecute(ctx, newQuery, comments, bindVariables, transactionID, options, newPlan, logStats) + } + return result, err +} + +func (tsv *TabletServer) qreExecute(ctx context.Context, query string, comments sqlparser.MarginComments, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, plan *TabletPlan, logStats *tabletenv.LogStats) (result *sqltypes.Result, err error) { + qre := &QueryExecutor{ + query: query, + marginComments: comments, + bindVars: bindVariables, + transactionID: transactionID, + options: options, + plan: plan, + ctx: ctx, + logStats: logStats, + tsv: tsv, + } + extras := tsv.watcher.ComputeExtras(options) + result, err = qre.Execute() + if err != nil { + return nil, err + } + result.Extras = extras + result = result.StripMetadata(sqltypes.IncludeFieldsOrDefault(options)) + + return result, nil +} + // StreamExecute executes the query and streams the result. // The first QueryResult will have Fields set (and Rows nil). // The subsequent QueryResult will have Rows set (and Fields nil). @@ -1383,7 +1411,7 @@ func (tsv *TabletServer) VStreamRows(ctx context.Context, target *querypb.Target // SplitQuery splits a query + bind variables into smaller queries that return a // subset of rows from the original query. This is the new version that supports multiple -// split columns and multiple split algortihms. +// split columns and multiple split algorithms. // See the documentation of SplitQueryRequest in proto/vtgate.proto for more details. 
func (tsv *TabletServer) SplitQuery( ctx context.Context, diff --git a/go/vt/vttablet/tabletserver/txlogz.go b/go/vt/vttablet/tabletserver/txlogz.go index 553c2631906..843994bdd7e 100644 --- a/go/vt/vttablet/tabletserver/txlogz.go +++ b/go/vt/vttablet/tabletserver/txlogz.go @@ -79,7 +79,7 @@ func init() { // current transaction log. // Endpoint: /txlogz?timeout=%d&limit=%d // timeout: the txlogz will keep dumping transactions until timeout -// limit: txlogz will keep dumping transcations until it hits the limit +// limit: txlogz will keep dumping transactions until it hits the limit func txlogzHandler(w http.ResponseWriter, req *http.Request) { if err := acl.CheckAccessHTTP(req, acl.DEBUGGING); err != nil { acl.SendError(w, err) diff --git a/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go b/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go index 466da683795..f0c5e2f49cb 100644 --- a/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go +++ b/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go @@ -62,7 +62,7 @@ var ( // been rejected due to exceeding the max queue size per row (range). queueExceededDryRun = stats.NewCountersWithSingleLabel( "TxSerializerQueueExceededDryRun", - "Dry-run Number of transactions that were rejcted because the max queue size was exceeded", + "Dry-run Number of transactions that were rejected because the max queue size was exceeded", "table_name") // globalQueueExceeded is the same as queueExceeded but for the global queue. diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index 85ce7a0f704..401125fbde3 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -240,7 +240,7 @@ func (t *TxThrottler) Close() { // Throttle should be called before a new transaction is started. // It returns true if the transaction should not proceed (the caller // should back off). 
Throttle requires that Open() was previously called -// successfuly. +// successfully. func (t *TxThrottler) Throttle() (result bool) { if !t.config.enabled { return false diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index 61e27750df8..82d3b04ad5b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -19,11 +19,11 @@ package vstreamer import ( "context" "fmt" - "sync" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -82,6 +82,9 @@ func (rs *rowStreamer) Stream() error { return err } defer conn.Close() + if _, err := conn.ExecuteFetch("set names binary", 1, false); err != nil { + return err + } return rs.streamQuery(conn, rs.send) } @@ -169,15 +172,10 @@ func (rs *rowStreamer) buildSelect() (string, error) { } func (rs *rowStreamer) streamQuery(conn *mysql.Conn, send func(*binlogdatapb.VStreamRowsResponse) error) error { - unlock, gtid, err := rs.lockTable() + gtid, err := rs.startStreaming(conn) if err != nil { return err } - defer unlock() - - if err := conn.ExecuteStreamFetch(rs.sendQuery); err != nil { - return err - } // first call the callback with the fields flds, err := conn.Fields() @@ -200,9 +198,6 @@ func (rs *rowStreamer) streamQuery(conn *mysql.Conn, send func(*binlogdatapb.VSt if err != nil { return fmt.Errorf("stream send error: %v", err) } - if err := unlock(); err != nil { - return err - } response := &binlogdatapb.VStreamRowsResponse{} lastpk := make([]sqltypes.Value, len(rs.pkColumns)) @@ -259,33 +254,40 @@ func (rs *rowStreamer) streamQuery(conn *mysql.Conn, send func(*binlogdatapb.VSt return nil } -func (rs *rowStreamer) lockTable() (unlock func() error, gtid string, err error) { - conn, err 
:= rs.mysqlConnect() +func (rs *rowStreamer) startStreaming(conn *mysql.Conn) (string, error) { + lockConn, err := rs.mysqlConnect() if err != nil { - return nil, "", err + return "", err } + // To be safe, always unlock tables, even if lock tables might fail. + defer func() { + _, err := lockConn.ExecuteFetch("unlock tables", 0, false) + if err != nil { + log.Warning("Unlock tables failed: %v", err) + } else { + log.Infof("Tables unlocked", rs.plan.Table.Name) + } + lockConn.Close() + }() + + log.Infof("Locking table %s for copying", rs.plan.Table.Name) // mysql recommends this before locking tables. - if _, err := conn.ExecuteFetch("set autocommit=0", 0, false); err != nil { - return nil, "", err + if _, err := lockConn.ExecuteFetch("set autocommit=0", 0, false); err != nil { + return "", err } - if _, err := conn.ExecuteFetch(fmt.Sprintf("lock tables %s read", sqlparser.String(sqlparser.NewTableIdent(rs.plan.Table.Name))), 0, false); err != nil { - return nil, "", err - } - var once sync.Once - unlock = func() error { - var err error - once.Do(func() { - _, err = conn.ExecuteFetch("unlock tables", 0, false) - conn.Close() - }) - return err + if _, err := lockConn.ExecuteFetch(fmt.Sprintf("lock tables %s read", sqlparser.String(sqlparser.NewTableIdent(rs.plan.Table.Name))), 0, false); err != nil { + return "", err } - pos, err := conn.MasterPosition() + pos, err := lockConn.MasterPosition() if err != nil { - unlock() - return nil, "", err + return "", err } - return unlock, mysql.EncodePosition(pos), nil + + if err := conn.ExecuteStreamFetch(rs.sendQuery); err != nil { + return "", err + } + + return mysql.EncodePosition(pos), nil } func (rs *rowStreamer) mysqlConnect() (*mysql.Conn, error) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go index b7f8db8022c..5266b3fe864 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go +++ 
b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -107,6 +107,56 @@ func TestStreamRowsScan(t *testing.T) { checkStream(t, "select * from t3", []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewVarBinary("aaa")}, wantQuery, wantStream) } +func TestStreamRowsUnicode(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table t1(id int, val varchar(128) COLLATE utf8_unicode_ci, primary key(id))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + // We need a latin1 connection. + conn, err := env.Mysqld.GetDbaConnection() + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + if _, err := conn.ExecuteFetch("set names latin1", 10000, false); err != nil { + t.Fatal(err) + } + // This will get "Mojibaked" into the utf8 column. + if _, err := conn.ExecuteFetch("insert into t1 values(1, '👍')", 10000, false); err != nil { + t.Fatal(err) + } + + savecp := *engine.cp + // Rowstreamer must override this to "binary" + engine.cp.Charset = "latin1" + defer func() { engine.cp = &savecp }() + err = engine.StreamRows(context.Background(), "select * from t1", nil, func(rows *binlogdatapb.VStreamRowsResponse) error { + // Skip fields. + if len(rows.Rows) == 0 { + return nil + } + got := fmt.Sprintf("%q", rows.Rows[0].Values) + // We should expect a "Mojibaked" version of the string. 
+ want := `"1ðŸ‘\u008d"` + if got != want { + t.Errorf("rows.Rows[0].Values: %s, want %s", got, want) + } + return nil + }) + if err != nil { + t.Error(err) + } +} + func TestStreamRowsKeyRange(t *testing.T) { if testing.Short() { t.Skip() diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index b0298a279d6..91e09e8f330 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -135,7 +135,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog bufferedEvents []*binlogdatapb.VEvent curSize int ) - // Buffering only takes row lenghts into consideration. + // Buffering only takes row lengths into consideration. // Length of other events is considered negligible. // If a new row event causes the packet size to be exceeded, // all existing rows are sent without the new row. diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 7e3fa92a2dd..8532052dded 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -30,6 +30,12 @@ import ( "strings" "unicode" + "vitess.io/vitess/go/vt/proto/logutil" + // we need to import the grpcvtctlclient library so the gRPC + // vtctl client is registered and can be used. 
+ _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" + "vitess.io/vitess/go/vt/vtctl/vtctlclient" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" @@ -101,6 +107,9 @@ type Config struct { TransactionMode string TransactionTimeout float64 + + // The host name to use for the table otherwise it will be resolved from the local hostname + TabletHostName string } // InitSchemas is a shortcut for tests that just want to setup a single @@ -235,6 +244,16 @@ func (db *LocalCluster) Setup() error { return err } + if !db.OnlyMySQL { + log.Infof("Starting vtcombo...") + db.vt = VtcomboProcess(db.Env, &db.Config, db.mysql) + if err := db.vt.WaitStart(); err != nil { + return err + } + log.Infof("vtcombo up: %s", db.vt.Address()) + } + + // Load schema will apply db and vschema migrations. Running after vtcombo starts to be able to apply vschema migrations if err := db.loadSchema(); err != nil { return err } @@ -245,15 +264,6 @@ func (db *LocalCluster) Setup() error { } } - if !db.OnlyMySQL { - log.Infof("Starting vtcombo...") - db.vt = VtcomboProcess(db.Env, &db.Config, db.mysql) - if err := db.vt.WaitStart(); err != nil { - return err - } - log.Infof("vtcombo up: %s", db.vt.Address()) - } - return nil } @@ -307,6 +317,7 @@ func isDir(path string) bool { return err == nil && info.IsDir() } +// loadSchema applies sql and vschema migrations respectively for each keyspace in the topology func (db *LocalCluster) loadSchema() error { if db.SchemaDir == "" { return nil @@ -342,12 +353,26 @@ func (db *LocalCluster) loadSchema() error { return err } + // One single vschema migration per file + if !db.OnlyMySQL && len(cmds) == 1 && strings.HasPrefix(strings.ToUpper(cmds[0]), "ALTER VSCHEMA") { + if err = db.applyVschema(keyspace, cmds[0]); err != nil { + return err + } + continue + } + for _, dbname := range db.shardNames(kpb) { if err := db.Execute(cmds, dbname); err != nil { return err } } } + + if !db.OnlyMySQL { + if err := 
db.reloadSchemaKeyspace(keyspace); err != nil { + return err + } + } } return nil @@ -432,6 +457,34 @@ func (db *LocalCluster) JSONConfig() interface{} { return config } +// GrpcPort returns the grpc port used by vtcombo +func (db *LocalCluster) GrpcPort() int { + return db.vt.PortGrpc +} + +func (db *LocalCluster) applyVschema(keyspace string, migration string) error { + server := fmt.Sprintf("localhost:%v", db.vt.PortGrpc) + args := []string{"ApplyVSchema", "-sql", migration, keyspace} + fmt.Printf("Applying vschema %v", args) + err := vtctlclient.RunCommandAndWait(context.Background(), server, args, func(e *logutil.Event) { + log.Info(e) + }) + + return err +} + +func (db *LocalCluster) reloadSchemaKeyspace(keyspace string) error { + server := fmt.Sprintf("localhost:%v", db.vt.PortGrpc) + args := []string{"ReloadSchemaKeyspace", "-include_master=true", keyspace} + fmt.Printf("Reloading keyspace schema %v", args) + + err := vtctlclient.RunCommandAndWait(context.Background(), server, args, func(e *logutil.Event) { + log.Info(e) + }) + + return err +} + // LoadSQLFile loads a parses a .sql file from disk, removing all the // different comments that mysql/mysqldump inserts in these, and returning // each individual SQL statement as its own string. diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 91bf375a595..1e917282102 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -91,7 +91,7 @@ func (vtp *VtProcess) Address() string { return fmt.Sprintf("localhost:%d", vtp.Port) } -// WaitTerminate attemps to gracefully shutdown the Vitess process by sending +// WaitTerminate attempts to gracefully shutdown the Vitess process by sending // a SIGTERM, then wait for up to 10s for it to exit. 
If the process hasn't // exited cleanly after 10s, a SIGKILL is forced and the corresponding exit // error is returned to the user @@ -240,6 +240,9 @@ func VtcomboProcess(env Environment, args *Config, mysql MySQLManager) *VtProces if args.TransactionTimeout != 0 { vt.ExtraArgs = append(vt.ExtraArgs, "-queryserver-config-transaction-timeout", fmt.Sprintf("%f", args.TransactionTimeout)) } + if args.TabletHostName != "" { + vt.ExtraArgs = append(vt.ExtraArgs, []string{"-tablet_hostname", args.TabletHostName}...) + } if socket != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{ diff --git a/go/vt/vttime/clock.go b/go/vt/vttime/clock.go index 6347d0e8165..b648b3ab4fc 100644 --- a/go/vt/vttime/clock.go +++ b/go/vt/vttime/clock.go @@ -34,7 +34,7 @@ var ( // Clock returns the current time. type Clock interface { // Now returns the current time as Interval. - // This method should be thread safe (i.e. multipe go routines can + // This method should be thread safe (i.e. multiple go routines can // safely call this at the same time). // The returned interval is guaranteed to have earliest <= latest, // and all implementations enforce it. diff --git a/go/vt/worker/grpcvtworkerserver/server.go b/go/vt/worker/grpcvtworkerserver/server.go index 95e9e7b1df6..29891f65bdc 100644 --- a/go/vt/worker/grpcvtworkerserver/server.go +++ b/go/vt/worker/grpcvtworkerserver/server.go @@ -47,7 +47,7 @@ func NewVtworkerServer(wi *worker.Instance) *VtworkerServer { // ExecuteVtworkerCommand is part of the vtworkerdatapb.VtworkerServer interface func (s *VtworkerServer) ExecuteVtworkerCommand(args *vtworkerdatapb.ExecuteVtworkerCommandRequest, stream vtworkerservicepb.Vtworker_ExecuteVtworkerCommandServer) (err error) { - // Please note that this panic handler catches only panics occuring in the code below. + // Please note that this panic handler catches only panics occurring in the code below. 
// The actual execution of the vtworker command takes place in a new go routine // (started in Instance.setAndStartWorker()) which has its own panic handler. defer servenv.HandlePanic("vtworker", &err) diff --git a/go/vt/worker/key_resolver.go b/go/vt/worker/key_resolver.go index 2d63878c110..09d77c6dee7 100644 --- a/go/vt/worker/key_resolver.go +++ b/go/vt/worker/key_resolver.go @@ -33,7 +33,7 @@ import ( // This file defines the interface and implementations of sharding key resolvers. -// keyspaceIDResolver defines the interface that needs to be satisifed to get a +// keyspaceIDResolver defines the interface that needs to be satisfied to get a // keyspace ID from a database row. type keyspaceIDResolver interface { // keyspaceID takes a table row, and returns the keyspace id as bytes. diff --git a/go/vt/worker/legacy_split_clone.go b/go/vt/worker/legacy_split_clone.go index 9745e2c01e8..a191a1b79d3 100644 --- a/go/vt/worker/legacy_split_clone.go +++ b/go/vt/worker/legacy_split_clone.go @@ -289,7 +289,7 @@ func (scw *LegacySplitCloneWorker) init(ctx context.Context) error { scw.wr.Logger().Infof("Found overlapping shards: %+v\n", os) // one side should have served types, the other one none, - // figure out wich is which, then double check them all + // figure out which is which, then double check them all leftServingTypes, err := scw.wr.TopoServer().GetShardServingTypes(ctx, os.Left[0]) if err != nil { diff --git a/go/vt/worker/multi_split_diff.go b/go/vt/worker/multi_split_diff.go index b4ab8cf9cea..9a93ca1e192 100644 --- a/go/vt/worker/multi_split_diff.go +++ b/go/vt/worker/multi_split_diff.go @@ -41,7 +41,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// Scanners encapsulates a source and a destination. We create one of these per paralell runner. +// Scanners encapsulates a source and a destination. We create one of these per parallel runner. 
type Scanners struct { // this is how we get data from the source shard sourceScanner TableScanner diff --git a/go/vt/worker/restartable_result_reader.go b/go/vt/worker/restartable_result_reader.go index be0e7568879..04f95695fc6 100644 --- a/go/vt/worker/restartable_result_reader.go +++ b/go/vt/worker/restartable_result_reader.go @@ -168,7 +168,7 @@ func (r *RestartableResultReader) getTablet() (bool, error) { return false /* retryable */, nil } -// startStream assumes that getTablet() was succesfully called before and now +// startStream assumes that getTablet() was successfully called before and now // tries to connect to the set tablet and start the streaming query. // If the method returns an error, the first return value specifies if it is // okay to retry. diff --git a/go/vt/worker/restartable_result_reader_test.go b/go/vt/worker/restartable_result_reader_test.go index 95c09a5cd46..2e1d301f1ff 100644 --- a/go/vt/worker/restartable_result_reader_test.go +++ b/go/vt/worker/restartable_result_reader_test.go @@ -136,7 +136,7 @@ func TestGenerateQuery(t *testing.T) { want: "SELECT `a`,`b`,`msg1`,`msg2` FROM `t1` WHERE `a`>=11 AND `a`<26 ORDER BY `a`,`b`", }, { - desc: "start overriden by last row (multi-column primary key)", + desc: "start overridden by last row (multi-column primary key)", start: sqltypes.NewInt64(11), end: sqltypes.NewInt64(26), table: "t1", diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index c063c508b07..8b8cd2d00ef 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -592,7 +592,7 @@ func (scw *SplitCloneWorker) initShardsForHorizontalResharding(ctx context.Conte scw.wr.Logger().Infof("Found overlapping shards: %+v\n", os) // one side should have served types, the other one none, - // figure out wich is which, then double check them all + // figure out which is which, then double check them all leftServingTypes, err := scw.wr.TopoServer().GetShardServingTypes(ctx, os.Left[0]) if err != nil { 
return fmt.Errorf("cannot get shard serving cells for: %v", os.Left[0]) diff --git a/go/vt/worker/split_clone_cmd.go b/go/vt/worker/split_clone_cmd.go index b5f19acbd1a..4cf6b8795fb 100644 --- a/go/vt/worker/split_clone_cmd.go +++ b/go/vt/worker/split_clone_cmd.go @@ -74,7 +74,7 @@ const splitCloneHTML2 = `

- +

diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 7e73330b273..88a83ae6e18 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -829,7 +829,7 @@ func TestSplitCloneV2_Offline_Reconciliation(t *testing.T) { tc.rightMasterQs.modifyFirstRows(2) // The destination tablets should see inserts, updates and deletes. - // Clear the entries added by setUp() because the reconcilation will + // Clear the entries added by setUp() because the reconciliation will // produce different statements in this test case. tc.leftMasterFakeDb.DeleteAllEntries() tc.rightMasterFakeDb.DeleteAllEntries() diff --git a/go/vt/worker/vertical_split_clone_cmd.go b/go/vt/worker/vertical_split_clone_cmd.go index 4f201036458..7045fb3e620 100644 --- a/go/vt/worker/vertical_split_clone_cmd.go +++ b/go/vt/worker/vertical_split_clone_cmd.go @@ -72,7 +72,7 @@ const verticalSplitCloneHTML2 = `

- +

diff --git a/go/vt/worker/vtworkerclient/interface.go b/go/vt/worker/vtworkerclient/interface.go index b7796de18e4..b35766b0540 100644 --- a/go/vt/worker/vtworkerclient/interface.go +++ b/go/vt/worker/vtworkerclient/interface.go @@ -27,7 +27,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -// protocol specifices which RPC client implementation should be used. +// protocol specifics which RPC client implementation should be used. var protocol = flag.String("vtworker_client_protocol", "grpc", "the protocol to use to talk to the vtworker server") // Client defines the interface used to send remote vtworker commands diff --git a/go/vt/workflow/manager_test.go b/go/vt/workflow/manager_test.go index 03192f45b7f..0620b882838 100644 --- a/go/vt/workflow/manager_test.go +++ b/go/vt/workflow/manager_test.go @@ -97,7 +97,7 @@ func TestManagerRestart(t *testing.T) { // Stop the manager. cancel() wg.Wait() - // Recreate the manager immitating restart. + // Recreate the manager imitating restart. m = NewManager(ts) // Make sure the workflow is still in the topo server. This diff --git a/go/vt/workflow/node_test.go b/go/vt/workflow/node_test.go index 8ac5a19da22..fec0b518b88 100644 --- a/go/vt/workflow/node_test.go +++ b/go/vt/workflow/node_test.go @@ -48,7 +48,7 @@ func (tw *testWorkflow) Action(ctx context.Context, path, name string) error { } // TestNodeManagerWithRoot unit tests basic NodeManager functionality -// wiht a single root node. +// with a single root node. 
func TestNodeManagerWithRoot(t *testing.T) { nodeManager := NewNodeManager() tw := &testWorkflow{} diff --git a/go/vt/workflow/resharding/workflow.go b/go/vt/workflow/resharding/workflow.go index 0061bdcab92..dedd4f7989b 100644 --- a/go/vt/workflow/resharding/workflow.go +++ b/go/vt/workflow/resharding/workflow.go @@ -378,7 +378,7 @@ func (hw *horizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl if err := hw.runWorkflow(); err != nil { return err } - hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished sucessfully.")) + hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished successfully.")) return nil } diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go new file mode 100644 index 00000000000..1f2ca1f54a7 --- /dev/null +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "regexp" + "testing" + + "vitess.io/vitess/go/sqltypes" +) + +func verifyQueries(t *testing.T, dcs []*fakeDBClient) { + for _, dc := range dcs { + dc.verifyQueries(t) + } +} + +type dbResult struct { + result *sqltypes.Result + err error + called bool +} + +// fakeDBClient fakes a binlog_player.DBClient. +type fakeDBClient struct { + queries map[string]*dbResult + queriesRE map[string]*dbResult +} + +// NewfakeDBClient returns a new DBClientMock. 
+func newFakeDBClient() *fakeDBClient { + return &fakeDBClient{ + queries: map[string]*dbResult{ + "use _vt": {result: &sqltypes.Result{}, called: true}, + "select * from _vt.vreplication where db_name='db'": {result: &sqltypes.Result{}}, + }, + queriesRE: make(map[string]*dbResult), + } +} + +func (dc *fakeDBClient) addQuery(query string, result *sqltypes.Result, err error) { + dc.queries[query] = &dbResult{result: result, err: err} +} + +func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err error) { + dc.queriesRE[query] = &dbResult{result: result, err: err} +} + +// DBName is part of the DBClient interface +func (dc *fakeDBClient) DBName() string { + return "db" +} + +// Connect is part of the DBClient interface +func (dc *fakeDBClient) Connect() error { + return nil +} + +// Begin is part of the DBClient interface +func (dc *fakeDBClient) Begin() error { + return nil +} + +// Commit is part of the DBClient interface +func (dc *fakeDBClient) Commit() error { + return nil +} + +// Rollback is part of the DBClient interface +func (dc *fakeDBClient) Rollback() error { + return nil +} + +// Close is part of the DBClient interface +func (dc *fakeDBClient) Close() { +} + +// ExecuteFetch is part of the DBClient interface +func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { + if dbr := dc.queries[query]; dbr != nil { + dbr.called = true + return dbr.result, dbr.err + } + for re, dbr := range dc.queriesRE { + if regexp.MustCompile(re).MatchString(query) { + dbr.called = true + return dbr.result, dbr.err + } + } + return nil, fmt.Errorf("unexpected query: %s", query) +} + +func (dc *fakeDBClient) verifyQueries(t *testing.T) { + t.Helper() + for query, dbr := range dc.queries { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } + for query, dbr := range dc.queriesRE { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } +} diff --git 
a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go new file mode 100644 index 00000000000..85adabf6cb1 --- /dev/null +++ b/go/vt/wrangler/fake_tablet_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "net" + "net/http" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/grpctmserver" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletmanager" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + // import the gRPC client implementation for tablet manager + _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" + + // import the gRPC client implementation for query service + _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" +) + +// This file was copied from testlib. All tests from testlib should be moved +// to the current directory. In order to move tests from there, we have to +// remove the circular dependency it causes (through vtctl dependence). +// The tests in this directory call wrangler functions directly. So, there's +// no circular dependency. + +// This file contains utility methods for unit tests. 
+// We allow the creation of fake tablets, and running their event loop based +// on a FakeMysqlDaemon. + +// fakeTablet keeps track of a fake tablet in memory. It has: +// - a Tablet record (used for creating the tablet, kept for user's information) +// - a FakeMysqlDaemon (used by the fake event loop) +// - a 'done' channel (used to terminate the fake event loop) +type fakeTablet struct { + // Tablet and FakeMysqlDaemon are populated at NewFakeTablet time. + // We also create the RPCServer, so users can register more services + // before calling StartActionLoop(). + Tablet *topodatapb.Tablet + FakeMysqlDaemon *fakemysqldaemon.FakeMysqlDaemon + RPCServer *grpc.Server + + // The following fields are created when we start the event loop for + // the tablet, and closed / cleared when we stop it. + // The Listener is used by the gRPC server. + Agent *tabletmanager.ActionAgent + Listener net.Listener + + // These optional fields are used if the tablet also needs to + // listen on the 'vt' port. + StartHTTPServer bool + HTTPListener net.Listener + HTTPServer *http.Server +} + +// TabletOption is an interface for changing tablet parameters. +// It's a way to pass multiple parameters to NewFakeTablet without +// making it too cumbersome. +type TabletOption func(tablet *topodatapb.Tablet) + +// TabletKeyspaceShard is the option to set the tablet keyspace and shard +func TabletKeyspaceShard(t *testing.T, keyspace, shard string) TabletOption { + return func(tablet *topodatapb.Tablet) { + tablet.Keyspace = keyspace + shard, kr, err := topo.ValidateShardName(shard) + if err != nil { + t.Fatalf("cannot ValidateShardName value %v", shard) + } + tablet.Shard = shard + tablet.KeyRange = kr + } +} + +// newFakeTablet creates the test tablet in the topology. 'uid' +// has to be between 0 and 99. All the tablet info will be derived +// from that. Look at the implementation if you need values. +// Use TabletOption implementations if you need to change values at creation. 
+// 'db' can be nil if the test doesn't use a database at all. +func newFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletType topodatapb.TabletType, db *fakesqldb.DB, options ...TabletOption) *fakeTablet { + if uid > 99 { + t.Fatalf("uid has to be between 0 and 99: %v", uid) + } + mysqlPort := int32(3300 + uid) + hostname := fmt.Sprintf("%v.%d", cell, uid) + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: cell, Uid: uid}, + Hostname: hostname, + MysqlHostname: hostname, + PortMap: map[string]int32{ + "vt": int32(8100 + uid), + "grpc": int32(8200 + uid), + }, + Keyspace: "test_keyspace", + Shard: "0", + Type: tabletType, + } + topoproto.SetMysqlPort(tablet, mysqlPort) + for _, option := range options { + option(tablet) + } + if err := wr.InitTablet(context.Background(), tablet, false /* allowMasterOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + t.Fatalf("cannot create tablet %v: %v", uid, err) + } + + // create a FakeMysqlDaemon with the right information by default + fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db) + fakeMysqlDaemon.MysqlPort = mysqlPort + + return &fakeTablet{ + Tablet: tablet, + FakeMysqlDaemon: fakeMysqlDaemon, + RPCServer: grpc.NewServer(), + } +} + +// StartActionLoop will start the action loop for a fake tablet, +// using ft.FakeMysqlDaemon as the backing mysqld. +func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { + if ft.Agent != nil { + t.Fatalf("Agent for %v is already running", ft.Tablet.Alias) + } + + // Listen on a random port for gRPC. + var err error + ft.Listener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen: %v", err) + } + gRPCPort := int32(ft.Listener.Addr().(*net.TCPAddr).Port) + + // If needed, listen on a random port for HTTP. 
+ vtPort := ft.Tablet.PortMap["vt"] + if ft.StartHTTPServer { + ft.HTTPListener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen on http port: %v", err) + } + handler := http.NewServeMux() + ft.HTTPServer = &http.Server{ + Handler: handler, + } + go ft.HTTPServer.Serve(ft.HTTPListener) + vtPort = int32(ft.HTTPListener.Addr().(*net.TCPAddr).Port) + } + + // Create a test agent on that port, and re-read the record + // (it has new ports and IP). + ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, vtPort, gRPCPort, ft.FakeMysqlDaemon, nil) + ft.Tablet = ft.Agent.Tablet() + + // Register the gRPC server, and starts listening. + grpctmserver.RegisterForTest(ft.RPCServer, ft.Agent) + go ft.RPCServer.Serve(ft.Listener) + + // And wait for it to serve, so we don't start using it before it's + // ready. + timeout := 5 * time.Second + step := 10 * time.Millisecond + c := tmclient.NewTabletManagerClient() + for timeout >= 0 { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + err := c.Ping(ctx, ft.Agent.Tablet()) + cancel() + if err == nil { + break + } + time.Sleep(step) + timeout -= step + } + if timeout < 0 { + panic("StartActionLoop failed.") + } +} + +// StopActionLoop will stop the Action Loop for the given FakeTablet +func (ft *fakeTablet) StopActionLoop(t *testing.T) { + if ft.Agent == nil { + t.Fatalf("Agent for %v is not running", ft.Tablet.Alias) + } + if ft.StartHTTPServer { + ft.HTTPListener.Close() + } + ft.Listener.Close() + ft.Agent.Stop() + ft.Agent = nil + ft.Listener = nil + ft.HTTPListener = nil +} + +// Target returns the keyspace/shard/type info of this tablet as Target. 
+func (ft *fakeTablet) Target() querypb.Target { + return querypb.Target{ + Keyspace: ft.Tablet.Keyspace, + Shard: ft.Tablet.Shard, + TabletType: ft.Tablet.Type, + } +} + +func init() { + // enforce we will use the right protocol (gRPC) in all unit tests + *tmclient.TabletManagerProtocol = "grpc" + *tabletconn.TabletProtocol = "grpc" +} diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go new file mode 100644 index 00000000000..4832d85965f --- /dev/null +++ b/go/vt/wrangler/migrater.go @@ -0,0 +1,857 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "hash/fnv" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/sync2" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// MigrateDirection specifies the migration direction. +type MigrateDirection int + +// The following constants define the migration direction. 
+const ( + DirectionForward = MigrateDirection(iota) + DirectionBackward +) + +// accessType specifies the type of access for a shard (allow/disallow writes). +type accessType int + +const ( + allowWrites = accessType(iota) + disallowWrites +) + +// migrater contains the metadata for migrating read and write traffic +// for vreplication streams. +type migrater struct { + migrationType binlogdatapb.MigrationType + wr *Wrangler + id int64 + sources map[string]*miSource + targets map[string]*miTarget + sourceKeyspace string + targetKeyspace string + tables []string +} + +// miTarget contains the metadata for each migration target. +type miTarget struct { + si *topo.ShardInfo + master *topo.TabletInfo + sources map[uint32]*binlogdatapb.BinlogSource + position string +} + +// miSource contains the metadata for each migration source. +type miSource struct { + si *topo.ShardInfo + master *topo.TabletInfo + position string + journaled bool +} + +// MigrateReads is a generic way of migrating read traffic for a resharding workflow. +func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow string, servedType topodatapb.TabletType, cells []string, direction MigrateDirection) error { + if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { + return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) + } + mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow) + if err != nil { + return err + } + if err := mi.validate(ctx, false /* isWrite */); err != nil { + return err + } + + // For reads, locking the source keyspace is sufficient. 
+ ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateReads") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.migrateTableReads(ctx, cells, servedType, direction) + } + return mi.migrateShardReads(ctx, cells, servedType, direction) +} + +// MigrateWrites is a generic way of migrating write traffic for a resharding workflow. +func (wr *Wrangler) MigrateWrites(ctx context.Context, targetKeyspace, workflow string, filteredReplicationWaitTime time.Duration) (journalID int64, err error) { + mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow) + if err != nil { + return 0, err + } + mi.wr.Logger().Infof("Built migration metadata: %+v", mi) + if err := mi.validate(ctx, true /* isWrite */); err != nil { + return 0, err + } + + // Need to lock both source and target keyspaces. + ctx, sourceUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") + if lockErr != nil { + return 0, lockErr + } + defer sourceUnlock(&err) + if mi.targetKeyspace != mi.sourceKeyspace { + tctx, targetUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.targetKeyspace, "MigrateWrites") + if lockErr != nil { + return 0, lockErr + } + ctx = tctx + defer targetUnlock(&err) + } + + journalsExist, err := mi.checkJournals(ctx) + if err != nil { + return 0, err + } + if !journalsExist { + mi.wr.Logger().Infof("No previous journals were found. Proceeding normally.") + if err := mi.stopSourceWrites(ctx); err != nil { + mi.cancelMigration(ctx) + return 0, err + } + if err := mi.waitForCatchup(ctx, filteredReplicationWaitTime); err != nil { + mi.cancelMigration(ctx) + return 0, err + } + } else { + mi.wr.Logger().Infof("Journals were found. Completing the left over steps.") + // Need to gather positions in case all journals were not created. + if err := mi.gatherPositions(ctx); err != nil { + return 0, err + } + } + // This is the point of no return. 
Once a journal is created, + // traffic can be redirected to target shards. + if err := mi.createJournals(ctx); err != nil { + return 0, err + } + if err := mi.createReverseReplication(ctx); err != nil { + return 0, err + } + if err := mi.allowTargetWrites(ctx); err != nil { + return 0, err + } + if err := mi.changeRouting(ctx); err != nil { + return 0, err + } + mi.deleteTargetVReplication(ctx) + return mi.id, nil +} + +func (wr *Wrangler) buildMigrater(ctx context.Context, targetKeyspace, workflow string) (*migrater, error) { + targets, err := wr.buildMigrationTargets(ctx, targetKeyspace, workflow) + if err != nil { + return nil, err + } + + mi := &migrater{ + wr: wr, + id: hashStreams(targetKeyspace, targets), + targets: targets, + sources: make(map[string]*miSource), + targetKeyspace: targetKeyspace, + } + mi.wr.Logger().Infof("Migration ID for workflow %s: %d", workflow, mi.id) + + // Build the sources + for _, target := range targets { + for _, bls := range target.sources { + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = bls.Keyspace + } else if mi.sourceKeyspace != bls.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, bls.Keyspace) + } + if _, ok := mi.sources[bls.Shard]; ok { + continue + } + + sourcesi, err := mi.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + if err != nil { + return nil, err + } + mi.sources[bls.Shard] = &miSource{ + si: sourcesi, + master: sourceMaster, + } + + if mi.tables == nil { + for _, rule := range bls.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range bls.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", 
mi.tables, tables) + } + } + } + } + if mi.sourceKeyspace != mi.targetKeyspace { + mi.migrationType = binlogdatapb.MigrationType_TABLES + } else { + mi.migrationType = binlogdatapb.MigrationType_SHARDS + } + return mi, nil +} + +func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, workflow string) (targets map[string]*miTarget, err error) { + targets = make(map[string]*miTarget) + targetShards, err := wr.ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + // We check all target shards. All of them may not have a stream. + // For example, if we're splitting -80 to -40,40-80, only those + // two target shards will have vreplication streams. + for _, targetShard := range targetShards { + targetsi, err := wr.ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + targetMaster, err := wr.ts.GetTablet(ctx, targetsi.MasterAlias) + if err != nil { + return nil, err + } + p3qr, err := wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select id, source from _vt.vreplication where workflow='%s' and db_name='%s'", workflow, targetMaster.DbName())) + if err != nil { + return nil, err + } + // If there's no vreplication stream, check the next target. + if len(p3qr.Rows) < 1 { + continue + } + + targets[targetShard] = &miTarget{ + si: targetsi, + master: targetMaster, + sources: make(map[uint32]*binlogdatapb.BinlogSource), + } + qr := sqltypes.Proto3ToResult(p3qr) + for _, row := range qr.Rows { + id, err := sqltypes.ToInt64(row[0]) + if err != nil { + return nil, err + } + var bls binlogdatapb.BinlogSource + if err := proto.UnmarshalText(row[1].ToString(), &bls); err != nil { + return nil, err + } + targets[targetShard].sources[uint32(id)] = &bls + } + } + if len(targets) == 0 { + return nil, fmt.Errorf("no streams found in keyspace %s for: %s", targetKeyspace, workflow) + } + return targets, nil +} + +// hashStreams produces a reproducible hash based on the input parameters. 
+func hashStreams(targetKeyspace string, targets map[string]*miTarget) int64 { + var expanded []string + for shard, target := range targets { + for uid := range target.sources { + expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) + } + } + sort.Strings(expanded) + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + for _, str := range expanded { + hasher.Write([]byte(str)) + } + // Convert to int64 after dropping the highest bit. + return int64(hasher.Sum64() & math.MaxInt64) +} + +func (mi *migrater) validate(ctx context.Context, isWrite bool) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + // All shards must be present. + if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { + return err + } + if err := mi.compareShards(ctx, mi.targetKeyspace, mi.targetShards()); err != nil { + return err + } + // Wildcard table names not allowed. + for _, table := range mi.tables { + if strings.HasPrefix(table, "/") { + return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) + } + } + if isWrite { + return mi.validateTableForWrite(ctx) + } + } else { // binlogdatapb.MigrationType_SHARDS + // Source and target shards must not match. 
+ for sourceShard := range mi.sources { + if _, ok := mi.targets[sourceShard]; ok { + return fmt.Errorf("target shard matches a source shard: %v", sourceShard) + } + } + if isWrite { + return mi.validateShardForWrite(ctx) + } + } + return nil +} + +func (mi *migrater) validateTableForWrite(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + if rules[table+"@"+tt] == nil || rules[mi.targetKeyspace+"."+table+"@"+tt] == nil { + return fmt.Errorf("missing tablet type specific routing, read-only traffic must be migrated before migrating writes: %v", table) + } + } + } + return nil +} + +func (mi *migrater) validateShardForWrite(ctx context.Context) error { + srvKeyspaces, err := mi.wr.ts.GetSrvKeyspaceAllCells(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + + // Checking one shard is enough. + var si *topo.ShardInfo + for _, source := range mi.sources { + si = source.si + break + } + + for _, srvKeyspace := range srvKeyspaces { + var shardServedTypes []string + for _, partition := range srvKeyspace.GetPartitions() { + if partition.GetServedType() == topodatapb.TabletType_MASTER { + continue + } + for _, shardReference := range partition.GetShardReferences() { + if key.KeyRangeEqual(shardReference.GetKeyRange(), si.GetKeyRange()) { + shardServedTypes = append(shardServedTypes, partition.GetServedType().String()) + } + } + } + if len(shardServedTypes) > 0 { + return fmt.Errorf("cannot migrate MASTER away from %v/%v until everything else is migrated. 
Make sure that the following types are migrated first: %v", si.Keyspace(), si.ShardName(), strings.Join(shardServedTypes, ", ")) + } + } + return nil +} + +func (mi *migrater) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { + var shards []string + for _, si := range sis { + shards = append(shards, si.ShardName()) + } + topoShards, err := mi.wr.ts.GetShardNames(ctx, keyspace) + if err != nil { + return err + } + sort.Strings(topoShards) + sort.Strings(shards) + if !reflect.DeepEqual(topoShards, shards) { + return fmt.Errorf("mismatched shards for keyspace %s: topo: %v vs migrate command: %v", keyspace, topoShards, shards) + } + return nil +} + +func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // For forward migration, we add tablet type specific rules to redirect traffic to the target. + // For backward, we delete them. + tt := strings.ToLower(servedType.String()) + for _, table := range mi.tables { + if direction == DirectionForward { + rules[table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.targetKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." 
+ table} + } else { + delete(rules, table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + } + } + if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + return mi.wr.ts.RebuildSrvVSchema(ctx, cells) +} + +func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { + var fromShards, toShards []*topo.ShardInfo + if direction == DirectionForward { + fromShards, toShards = mi.sourceShards(), mi.targetShards() + } else { + fromShards, toShards = mi.targetShards(), mi.sourceShards() + } + + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */); err != nil { + return err + } + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, toShards, cells, servedType, false, false); err != nil { + return err + } + return mi.wr.ts.MigrateServedType(ctx, mi.sourceKeyspace, toShards, fromShards, servedType, cells) +} + +func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err error) { + var exist sync2.AtomicBool + err = mi.forAllSources(func(source *miSource) error { + statement := fmt.Sprintf("select 1 from _vt.resharding_journal where id=%v", mi.id) + p3qr, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement) + if err != nil { + return err + } + if len(p3qr.Rows) >= 1 { + exist.Set(true) + source.journaled = true + } + return nil + }) + return exist.Get(), err +} + +func (mi *migrater) stopSourceWrites(ctx context.Context) error { + var err error + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + err = mi.changeTableSourceWrites(ctx, disallowWrites) + } else { + err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), disallowWrites) + } + if err != nil { + return err + } + return mi.forAllSources(func(source *miSource) error { + var err error + 
source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position) + return err + }) +} + +func (mi *migrater) changeTableSourceWrites(ctx context.Context, access accessType) error { + return mi.forAllSources(func(source *miSource) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, access == allowWrites /* remove */, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, source.master.Tablet) + }) +} + +func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) + defer cancel() + + var mu sync.Mutex + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[bls.Shard] + if err := mi.wr.tmc.VReplicationWaitForPos(ctx, target.master.Tablet, int(uid), source.position); err != nil { + return err + } + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { + return err + } + + // Need lock because a target can have multiple uids. 
+ mu.Lock() + defer mu.Unlock() + if target.position != "" { + return nil + } + var err error + target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + mi.wr.Logger().Infof("Position for uid %v: %v", uid, target.position) + return err + }) +} + +func (mi *migrater) cancelMigration(ctx context.Context) { + var err error + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + err = mi.changeTableSourceWrites(ctx, allowWrites) + } else { + err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), allowWrites) + } + if err != nil { + mi.wr.Logger().Errorf("Cancel migration failed: %v", err) + } + + err = mi.forAllUids(func(target *miTarget, uid uint32) error { + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StartVReplication(uid)); err != nil { + return err + } + return nil + }) + if err != nil { + mi.wr.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) + } +} + +func (mi *migrater) gatherPositions(ctx context.Context) error { + err := mi.forAllSources(func(source *miSource) error { + var err error + source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position) + return err + }) + if err != nil { + return err + } + return mi.forAllTargets(func(target *miTarget) error { + var err error + target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + mi.wr.Logger().Infof("Position for target %v:%v: %v", mi.targetKeyspace, target.si.ShardName(), target.position) + return err + }) +} + +func (mi *migrater) createJournals(ctx context.Context) error { + var participants []*binlogdatapb.KeyspaceShard + for sourceShard := range mi.sources { + participants = append(participants, &binlogdatapb.KeyspaceShard{ + Keyspace: mi.sourceKeyspace, + Shard: sourceShard, + }) + } + return mi.forAllSources(func(source *miSource) error { + if 
source.journaled { + return nil + } + journal := &binlogdatapb.Journal{ + Id: mi.id, + MigrationType: mi.migrationType, + Tables: mi.tables, + LocalPosition: source.position, + Participants: participants, + } + for targetShard, target := range mi.targets { + found := false + for _, tsource := range target.sources { + if source.si.ShardName() == tsource.Shard { + found = true + break + } + } + if !found { + continue + } + journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: mi.targetKeyspace, + Shard: targetShard, + Gtid: target.position, + }) + } + mi.wr.Logger().Infof("Creating journal: %v", journal) + statement := fmt.Sprintf("insert into _vt.resharding_journal "+ + "(id, db_name, val) "+ + "values (%v, %v, %v)", + mi.id, encodeString(source.master.DbName()), encodeString(journal.String())) + if _, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement); err != nil { + return err + } + return nil + }) +} + +func (mi *migrater) createReverseReplication(ctx context.Context) error { + vs, err := mi.wr.ts.GetVSchema(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + ksschema, err := vindexes.BuildKeyspaceSchema(vs, mi.sourceKeyspace) + if err != nil { + return err + } + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[bls.Shard] + reverseBls := &binlogdatapb.BinlogSource{ + Keyspace: mi.targetKeyspace, + Shard: target.si.ShardName(), + TabletType: bls.TabletType, + Filter: &binlogdatapb.Filter{}, + } + for _, rule := range bls.Filter.Rules { + var filter string + if strings.HasPrefix(rule.Match, "/") { + if ksschema.Keyspace.Sharded { + filter = bls.Shard + } + } else { + var inKeyrange string + if ksschema.Keyspace.Sharded { + vtable, ok := ksschema.Tables[rule.Match] + if !ok { + return fmt.Errorf("table %s not found in vschema", rule.Match) + } + // TODO(sougou): handle degenerate cases like sequence, etc. 
+ // We currently assume the primary vindex is the best way to filter, which may not be true. + inKeyrange = fmt.Sprintf(" where in_keyrange(%s, '%s', '%s')", sqlparser.String(vtable.ColumnVindexes[0].Columns[0]), vs.Vindexes[vtable.ColumnVindexes[0].Name].Type, bls.Shard) + } + filter = fmt.Sprintf("select * from %s%s", rule.Match, inKeyrange) + } + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ + Match: rule.Match, + Filter: filter, + }) + } + + _, err := mi.wr.VReplicationExec(ctx, source.master.Alias, binlogplayer.CreateVReplicationState("ReversedResharding", reverseBls, target.position, binlogplayer.BlpStopped, source.master.DbName())) + return err + }) +} + +func (mi *migrater) allowTargetWrites(ctx context.Context) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.allowTableTargetWrites(ctx) + } + return mi.changeShardsAccess(ctx, mi.targetKeyspace, mi.targetShards(), allowWrites) +} + +func (mi *migrater) allowTableTargetWrites(ctx context.Context) error { + return mi.forAllTargets(func(target *miTarget) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, true, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, target.master.Tablet) + }) +} + +func (mi *migrater) changeRouting(ctx context.Context) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.changeTableRouting(ctx) + } + return mi.changeShardRouting(ctx) +} + +func (mi *migrater) changeTableRouting(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // Additionally, MigrateReads would have added rules 
like this: + // table@replica -> targetKeyspace.table + // targetKeyspace.table@replica -> targetKeyspace.table + // After this step, only the following rules will be left: + // table -> targetKeyspace.table + // sourceKeyspace.table -> targetKeyspace.table + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + delete(rules, table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + mi.wr.Logger().Infof("Delete routing: %v %v %v", table+"@"+tt, mi.targetKeyspace+"."+table+"@"+tt, mi.sourceKeyspace+"."+table+"@"+tt) + } + delete(rules, mi.targetKeyspace+"."+table) + mi.wr.Logger().Infof("Delete routing: %v", mi.targetKeyspace+"."+table) + rules[table] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." + table} + mi.wr.Logger().Infof("Add routing: %v %v", table, mi.sourceKeyspace+"."+table) + } + if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + return mi.wr.ts.RebuildSrvVSchema(ctx, nil) +} + +func (mi *migrater) changeShardRouting(ctx context.Context) error { + err := mi.forAllSources(func(source *miSource) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { + si.IsMasterServing = false + return nil + }) + return err + }) + if err != nil { + return err + } + err = mi.forAllTargets(func(target *miTarget) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si *topo.ShardInfo) error { + si.IsMasterServing = true + return nil + }) + return err + }) + if err != nil { + return err + } + return mi.wr.ts.MigrateServedType(ctx, mi.targetKeyspace, mi.targetShards(), mi.sourceShards(), topodatapb.TabletType_MASTER, nil) +} + +func (mi *migrater) 
deleteTargetVReplication(ctx context.Context) { + _ = mi.forAllUids(func(target *miTarget, uid uint32) error { + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.DeleteVReplication(uid)); err != nil { + mi.wr.Logger().Errorf("Final cleanup: could not delete vreplication, please delete stopped streams manually: %v", err) + } + return nil + }) +} + +func (mi *migrater) changeShardsAccess(ctx context.Context, keyspace string, shards []*topo.ShardInfo, access accessType) error { + if err := mi.wr.ts.UpdateDisableQueryService(ctx, keyspace, shards, topodatapb.TabletType_MASTER, nil, access == disallowWrites /* disable */); err != nil { + return err + } + return mi.wr.refreshMasters(ctx, shards) +} + +func (mi *migrater) forAllSources(f func(*miSource) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, source := range mi.sources { + wg.Add(1) + go func(source *miSource) { + defer wg.Done() + + if err := f(source); err != nil { + allErrors.RecordError(err) + } + }(source) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllTargets(f func(*miTarget) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mi.targets { + wg.Add(1) + go func(target *miTarget) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllUids(f func(target *miTarget, uid uint32) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mi.targets { + for uid := range target.sources { + wg.Add(1) + go func(target *miTarget, uid uint32) { + defer wg.Done() + + if err := f(target, uid); err != nil { + allErrors.RecordError(err) + } + }(target, uid) + } + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func 
(mi *migrater) sourceShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.sources)) + for _, source := range mi.sources { + shards = append(shards, source.si) + } + return shards +} + +func (mi *migrater) targetShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.targets)) + for _, target := range mi.targets { + shards = append(shards, target.si) + } + return shards +} + +func (wr *Wrangler) getRoutingRules(ctx context.Context) (map[string][]string, error) { + rrs, err := wr.ts.GetRoutingRules(ctx) + if err != nil { + return nil, err + } + rules := make(map[string][]string, len(rrs.Rules)) + for _, rr := range rrs.Rules { + rules[rr.FromTable] = rr.ToTables + } + return rules, nil +} + +func (wr *Wrangler) saveRoutingRules(ctx context.Context, rules map[string][]string) error { + rrs := &vschemapb.RoutingRules{Rules: make([]*vschemapb.RoutingRule, 0, len(rules))} + for from, to := range rules { + rrs.Rules = append(rrs.Rules, &vschemapb.RoutingRule{ + FromTable: from, + ToTables: to, + }) + } + return wr.ts.SaveRoutingRules(ctx, rrs) +} diff --git a/go/vt/wrangler/migrater_env_test.go b/go/vt/wrangler/migrater_env_test.go new file mode 100644 index 00000000000..5384591ba48 --- /dev/null +++ b/go/vt/wrangler/migrater_env_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wrangler + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +const vreplQueryks = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks'" +const vreplQueryks2 = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks2'" + +type testMigraterEnv struct { + ts *topo.Server + wr *Wrangler + source1Master, source1Replica, source1Rdonly *fakeTablet + source2Master, source2Replica, source2Rdonly *fakeTablet + dest1Master, dest1Replica, dest1Rdonly *fakeTablet + dest2Master, dest2Replica, dest2Rdonly *fakeTablet + dbSource1Client, dbSource2Client *fakeDBClient + dbDest1Client, dbDest2Client *fakeDBClient + allDBClients []*fakeDBClient + targetKeyspace string + streams map[string][]uint32 +} + +func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster: ks1:-40,40- and ks2:-80,80-. 
+ tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "-80")) + + tme.dest2Master = newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "80-")) + tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "80-")) + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + } + if err := 
tme.ts.SaveVSchema(ctx, "ks1", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.SaveVSchema(ctx, "ks2", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks2", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + // Emulate the following replication streams (many-to-many table migration): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('80-')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('80-')", + }}, + }, + } + tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + 
"int64|varchar"), + fmt.Sprintf("1|%v", bls3), + ), nil) + + if err := tme.wr.saveRoutingRules(ctx, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + + tme.targetKeyspace = "ks2" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, + } + return tme +} + +func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster with "ks" as keyspace. -40,40- as serving, -80,80- as non-serving. + tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-80")) + + tme.dest2Master = 
newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "80-")) + + vs := &vschemapb.Keyspace{Sharded: true} + if err := tme.ts.SaveVSchema(ctx, "ks", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks", nil) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + // Emulate the following replication streams (simultaneous split and merge): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "80-", + }}, + }, + } + tme.dbDest2Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), + ), nil) + + tme.targetKeyspace = "ks" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, + } + 
tme.dbSource1Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) + return tme +} + +func (tme *testMigraterEnv) startTablets(t *testing.T) { + tme.source1Replica.StartActionLoop(t, tme.wr) + tme.source1Rdonly.StartActionLoop(t, tme.wr) + tme.source1Master.StartActionLoop(t, tme.wr) + + tme.source2Replica.StartActionLoop(t, tme.wr) + tme.source2Rdonly.StartActionLoop(t, tme.wr) + tme.source2Master.StartActionLoop(t, tme.wr) + + tme.dest1Replica.StartActionLoop(t, tme.wr) + tme.dest1Rdonly.StartActionLoop(t, tme.wr) + tme.dest1Master.StartActionLoop(t, tme.wr) + + tme.dest2Replica.StartActionLoop(t, tme.wr) + tme.dest2Rdonly.StartActionLoop(t, tme.wr) + tme.dest2Master.StartActionLoop(t, tme.wr) +} + +func (tme *testMigraterEnv) stopTablets(t *testing.T) { + tme.source1Replica.StopActionLoop(t) + tme.source1Rdonly.StopActionLoop(t) + tme.source1Master.StopActionLoop(t) + + tme.source2Replica.StopActionLoop(t) + tme.source2Rdonly.StopActionLoop(t) + tme.source2Master.StopActionLoop(t) + + tme.dest1Replica.StopActionLoop(t) + tme.dest1Rdonly.StopActionLoop(t) + tme.dest1Master.StopActionLoop(t) + + tme.dest2Replica.StopActionLoop(t) + tme.dest2Rdonly.StopActionLoop(t) + tme.dest2Master.StopActionLoop(t) +} + +func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { + tme.dbDest1Client = newFakeDBClient() + dbClientFactory1 := func() binlogplayer.DBClient { return tme.dbDest1Client } + tme.dest1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest1Master.FakeMysqlDaemon, dbClientFactory1, tme.dbDest1Client.DBName()) + if err := tme.dest1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbDest2Client = newFakeDBClient() + dbClientFactory2 := func() binlogplayer.DBClient { return tme.dbDest2Client } + tme.dest2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest2Master.FakeMysqlDaemon, dbClientFactory2, 
tme.dbDest2Client.DBName()) + if err := tme.dest2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource1Client = newFakeDBClient() + dbClientFactory3 := func() binlogplayer.DBClient { return tme.dbSource1Client } + tme.source1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source1Master.FakeMysqlDaemon, dbClientFactory3, tme.dbSource1Client.DBName()) + if err := tme.source1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource2Client = newFakeDBClient() + dbClientFactory4 := func() binlogplayer.DBClient { return tme.dbSource2Client } + tme.source2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source2Master.FakeMysqlDaemon, dbClientFactory4, tme.dbSource2Client.DBName()) + if err := tme.source2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.allDBClients = []*fakeDBClient{tme.dbDest1Client, tme.dbDest2Client, tme.dbSource1Client, tme.dbSource2Client} +} + +func (tme *testMigraterEnv) setMasterPositions() { + tme.source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + tme.dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } +} diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go new file mode 100644 index 00000000000..0e9f25995de --- /dev/null +++ b/go/vt/wrangler/migrater_test.go @@ -0,0 +1,1039 @@ +/* +Copyright 
2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" +) + +// TestTableMigrate tests table mode migrations. +// This has to be kept in sync with TestShardMigrate. +func TestTableMigrate(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell RDONLY migration. 
+ err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Other cell REPLICA migration. + // The global routing already contains redirections for rdonly. + // So, adding routes for replica and deploying to cell2 will also cause + // cell2 to migrate rdonly. This is a quirk that can be fixed later if necessary. + // TODO(sougou): check if it's worth fixing, or clearly document the quirk. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell backward REPLICA migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Migrate all REPLICA. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // All cells REPLICA backward migration. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate master with MigrateReads. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) + want := "tablet type must be REPLICA or RDONLY: MASTER" + if err == nil || err.Error() != want { + t.Errorf("MigrateReads(master) err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Test MigrateWrites cancelation on failure. + + // Migrate all the reads first. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + + // Check for journals. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. 
+ state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second) + want = "DeadlineExceeded" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": 
{"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkBlacklist(t, tme.ts, "ks1:-40", nil) + checkBlacklist(t, tme.ts, "ks1:40-", nil) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + //------------------------------------------------------------------------------------------------------------------- + // Test successful MigrateWrites. + + // Create journals. + journal1 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) + journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replicaions. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + if journalID != 9113431017721636330 { + t.Errorf("journal id: %d, want 9113431017721636330", journalID) + } + + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks2.t1"}, + "ks1.t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + "ks1.t2": {"ks2.t2"}, + }) + checkBlacklist(t, tme.ts, "ks1:-40", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks1:40-", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + verifyQueries(t, tme.allDBClients) +} + +// TestShardMigrate tests table mode migrations. +// This has to be kept in sync with TestTableMigrate. 
+func TestShardMigrate(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestShardMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	// Initial check: source shards serve all 3 tablet types, target shards serve none.
+	checkServedTypes(t, tme.ts, "ks:-40", 3)
+	checkServedTypes(t, tme.ts, "ks:40-", 3)
+	checkServedTypes(t, tme.ts, "ks:-80", 0)
+	checkServedTypes(t, tme.ts, "ks:80-", 0)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Single cell RDONLY migration.
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// cell1 sources drop to 2 served types, targets pick up 1; cell2 is untouched.
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Other cell REPLICA migration.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 2)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 2)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 1)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 1)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Single cell backward REPLICA migration: cell2 reverts to the initial state.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
+	checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3)
+	checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3)
+	checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0)
+	checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Migrate all RDONLY.
+	// This is an extra step that does not exist in the tables test.
+	// The per-cell migration mechanism is different for tables. So, this
+	// extra step is needed to bring things in sync.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkServedTypes(t, tme.ts, "ks:-40", 2)
+	checkServedTypes(t, tme.ts, "ks:40-", 2)
+	checkServedTypes(t, tme.ts, "ks:-80", 1)
+	checkServedTypes(t, tme.ts, "ks:80-", 1)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Migrate all REPLICA.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkServedTypes(t, tme.ts, "ks:-40", 1)
+	checkServedTypes(t, tme.ts, "ks:40-", 1)
+	checkServedTypes(t, tme.ts, "ks:-80", 2)
+	checkServedTypes(t, tme.ts, "ks:80-", 2)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// All cells RDONLY backward migration.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkServedTypes(t, tme.ts, "ks:-40", 2)
+	checkServedTypes(t, tme.ts, "ks:40-", 2)
+	checkServedTypes(t, tme.ts, "ks:-80", 1)
+	checkServedTypes(t, tme.ts, "ks:80-", 1)
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Can't migrate master with MigrateReads.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward)
+	want := "tablet type must be REPLICA or RDONLY: MASTER"
+	if err == nil || err.Error() != want {
+		t.Errorf("MigrateReads(master) err: %v, want %v", err, want)
+	}
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Can't migrate writes if REPLICA and RDONLY have not fully migrated yet.
+	// (In shard mode this surfaces as "cannot migrate MASTER away".)
+	_, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second)
+	want = "cannot migrate MASTER away"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateWrites err: %v, want %v", err, want)
+	}
+	verifyQueries(t, tme.allDBClients)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Test MigrateWrites cancelation on failure.
+
+	// Migrate all the reads first.
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkServedTypes(t, tme.ts, "ks:-40", 1)
+	checkServedTypes(t, tme.ts, "ks:40-", 1)
+	checkServedTypes(t, tme.ts, "ks:-80", 2)
+	checkServedTypes(t, tme.ts, "ks:80-", 2)
+	checkIsMasterServing(t, tme.ts, "ks:-40", true)
+	checkIsMasterServing(t, tme.ts, "ks:40-", true)
+	checkIsMasterServing(t, tme.ts, "ks:-80", false)
+	checkIsMasterServing(t, tme.ts, "ks:80-", false)
+
+	// Check for journals.
+	tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil)
+	tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil)
+
+	// Wait for position: Reads current state, updates to Stopped, and re-reads.
+	state := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"pos|state|message",
+		"varchar|varchar|varchar"),
+		"MariaDB/5-456-892|Running|",
+	)
+	tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil)
+	tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil)
+	tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil)
+	tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil)
+	stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|state",
+		"int64|varchar"),
+		"1|Stopped",
+	)
+	tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil)
+
+	// Cancel Migration
+	cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1"
+	cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2"
+	tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil)
+	tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil)
+	tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil)
+
+	// A zero timeout forces the wait-for-position to fail; the migration must
+	// be canceled and the serving state left unchanged.
+	_, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second)
+	want = "DeadlineExceeded"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want)
+	}
+	checkServedTypes(t, tme.ts, "ks:-40", 1)
+	checkServedTypes(t, tme.ts, "ks:40-", 1)
+	checkServedTypes(t, tme.ts, "ks:-80", 2)
+	checkServedTypes(t, tme.ts, "ks:80-", 2)
+	checkIsMasterServing(t, tme.ts, "ks:-40", true)
+	checkIsMasterServing(t, tme.ts, "ks:40-", true)
+	checkIsMasterServing(t, tme.ts, "ks:-80", false)
+	checkIsMasterServing(t, tme.ts, "ks:80-", false)
+
+	//-------------------------------------------------------------------------------------------------------------------
+	// Test successful MigrateWrites.
+
+	// Create journals.
+	journal1 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40"
+	tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil)
+	journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40"
+	tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil)
+
+	// Create backward replications.
+	tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
+	tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil)
+
+	// Delete the target replications.
+ tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + if journalID != 6432976123657117098 { + t.Errorf("journal id: %d, want 6432976123657117098", journalID) + } + + checkServedTypes(t, tme.ts, "ks:-40", 0) + checkServedTypes(t, tme.ts, "ks:40-", 0) + checkServedTypes(t, tme.ts, "ks:-80", 3) + checkServedTypes(t, tme.ts, "ks:80-", 3) + + checkIsMasterServing(t, tme.ts, "ks:-40", false) + checkIsMasterServing(t, tme.ts, "ks:40-", false) + checkIsMasterServing(t, tme.ts, "ks:-80", true) + checkIsMasterServing(t, tme.ts, "ks:80-", true) + + verifyQueries(t, tme.allDBClients) +} + +// TestMigrateFailJournal tests that cancel doesn't get called after point of no return. +// No need to test this for shard migrate because code paths are the same. +func TestMigrateFailJournal(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + + // Check for journals. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. 
+ state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration: these must not get called. + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + // Make the journal call fail. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) + tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + want := "journaling intentionally failed" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + + // Verify that cancel didn't happen. + if tme.dbDest1Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest2Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest1Client.queries[cancel2].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } +} + +func TestTableMigrateJournalExists(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + + // Show one journal as created. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Create the missing journal. 
+	journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40"
+	tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil)
+
+	// Create backward replications.
+	tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
+	stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|state",
+		"int64|varchar"),
+		"1|Stopped",
+	)
+	tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil)
+
+	// Delete the target replications.
+	tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil)
+
+	_, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Routes will be redone.
+	checkRouting(t, tme.wr, map[string][]string{
+		"t1":     {"ks2.t1"},
+		"ks1.t1": {"ks2.t1"},
+		"t2":     {"ks2.t2"},
+		"ks1.t2": {"ks2.t2"},
+	})
+	// We're showing that there are no blacklisted tables. But in real life,
+	// tables on ks1 should be blacklisted from the previous failed attempt.
+	checkBlacklist(t, tme.ts, "ks1:-40", nil)
+	checkBlacklist(t, tme.ts, "ks1:40-", nil)
+	checkBlacklist(t, tme.ts, "ks2:-80", nil)
+	checkBlacklist(t, tme.ts, "ks2:80-", nil)
+
+	verifyQueries(t, tme.allDBClients)
+}
+
+// TestShardMigrateJournalExists is the shard-mode counterpart of
+// TestTableMigrateJournalExists: MigrateWrites completes when one journal
+// already exists.
+func TestShardMigrateJournalExists(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestShardMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Show one journal as created.
+	tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil)
+	tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil)
+
+	// Create the missing journal.
+	journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40"
+	tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil)
+
+	// Create backward replications.
+	tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
+	stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|state",
+		"int64|varchar"),
+		"1|Stopped",
+	)
+	tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil)
+	tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil)
+
+	// Delete the target replications.
+	tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil)
+	tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil)
+
+	_, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkServedTypes(t, tme.ts, "ks:-40", 0)
+	checkServedTypes(t, tme.ts, "ks:40-", 0)
+	checkServedTypes(t, tme.ts, "ks:-80", 3)
+	checkServedTypes(t, tme.ts, "ks:80-", 3)
+
+	checkIsMasterServing(t, tme.ts, "ks:-40", false)
+	checkIsMasterServing(t, tme.ts, "ks:40-", false)
+	checkIsMasterServing(t, tme.ts, "ks:-80", true)
+	checkIsMasterServing(t, tme.ts, "ks:80-", true)
+
+	verifyQueries(t, tme.allDBClients)
+}
+
+// TestMigrateNoStreamsFound verifies MigrateReads fails when the target
+// keyspace has no vreplication streams for the workflow.
+func TestMigrateNoStreamsFound(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestTableMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil)
+	tme.dbDest2Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "no streams found in keyspace ks2 for: test"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// TestMigrateDistinctSources verifies MigrateReads fails when streams pull
+// from different source keyspaces.
+func TestMigrateDistinctSources(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestTableMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	// Stream sourced from ks2 instead of ks1.
+	bls := &binlogdatapb.BinlogSource{
+		Keyspace: "ks2",
+		Shard:    "-80",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "t1",
+				Filter: "select * from t1 where in_keyrange('-80')",
+			}, {
+				Match:  "t2",
+				Filter: "select * from t2 where in_keyrange('-80')",
+			}},
+		},
+	}
+	tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|source",
+		"int64|varchar"),
+		fmt.Sprintf("1|%v", bls),
+	), nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "source keyspaces are mismatched across streams"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// TestMigrateMismatchedTables verifies MigrateReads fails when streams cover
+// different table lists.
+func TestMigrateMismatchedTables(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestTableMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	// Stream covering only t1 (others cover t1 and t2).
+	bls := &binlogdatapb.BinlogSource{
+		Keyspace: "ks1",
+		Shard:    "-40",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "t1",
+				Filter: "select * from t1 where in_keyrange('-80')",
+			}},
+		},
+	}
+	tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|source",
+		"int64|varchar"),
+		fmt.Sprintf("1|%v", bls),
+	), nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "table lists are mismatched across streams"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// TestTableMigrateAllShardsNotPresent verifies MigrateReads fails when not
+// every target shard has streams.
+func TestTableMigrateAllShardsNotPresent(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestTableMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	// Only one of the two target shards reports streams.
+	tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "mismatched shards for keyspace"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// TestMigrateNoTableWildcards verifies MigrateReads rejects streams whose
+// match rules use wildcards.
+func TestMigrateNoTableWildcards(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestTableMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	bls1 := &binlogdatapb.BinlogSource{
+		Keyspace: "ks1",
+		Shard:    "-40",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "/.*",
+				Filter: "",
+			}},
+		},
+	}
+	bls2 := &binlogdatapb.BinlogSource{
+		Keyspace: "ks1",
+		Shard:    "40-",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "/.*",
+				Filter: "",
+			}},
+		},
+	}
+	tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|source",
+		"int64|varchar"),
+		fmt.Sprintf("1|%v", bls1),
+		fmt.Sprintf("2|%v", bls2),
+	), nil)
+	bls3 := &binlogdatapb.BinlogSource{
+		Keyspace: "ks1",
+		Shard:    "40-",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "/.*",
+				Filter: "",
+			}},
+		},
+	}
+	tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|source",
+		"int64|varchar"),
+		fmt.Sprintf("1|%v", bls3),
+	), nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "cannot migrate streams with wild card table names"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// TestShardMigrateTargetMatchesSource verifies MigrateReads rejects a shard
+// migration whose target shard is also a source shard.
+func TestShardMigrateTargetMatchesSource(t *testing.T) {
+	ctx := context.Background()
+	tme := newTestShardMigrater(ctx, t)
+	defer tme.stopTablets(t)
+
+	// Stream whose source shard equals the target shard.
+	bls := &binlogdatapb.BinlogSource{
+		Keyspace: "ks",
+		Shard:    "-80",
+		Filter: &binlogdatapb.Filter{
+			Rules: []*binlogdatapb.Rule{{
+				Match:  "/.*",
+				Filter: "-80",
+			}},
+		},
+	}
+	tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+		"id|source",
+		"int64|varchar"),
+		fmt.Sprintf("1|%v", bls),
+	), nil)
+
+	err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward)
+	want := "target shard matches a source shard"
+	if err == nil || !strings.Contains(err.Error(), want) {
+		t.Errorf("MigrateReads: %v, must contain %v", err, want)
+	}
+}
+
+// checkRouting asserts the global routing rules equal want, and that every
+// cell's SrvVSchema agrees.
+func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) {
+	t.Helper()
+	ctx := context.Background()
+	got, err := wr.getRoutingRules(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("rules:\n%v, want\n%v", got, want)
+	}
+	cells, err := wr.ts.GetCellInfoNames(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, cell := range cells {
+		checkCellRouting(t, wr, cell, want)
+	}
+}
+
+// checkCellRouting asserts the routing rules in one cell's SrvVSchema equal want.
+func checkCellRouting(t *testing.T, wr *Wrangler, cell string, want map[string][]string) {
+	t.Helper()
+	ctx := context.Background()
+	svs, err := wr.ts.GetSrvVSchema(ctx, cell)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := make(map[string][]string)
+	for _, rr := range svs.RoutingRules.Rules {
+		got[rr.FromTable] = append(got[rr.FromTable], rr.ToTables...)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("srv rules for cell %s:\n%v, want\n%v", cell, got, want)
+	}
+}
+
+// checkBlacklist asserts the MASTER blacklisted tables of "keyspace:shard" equal want.
+func checkBlacklist(t *testing.T, ts *topo.Server, keyspaceShard string, want []string) {
+	t.Helper()
+	ctx := context.Background()
+	splits := strings.Split(keyspaceShard, ":")
+	si, err := ts.GetShard(ctx, splits[0], splits[1])
+	if err != nil {
+		t.Fatal(err)
+	}
+	tc := si.GetTabletControl(topodatapb.TabletType_MASTER)
+	var got []string
+	if tc != nil {
+		got = tc.BlacklistedTables
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Blacklisted tables for %v: %v, want %v", keyspaceShard, got, want)
+	}
+}
+
+// checkServedTypes asserts the number of served tablet types for "keyspace:shard".
+func checkServedTypes(t *testing.T, ts *topo.Server, keyspaceShard string, want int) {
+	t.Helper()
+	ctx := context.Background()
+	splits := strings.Split(keyspaceShard, ":")
+	si, err := ts.GetShard(ctx, splits[0], splits[1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	servedTypes, err := ts.GetShardServingTypes(ctx, si)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(servedTypes) != want {
+		t.Errorf("shard %v has wrong served types: got: %v, want: %v", keyspaceShard, len(servedTypes), want)
+	}
+}
+
+// checkCellServedTypes asserts how many SrvKeyspace partitions in the given
+// cell reference the shard of "keyspace:shard".
+func checkCellServedTypes(t *testing.T, ts *topo.Server, keyspaceShard, cell string, want int) {
+	t.Helper()
+	ctx := context.Background()
+	splits := strings.Split(keyspaceShard, ":")
+	srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, splits[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	count := 0
+outer:
+	for _, partition := range srvKeyspace.GetPartitions() {
+		for _, ref := range partition.ShardReferences {
+			if ref.Name == splits[1] {
+				count++
+				continue outer
+			}
+		}
+	}
+	if count != want {
+		t.Errorf("serving types for keyspaceShard %s, cell %s: %d, want %d", keyspaceShard, cell, count, want)
+	}
+}
+
+func checkIsMasterServing(t *testing.T, ts *topo.Server, keyspaceShard string, want bool) {
+	t.Helper()
+	ctx := context.Background()
+	splits := strings.Split(keyspaceShard, ":")
+	si, err := ts.GetShard(ctx, splits[0], splits[1])
+
if err != nil { + t.Fatal(err) + } + if want != si.IsMasterServing { + t.Errorf("IsMasterServing(%v): %v, want %v", keyspaceShard, si.IsMasterServing, want) + } +} diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 88aa0132795..06b99e21f32 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -555,7 +555,7 @@ func (maxPosSearch *maxReplPosSearch) processTablet(tablet *topodatapb.Tablet) { maxPosSearch.maxPosLock.Unlock() } -// chooseNewMaster finds a tablet that is going to become master after reparent. The criterias +// chooseNewMaster finds a tablet that is going to become master after reparent. The criteria // for the new master-elect are (preferably) to be in the same cell as the current master, and // to be different from avoidMasterTabletAlias. The tablet with the largest replication // position is chosen to minimize the time of catching up with the master. Note that the search @@ -649,7 +649,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events } // Deal with the old master: try to remote-scrap it, if it's - // truely dead we force-scrap it. Remove it from our map in any case. + // truly dead we force-scrap it. Remove it from our map in any case. 
if shardInfo.HasMaster() { deleteOldMaster := true shardInfoMasterAliasStr := topoproto.TabletAliasString(shardInfo.MasterAlias) diff --git a/go/vt/wrangler/testlib/migrate_served_from_test.go b/go/vt/wrangler/testlib/migrate_served_from_test.go index 19d8f4ef69c..9b804ed7c16 100644 --- a/go/vt/wrangler/testlib/migrate_served_from_test.go +++ b/go/vt/wrangler/testlib/migrate_served_from_test.go @@ -117,8 +117,7 @@ func TestMigrateServedFrom(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient) // simulate the clone, by fixing the dest shard record if err := vp.Run([]string{"SourceShardAdd", "--tables", "gone1,gone2", "dest/0", "1", "source/0"}); err != nil { diff --git a/go/vt/wrangler/testlib/migrate_served_types_test.go b/go/vt/wrangler/testlib/migrate_served_types_test.go index 5cb170c5c8c..3db2caa496e 100644 --- a/go/vt/wrangler/testlib/migrate_served_types_test.go +++ b/go/vt/wrangler/testlib/migrate_served_types_test.go @@ -165,8 +165,7 @@ func TestMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient1.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient1.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient1) // dest2Rdonly will see the refresh dest2Rdonly.StartActionLoop(t, wr) @@ -194,8 +193,7 @@ func TestMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient2.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient2.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient2) // migrate will error if the overlapping shards have no "SourceShard" entry 
// and we cannot decide which shard is the source or the destination. @@ -431,8 +429,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient1.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient1.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient1) // Override with a fake VREngine after Agent is initialized in action loop. dbClient2 := binlogplayer.NewMockDBClient(t) @@ -450,8 +447,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient2.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient2.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient2) // migrate will error if the overlapping shards have no "SourceShard" entry // and we cannot decide which shard is the source or the destination. @@ -521,8 +517,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient1.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient1.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient1) // Override with a fake VREngine after Agent is initialized in action loop. 
dbClient2 = binlogplayer.NewMockDBClient(t) @@ -540,8 +535,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { sqltypes.NewVarBinary("Running"), sqltypes.NewVarBinary(""), }}}, nil) - dbClient2.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient2.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + expectDeleteVRepl(dbClient2) // // simulate the clone, by fixing the dest shard record checkShardSourceShards(t, ts, "80-c0", 0) @@ -588,3 +582,11 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { checkShardSourceShards(t, ts, "80-c0", 0) checkShardSourceShards(t, ts, "c0-", 0) } + +func expectDeleteVRepl(dbClient *binlogplayer.MockDBClient) { + dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + dbClient.ExpectRequest("begin", nil, nil) + dbClient.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) + dbClient.ExpectRequest("delete from _vt.copy_state where vrepl_id = 1", nil, nil) + dbClient.ExpectRequest("commit", nil, nil) +} diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl index a480acaf33e..ded11358c48 100644 --- a/helm/vitess/templates/_vttablet.tpl +++ b/helm/vitess/templates/_vttablet.tpl @@ -195,6 +195,7 @@ spec: # remove the old socket file if it is still around rm -f /vtdataroot/tabletdata/mysql.sock + rm -f /vtdataroot/tabletdata/mysql.sock.lock {{- end -}} diff --git a/java/client/src/main/java/io/vitess/client/VTGateConnection.java b/java/client/src/main/java/io/vitess/client/VTGateConnection.java index d22f01f5dc2..480de673a57 100644 --- a/java/client/src/main/java/io/vitess/client/VTGateConnection.java +++ b/java/client/src/main/java/io/vitess/client/VTGateConnection.java @@ -36,6 +36,8 @@ import io.vitess.proto.Vtgate.SplitQueryRequest; import io.vitess.proto.Vtgate.SplitQueryResponse; import io.vitess.proto.Vtgate.StreamExecuteRequest; +import io.vitess.proto.Vtgate.VStreamRequest; +import 
io.vitess.proto.Vtgate.VStreamResponse; import java.io.Closeable; import java.io.IOException; @@ -252,6 +254,26 @@ public ListenableFuture> apply( }, directExecutor())); } + /** + * Starts streaming the vstream binlog events. + * + * @param ctx Context on user and execution deadline if any. + * @param vstreamRequest VStreamRequest containing starting VGtid positions + * in binlog and optional Filters + * @return Streaming iterator over VStream events + * @throws SQLException If anything fails on query execution. + */ + StreamIterator getVStream(Context ctx, VStreamRequest vstreamRequest) + throws SQLException { + VStreamRequest request = vstreamRequest; + + if (ctx.getCallerId() != null) { + request = request.toBuilder().setCallerId(ctx.getCallerId()).build(); + } + + return client.getVStream(ctx, request); + } + /** * @inheritDoc */ diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 021f1fbf189..b28dbc5356f 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -187,6 +187,7 @@ enum VEventType { FIELD = 13; HEARTBEAT = 14; VGTID = 15; + JOURNAL = 16; } // RowChange represents one row change @@ -216,6 +217,27 @@ message VGtid { repeated ShardGtid shard_gtids = 1; } +message KeyspaceShard { + string keyspace = 1; + string shard = 2; +} + +// MigrationType specifies the type of migration for the Journal. +enum MigrationType { + TABLES = 0; + SHARDS = 1; +} + +message Journal { + int64 id = 1; + MigrationType migration_type = 2; + repeated string tables = 3; + string local_position = 4; + repeated ShardGtid shard_gtids = 5; + repeated KeyspaceShard participants = 6; + repeated int64 reversed_ids = 7; +} + // VEvent represents a vstream event message VEvent { VEventType type = 1; @@ -225,6 +247,7 @@ message VEvent { RowEvent row_event = 5; FieldEvent field_event = 6; VGtid vgtid = 7; + Journal journal = 8; // current_time specifies the current time to handle clock skew. 
int64 current_time = 20; } diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index c744f3b7280..53eda7da5b1 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -23,7 +23,7 @@ package='binlogdata', syntax='proto3', serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 
\x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"\xea\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 
\x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xc4\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 
\x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 
\x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"0\n\rKeyspaceShard\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\"\xe3\x01\n\x07Journal\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x31\n\x0emigration_type\x18\x02 \x01(\x0e\x32\x19.binlogdata.MigrationType\x12\x0e\n\x06tables\x18\x03 \x03(\t\x12\x16\n\x0elocal_position\x18\x04 \x01(\t\x12*\n\x0bshard_gtids\x18\x05 \x03(\x0b\x32\x15.binlogdata.ShardGtid\x12/\n\x0cparticipants\x18\x06 \x03(\x0b\x32\x19.binlogdata.KeyspaceShard\x12\x14\n\x0creversed_ids\x18\x07 \x03(\x03\"\x90\x02\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12$\n\x07journal\x18\x08 \x01(\x0b\x32\x13.binlogdata.Journal\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 
\x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xd1\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x12\x0b\n\x07JOURNAL\x10\x10*\'\n\rMigrationType\x12\n\n\x06TABLES\x10\x00\x12\n\n\x06SHARDS\x10\x01\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) @@ -52,8 +52,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2433, - serialized_end=2495, + serialized_start=2751, + serialized_end=2813, ) _sym_db.RegisterEnumDescriptor(_ONDDLACTION) @@ -128,15 +128,42 @@ name='VGTID', index=15, number=15, serialized_options=None, type=None), + _descriptor.EnumValueDescriptor( + name='JOURNAL', index=16, number=16, + serialized_options=None, + type=None), ], containing_type=None, serialized_options=None, - serialized_start=2498, - serialized_end=2694, + serialized_start=2816, + serialized_end=3025, ) _sym_db.RegisterEnumDescriptor(_VEVENTTYPE) VEventType = 
enum_type_wrapper.EnumTypeWrapper(_VEVENTTYPE) +_MIGRATIONTYPE = _descriptor.EnumDescriptor( + name='MigrationType', + full_name='binlogdata.MigrationType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TABLES', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SHARDS', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=3027, + serialized_end=3066, +) +_sym_db.RegisterEnumDescriptor(_MIGRATIONTYPE) + +MigrationType = enum_type_wrapper.EnumTypeWrapper(_MIGRATIONTYPE) IGNORE = 0 STOP = 1 EXEC = 2 @@ -157,6 +184,9 @@ FIELD = 13 HEARTBEAT = 14 VGTID = 15 +JOURNAL = 16 +TABLES = 0 +SHARDS = 1 _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( @@ -826,6 +856,117 @@ ) +_KEYSPACESHARD = _descriptor.Descriptor( + name='KeyspaceShard', + full_name='binlogdata.KeyspaceShard', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keyspace', full_name='binlogdata.KeyspaceShard.keyspace', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard', full_name='binlogdata.KeyspaceShard.shard', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1582, + serialized_end=1630, +) + + +_JOURNAL = 
_descriptor.Descriptor( + name='Journal', + full_name='binlogdata.Journal', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='binlogdata.Journal.id', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='migration_type', full_name='binlogdata.Journal.migration_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='tables', full_name='binlogdata.Journal.tables', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='local_position', full_name='binlogdata.Journal.local_position', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard_gtids', full_name='binlogdata.Journal.shard_gtids', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='participants', full_name='binlogdata.Journal.participants', index=5, + number=6, type=11, cpp_type=10, 
label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='reversed_ids', full_name='binlogdata.Journal.reversed_ids', index=6, + number=7, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1633, + serialized_end=1860, +) + + _VEVENT = _descriptor.Descriptor( name='VEvent', full_name='binlogdata.VEvent', @@ -883,7 +1024,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='current_time', full_name='binlogdata.VEvent.current_time', index=7, + name='journal', full_name='binlogdata.VEvent.journal', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='current_time', full_name='binlogdata.VEvent.current_time', index=8, number=20, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -901,8 +1049,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1583, - serialized_end=1817, + serialized_start=1863, + serialized_end=2135, ) @@ -960,8 +1108,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1820, - serialized_end=2019, + serialized_start=2138, + serialized_end=2337, ) @@ -991,8 +1139,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2021, - 
serialized_end=2074, + serialized_start=2339, + serialized_end=2392, ) @@ -1050,8 +1198,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2077, - serialized_end=2277, + serialized_start=2395, + serialized_end=2595, ) @@ -1109,8 +1257,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2280, - serialized_end=2431, + serialized_start=2598, + serialized_end=2749, ) _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY @@ -1134,10 +1282,14 @@ _ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE _FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD _VGTID.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['migration_type'].enum_type = _MIGRATIONTYPE +_JOURNAL.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['participants'].message_type = _KEYSPACESHARD _VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE _VEVENT.fields_by_name['row_event'].message_type = _ROWEVENT _VEVENT.fields_by_name['field_event'].message_type = _FIELDEVENT _VEVENT.fields_by_name['vgtid'].message_type = _VGTID +_VEVENT.fields_by_name['journal'].message_type = _JOURNAL _VSTREAMREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID _VSTREAMREQUEST.fields_by_name['immediate_caller_id'].message_type = query__pb2._VTGATECALLERID _VSTREAMREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET @@ -1165,6 +1317,8 @@ DESCRIPTOR.message_types_by_name['FieldEvent'] = _FIELDEVENT DESCRIPTOR.message_types_by_name['ShardGtid'] = _SHARDGTID DESCRIPTOR.message_types_by_name['VGtid'] = _VGTID +DESCRIPTOR.message_types_by_name['KeyspaceShard'] = _KEYSPACESHARD +DESCRIPTOR.message_types_by_name['Journal'] = _JOURNAL DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE @@ -1172,6 +1326,7 @@ 
DESCRIPTOR.message_types_by_name['VStreamRowsResponse'] = _VSTREAMROWSRESPONSE DESCRIPTOR.enum_types_by_name['OnDDLAction'] = _ONDDLACTION DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE +DESCRIPTOR.enum_types_by_name['MigrationType'] = _MIGRATIONTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Charset = _reflection.GeneratedProtocolMessageType('Charset', (_message.Message,), dict( @@ -1280,6 +1435,20 @@ )) _sym_db.RegisterMessage(VGtid) +KeyspaceShard = _reflection.GeneratedProtocolMessageType('KeyspaceShard', (_message.Message,), dict( + DESCRIPTOR = _KEYSPACESHARD, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.KeyspaceShard) + )) +_sym_db.RegisterMessage(KeyspaceShard) + +Journal = _reflection.GeneratedProtocolMessageType('Journal', (_message.Message,), dict( + DESCRIPTOR = _JOURNAL, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.Journal) + )) +_sym_db.RegisterMessage(Journal) + VEvent = _reflection.GeneratedProtocolMessageType('VEvent', (_message.Message,), dict( DESCRIPTOR = _VEVENT, __module__ = 'binlogdata_pb2' diff --git a/test/backup.py b/test/backup.py index ebe06836ed8..c6d3d743452 100755 --- a/test/backup.py +++ b/test/backup.py @@ -31,6 +31,7 @@ use_mysqlctld = False use_xtrabackup = False +xtrabackup_stripes = 0 stream_mode = 'tar' tablet_master = None tablet_replica1 = None @@ -48,6 +49,7 @@ def setUpModule(): '-xtrabackup_stream_mode', stream_mode, '-xtrabackup_user=vt_dba', + '-xtrabackup_stripes=%d' % (xtrabackup_stripes), '-xtrabackup_backup_flags', '--password=VtDbaPass'] @@ -208,7 +210,7 @@ def _check_data(self, t, count, msg): logging.exception('exception waiting for data to replicate') timeout = utils.wait_step(msg, timeout) - def _restore(self, t, tablet_type='replica'): + def _restore(self, t, tablet_type='replica', extra_args=[]): """Erase mysql/tablet dir, then start tablet with restore enabled.""" logging.debug("restoring tablet 
%s",str(datetime.datetime.now())) self._reset_tablet_dir(t) @@ -217,6 +219,8 @@ def _restore(self, t, tablet_type='replica'): if use_xtrabackup: xtra_args.extend(xtrabackup_args) + xtra_args.extend(extra_args) + t.start_vttablet(wait_for_state='SERVING', init_tablet_type=tablet_type, init_keyspace='test_keyspace', @@ -232,7 +236,7 @@ def _restore(self, t, tablet_type='replica'): t.check_db_var('rpl_semi_sync_slave_enabled', 'OFF') t.check_db_status('rpl_semi_sync_slave_status', 'OFF') - def _restore_wait_for_backup(self, t, tablet_type='replica'): + def _restore_wait_for_backup(self, t, tablet_type='replica', extra_args=[]): """Erase mysql/tablet dir, then start tablet with wait_for_restore_interval.""" self._reset_tablet_dir(t) @@ -242,6 +246,7 @@ def _restore_wait_for_backup(self, t, tablet_type='replica'): ] if use_xtrabackup: xtra_args.extend(xtrabackup_args) + xtra_args.extend(extra_args) t.start_vttablet(wait_for_state=None, init_tablet_type=tablet_type, @@ -370,9 +375,14 @@ def _test_backup(self, tablet_type, backup_only): tablet_type: 'replica' or 'rdonly'. """ - # bring up another replica concurrently, telling it to wait until a backup + # Bring up another replica concurrently, telling it to wait until a backup # is available instead of starting up empty. - self._restore_wait_for_backup(tablet_replica2, tablet_type=tablet_type) + # + # Override the backup engine implementation to a non-existent one for restore. + # This setting should only matter for taking new backups. We should be able + # to restore a previous backup successfully regardless of this setting. 
+ self._restore_wait_for_backup(tablet_replica2, tablet_type=tablet_type, + extra_args=['-backup_engine_implementation', 'fake_implementation']) # insert data on master, wait for slave to get it tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test) @@ -539,7 +549,7 @@ def _restore_in_place(t): def test_terminated_restore(self): stop_restore_msg = 'Copying file 10' if use_xtrabackup: - stop_restore_msg = 'Restore: Preparing the files' + stop_restore_msg = 'Restore: Preparing' def _terminated_restore(t): for e in utils.vtctld_connection.execute_vtctl_command( ['RestoreFromBackup', t.tablet_alias]): diff --git a/test/cell_aliases.py b/test/cell_aliases.py index bb910daadb4..a942bfdc393 100755 --- a/test/cell_aliases.py +++ b/test/cell_aliases.py @@ -36,7 +36,7 @@ from vtdb import keyrange_constants from vtdb import vtgate_client -use_alias = False +use_alias = True # initial shards # range '' - 80 @@ -226,9 +226,12 @@ def test_cells_aliases(self): utils.apply_vschema(vschema) # Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - if use_alias: utils.run_vtctl(['AddCellsAlias', '-cells', 'test_nj,test_ny','region_east_coast'], auto_log=True) + + # Check that UpdateCellsAlias is idempotent. 
+ utils.run_vtctl(['UpdateCellsAlias', '-cells', 'test_nj,test_ny','region_east_coast'], auto_log=True) + tablet_types_to_wait='MASTER,REPLICA' else: tablet_types_to_wait='MASTER' diff --git a/test/cluster/keytar/config/vitess_config.yaml b/test/cluster/keytar/config/vitess_config.yaml index f800793a01c..a8b0e8a995b 100644 --- a/test/cluster/keytar/config/vitess_config.yaml +++ b/test/cluster/keytar/config/vitess_config.yaml @@ -3,8 +3,8 @@ install: - python-mysqldb extra: - apt-get update - - wget https://storage.googleapis.com/golang/go1.11.1.linux-amd64.tar.gz - - tar -C /usr/local -xzf go1.11.1.linux-amd64.tar.gz + - wget https://dl.google.com/go/go1.12.7.linux-amd64.tar.gz + - tar -C /usr/local -xzf go1.12.7.linux-amd64.tar.gz - wget https://storage.googleapis.com/kubernetes-helm/helm-v2.1.3-linux-amd64.tar.gz - tar -zxvf helm-v2.1.3-linux-amd64.tar.gz - pip install numpy diff --git a/test/config.json b/test/config.json index 96ea4642d5c..93802fc4b0a 100644 --- a/test/config.json +++ b/test/config.json @@ -278,6 +278,15 @@ "RetryMax": 0, "Tags": [] }, + "prepared_statement": { + "File": "prepared_statement_test.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 4, + "RetryMax": 0, + "Tags": [] + }, "mysqlctl": { "File": "mysqlctl.py", "Args": [], diff --git a/test/prepared_statement_test.py b/test/prepared_statement_test.py new file mode 100755 index 00000000000..963f2550c6f --- /dev/null +++ b/test/prepared_statement_test.py @@ -0,0 +1,301 @@ +#!/usr/bin/env python +# +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ensures the vtgate MySQL server protocol plugin works as expected with prepared statments. + +We use table ACLs to verify the user name authenticated by the connector is +set properly. +""" + +import datetime +import socket +import unittest + +import mysql.connector +from mysql.connector import FieldType +from mysql.connector.cursor import MySQLCursorPrepared +from mysql.connector.errors import Error + +import environment +import utils +import tablet +import warnings + +# single shard / 2 tablets +shard_0_master = tablet.Tablet() +shard_0_slave = tablet.Tablet() + +table_acl_config = environment.tmproot + '/table_acl_config.json' +mysql_auth_server_static = (environment.tmproot + + '/mysql_auth_server_static.json') + + +json_example = '''{ + "quiz": { + "sport": { + "q1": { + "question": "Which one is correct team name in NBA?", + "options": [ + "New York Bulls", + "Los Angeles Kings", + "Golden State Warriros", + "Huston Rocket" + ], + "answer": "Huston Rocket" + } + }, + "maths": { + "q1": { + "question": "5 + 7 = ?", + "options": [ + "10", + "11", + "12", + "13" + ], + "answer": "12" + }, + "q2": { + "question": "12 - 8 = ?", + "options": [ + "1", + "2", + "3", + "4" + ], + "answer": "4" + } + } + } +}''' + +insert_stmt = '''insert into vt_prepare_stmt_test values(%s, %s, %s, %s, %s, %s, %s, + %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''' + +def setUpModule(): + try: + environment.topo_server().setup() + + # setup all processes + setup_procs = [ + shard_0_master.init_mysql(), + shard_0_slave.init_mysql(), + ] + 
utils.wait_procs(setup_procs) + + utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) + + shard_0_master.init_tablet('replica', 'test_keyspace', '0') + shard_0_slave.init_tablet('replica', 'test_keyspace', '0') + + # create databases so vttablet can start behaving normally + shard_0_master.create_db('vt_test_keyspace') + shard_0_slave.create_db('vt_test_keyspace') + + except: + tearDownModule() + raise + + +def tearDownModule(): + utils.required_teardown() + if utils.options.skip_teardown: + return + + shard_0_master.kill_vttablet() + shard_0_slave.kill_vttablet() + + teardown_procs = [ + shard_0_master.teardown_mysql(), + shard_0_slave.teardown_mysql(), + ] + utils.wait_procs(teardown_procs, raise_on_error=False) + + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + + shard_0_master.remove_tree() + shard_0_slave.remove_tree() + + +create_vt_prepare_test = '''create table vt_prepare_stmt_test ( +id bigint auto_increment, +msg varchar(64), +keyspace_id bigint(20) unsigned NOT NULL, +tinyint_unsigned TINYINT, +bool_signed BOOL, +smallint_unsigned SMALLINT, +mediumint_unsigned MEDIUMINT, +int_unsigned INT, +float_unsigned FLOAT(10,2), +double_unsigned DOUBLE(16,2), +decimal_unsigned DECIMAL, +t_date DATE, +t_datetime DATETIME, +t_time TIME, +t_timestamp TIMESTAMP, +c8 bit(8) DEFAULT NULL, +c16 bit(16) DEFAULT NULL, +c24 bit(24) DEFAULT NULL, +c32 bit(32) DEFAULT NULL, +c40 bit(40) DEFAULT NULL, +c48 bit(48) DEFAULT NULL, +c56 bit(56) DEFAULT NULL, +c63 bit(63) DEFAULT NULL, +c64 bit(64) DEFAULT NULL, +json_col JSON, +text_col TEXT, +data longblob, +primary key (id) +) Engine=InnoDB''' + + +class TestPreparedStatements(unittest.TestCase): + """This test makes sure that prepared statements is working correctly. 
+ """ + + def test_prepared_statements(self): + with open(table_acl_config, 'w') as fd: + fd.write("""{ + "table_groups": [ + { + "table_names_or_prefixes": ["vt_prepare_stmt_test", "dual"], + "readers": ["vtgate client 1"], + "writers": ["vtgate client 1"], + "admins": ["vtgate client 1"] + } + ] +} +""") + + with open(mysql_auth_server_static, 'w') as fd: + fd.write("""{ + "testuser1": { + "Password": "testpassword1", + "UserData": "vtgate client 1" + }, + "testuser2": { + "Password": "testpassword2", + "UserData": "vtgate client 2" + } +} +""") + + # start the tablets + shard_0_master.start_vttablet(wait_for_state='NOT_SERVING', + table_acl_config=table_acl_config) + shard_0_slave.start_vttablet(wait_for_state='NOT_SERVING', + table_acl_config=table_acl_config) + + # setup replication + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0', + shard_0_master.tablet_alias], auto_log=True) + utils.run_vtctl(['ApplySchema', '-sql', create_vt_prepare_test, + 'test_keyspace']) + for t in [shard_0_master, shard_0_slave]: + utils.run_vtctl(['RunHealthCheck', t.tablet_alias]) + + # start vtgate + utils.VtGate(mysql_server=True).start( + extra_args=['-mysql_auth_server_impl', 'static', + '-mysql_server_query_timeout', '1s', + '-mysql_auth_server_static_file', mysql_auth_server_static]) + # We use gethostbyname('localhost') so we don't presume + # of the IP format (travis is only IP v4, really). 
+ params = dict(host=socket.gethostbyname('localhost'), + port=utils.vtgate.mysql_port, + user='testuser1', + passwd='testpassword1', + db='test_keyspace', + use_pure=True) + + # 'vtgate client 1' is authorized to access vt_prepare_insert_test + conn = mysql.connector.Connect(**params) + cursor = conn.cursor() + cursor.execute('select * from vt_prepare_stmt_test', {}) + cursor.fetchone() + cursor.close() + + cursor = conn.cursor() + try: + cursor.execute('selet * from vt_prepare_stmt_test', {}) + cursor.close() + except mysql.connector.Error as err: + if err.errno == 1105: + print "Captured the error" + else: + raise + + # Insert several rows using prepared statements + text_value = "text" * 100 # Large text value + largeComment = 'L' * ((4 * 1024) + 1) # Large blob + + # Set up the values for the prepared statement + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + for i in range(1, 100): + insert_values = (i, str(i) + "21", i * 100, 127, 1, 32767, 8388607, 2147483647, 2.55, 64.9,55.5, + datetime.date(2009, 5, 5), datetime.date(2009, 5, 5), datetime.datetime.now().time(), datetime.date(2009, 5, 5), + 1,1,1,1,1,1,1,1,1, json_example, text_value, largeComment) + cursor.execute(insert_stmt, insert_values) + + cursor.fetchone() + cursor.close() + + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + cursor.execute('select * from vt_prepare_stmt_test where id = %s', (1,)) + result = cursor.fetchall() + + # Validate the query results. 
+ if cursor.rowcount != 1: + self.fail('expected 1 row got ' + str(cursor.rowcount)) + + if result[0][1] != "121": + self.fail('Received incorrect value, wanted: 121, got ' + result[1]) + + cursor.close() + + # Update a row using prepared statements + updated_text_value = "text_col_msg" + updated_data_value = "updated" + + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + cursor.execute('update vt_prepare_stmt_test set data = %s , text_col = %s where id = %s', (updated_data_value, updated_text_value, 1)) + cursor.close() + + # Validate the update results + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + cursor.execute('select * from vt_prepare_stmt_test where id = %s', (1,)) + result = cursor.fetchone() + if result[-1] != updated_data_value or result[-2] != updated_text_value: + self.fail("Received incorrect values") + cursor.close() + + # Delete from table using prepared statements + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + cursor.execute('delete from vt_prepare_stmt_test where text_col = %s', (text_value,)) + cursor.close() + + # Validate Deletion + cursor = conn.cursor(cursor_class=MySQLCursorPrepared) + cursor.execute('select count(*) from vt_prepare_stmt_test') + res = cursor.fetchone() + if res[0] != 1: + self.fail("Delete failed") + cursor.close() + +if __name__ == '__main__': + utils.main() diff --git a/test/xtrabackup_xbstream.py b/test/xtrabackup_xbstream.py index 751d2f6af62..0f027185211 100755 --- a/test/xtrabackup_xbstream.py +++ b/test/xtrabackup_xbstream.py @@ -23,4 +23,5 @@ if __name__ == '__main__': backup.use_xtrabackup = True backup.stream_mode = 'xbstream' + backup.xtrabackup_stripes = 8 utils.main(backup) diff --git a/vagrant-scripts/bootstrap_vm.sh b/vagrant-scripts/bootstrap_vm.sh index 665a97a152b..bea07394dea 100755 --- a/vagrant-scripts/bootstrap_vm.sh +++ b/vagrant-scripts/bootstrap_vm.sh @@ -34,10 +34,11 @@ apt-get install -y make \ ant \ zip \ unzip +pip install mysql-connector-python # Install golang 
-GO_VER='1.11.1' -GO_DOWNLOAD_URL='https://storage.googleapis.com/golang' +GO_VER='1.12.7' +GO_DOWNLOAD_URL='https://dl.google.com/go/' GO_FILENAME="go${GO_VER}.linux-amd64.tar.gz" wget "${GO_DOWNLOAD_URL}/${GO_FILENAME}" -O "${TMP_DIR}/${GO_FILENAME}" tar xzf "${TMP_DIR}/${GO_FILENAME}" -C "/usr/local" diff --git a/vendor/vendor.json b/vendor/vendor.json index 772ea6c20c0..212a4c22416 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -3,34 +3,68 @@ "ignore": "appengine test", "package": [ { - "checksumSHA1": "ZLRh6zW4/DnVsGpgtt+ZiIaEFKc=", + "checksumSHA1": "b1KgRkWqz0RmrEBK6IJ8kOJva6w=", "path": "cloud.google.com/go/compute/metadata", - "revision": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca", - "revisionTime": "2016-12-16T21:27:53Z" + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" }, { - "checksumSHA1": "4iounbuF7SMZdx/MlKSUuhnV848=", + "checksumSHA1": "+S/9jBntS1iHZwxkR64vsy97Gh8=", + "path": "cloud.google.com/go/iam", + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" + }, + { + "checksumSHA1": "JfGXEtr79UaxukcL05IERkjYm/g=", "path": "cloud.google.com/go/internal", - "revision": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca", - "revisionTime": "2016-12-16T21:27:53Z" + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" }, { - "checksumSHA1": "W2xJ0+fvugRhRi1PMi64bYofBbU=", + "checksumSHA1": "wQ4uGuRwMb24vG16pPQDOOCPkFo=", "path": "cloud.google.com/go/internal/optional", - "revision": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca", - "revisionTime": "2016-12-16T21:27:53Z" + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" }, { - "checksumSHA1": 
"9q9/aJWq19uF6NE4tx4qJvP5Uho=", + "checksumSHA1": "5Jz6j0verpzBzMwIEZai+EbV6Ls=", "path": "cloud.google.com/go/internal/testutil", - "revision": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca", - "revisionTime": "2016-12-16T21:27:53Z" + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" }, { - "checksumSHA1": "cHbMVt14iTMFZHeZMM9Q06dSV+M=", + "checksumSHA1": "jHOn3QLqvr1luNqyOg24/BXLrsM=", + "path": "cloud.google.com/go/internal/trace", + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" + }, + { + "checksumSHA1": "FSifvUBJjm4OsSU5rp5s5+bqvN0=", + "path": "cloud.google.com/go/internal/version", + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" + }, + { + "checksumSHA1": "tof7cbQFVwy2rN/iJfKXDKUf4rs=", "path": "cloud.google.com/go/storage", - "revision": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca", - "revisionTime": "2016-12-16T21:27:53Z" + "revision": "cdaaf98f9226c39dc162b8e55083b2fbc67b4674", + "revisionTime": "2019-07-12T19:01:42Z", + "version": "v0.43.0", + "versionExact": "v0.43.0" }, { "checksumSHA1": "t5pzf8AGtuCmECrPlJM9oAky+dk=", @@ -353,10 +387,10 @@ "versionExact": "v2.0.0" }, { - "checksumSHA1": "5rPfda8jFccr3A6heL+JAmi9K9g=", + "checksumSHA1": "CSPbwbyzqA6sfORicn4HFtIhF/c=", "path": "github.com/davecgh/go-spew/spew", - "revision": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d", - "revisionTime": "2015-11-05T21:09:06Z" + "revision": "d8f796af33cc11cb798c1aaeb27a4ebc5099927d", + "revisionTime": "2018-08-30T19:11:22Z" }, { "checksumSHA1": "a2yC46a1qsJomgY6rb+FkTFiqmE=", @@ -397,98 +431,100 @@ "revisionTime": "2016-01-21T18:51:14Z" }, { - "checksumSHA1": "BP2buXHHOKxI5eYS2xELVng2kf4=", + "checksumSHA1": "MgBmXopyMTVxPYgWfLrSsMIFE84=", "path": 
"github.com/golang/protobuf/jsonpb", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "3QpmjZApyomODlDRgXomf4DgsJU=", + "checksumSHA1": "78dam9bgu36kpCiQPsy5ni3s1cs=", "path": "github.com/golang/protobuf/jsonpb/jsonpb_test_proto", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "mE9XW26JSpe4meBObM6J/Oeq0eg=", + "checksumSHA1": "CGj8VcI/CpzxaNqlqpEVM7qElD4=", "path": "github.com/golang/protobuf/proto", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "DlygQR0Ml7JuGquUo2U4C0MJLBs=", + "checksumSHA1": "cQvjaQ6YsQ2s/QOmJeSGMjbyLhU=", "path": "github.com/golang/protobuf/proto/proto3_proto", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "jI4LkaFUi2rO9vZwvIJJ9tCY6mM=", + "checksumSHA1": "Ap3fxoENMwxwOM77e56TlCVt+7o=", "path": "github.com/golang/protobuf/proto/test_proto", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", 
- "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": "A0DxiyxXV4u8PmwUwlVkQ2CKyiQ=", "path": "github.com/golang/protobuf/proto/testdata", "revision": "1909bc2f63dc92bb931deace8b8312c4db72d12f", - "revisionTime": "2017-08-08T02:16:21Z" + "revisionTime": "2017-08-08T02:16:21Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": "iqWBA0GWNr+cwAdF2KVy1eq9mlU=", "path": "github.com/golang/protobuf/protoc-gen-go", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "DA2cyOt1W92RTyXAqKQ4JWKGR8U=", + "checksumSHA1": "WOkXetG3AqJnfVVuqTJvdukcHps=", "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "+BH6O73wJlqOQtPGpmnT+5l3tAw=", + "checksumSHA1": "dqkZJ8o1Hj3gbN30RyZ7G3CxhfU=", "path": "github.com/golang/protobuf/protoc-gen-go/generator", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": "uY4dEtqaAe5gsU8gbpCI1JgEIII=", "path": "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", 
- "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "EGcFhhLBcZ2f7hTDhtkuK6q1MUc=", + "checksumSHA1": "fejUXovU2abLTPX7kU8fzwT8Kmo=", "path": "github.com/golang/protobuf/protoc-gen-go/grpc", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": "h4PLbJDYnRmcUuf56USJ5K3xJOg=", "path": "github.com/golang/protobuf/protoc-gen-go/plugin", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": "/vLtyN6HK5twSZIFerD199YTmjk=", @@ -497,52 +533,52 @@ "revisionTime": "2016-11-03T22:44:32Z" }, { - "checksumSHA1": "tkJPssYejSjuAwE2tdEnoEIj93Q=", + "checksumSHA1": "aEiR2m3NGaMGTbUW5P+w5gKFyc8=", "path": "github.com/golang/protobuf/ptypes", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "G0aiY+KmzFsQLTNzRAGRhJNSj7A=", + "checksumSHA1": "2/Xg4L9IVGQRJB8zCELZx7/Z4HU=", "path": "github.com/golang/protobuf/ptypes/any", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": 
"6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "kjVDCbK5/WiHqP1g4GMUxm75jos=", + "checksumSHA1": "RE9rLveNHapyMKQC8p10tbkUE9w=", "path": "github.com/golang/protobuf/ptypes/duration", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "VCwyXqpYo81QNvC7z6nsp+yczc4=", + "checksumSHA1": "RT/PGRMtH/yBCbIJfZftaz5yc3M=", "path": "github.com/golang/protobuf/ptypes/struct", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "FdeygjOuyR2p5v9b0kNOtzfpjS4=", + "checksumSHA1": "seEwY2xETpK9yHJ9+bHqkLZ0VMU=", "path": "github.com/golang/protobuf/ptypes/timestamp", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { - "checksumSHA1": "7sWfJ35gaddpCbcKYZRG2nL6eQo=", + "checksumSHA1": "KlQCb83HC090bojw4ofNDxn2nho=", "path": "github.com/golang/protobuf/ptypes/wrappers", - "revision": "aa810b61a9c79d51363740d207bb46cf8e620ed5", - "revisionTime": "2018-08-14T21:14:27Z", - "version": "v1.2.0", - "versionExact": "v1.2.0" + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z", + "version": "v1.3.2", + "versionExact": "v1.3.2" }, { "checksumSHA1": 
"p/8vSviYF91gFflhrt5vkyksroo=", @@ -550,12 +586,48 @@ "revision": "553a641470496b2327abcac10b36396bd98e45c9", "revisionTime": "2017-02-15T23:32:05Z" }, + { + "checksumSHA1": "HYqCnwjrsx9RYvx7uA0EkzLGnxA=", + "path": "github.com/google/go-cmp/cmp", + "revision": "6d8cafd2f64fe3cd66b7530d95df066b00bdd777", + "revisionTime": "2019-08-01T21:37:55Z" + }, + { + "checksumSHA1": "FUnTgtE5i3f8asIvicGkJSFlrts=", + "path": "github.com/google/go-cmp/cmp/internal/diff", + "revision": "6d8cafd2f64fe3cd66b7530d95df066b00bdd777", + "revisionTime": "2019-08-01T21:37:55Z" + }, + { + "checksumSHA1": "nR8EJ8i8lqxxmtLPnXI7WlYANiE=", + "path": "github.com/google/go-cmp/cmp/internal/flags", + "revision": "6d8cafd2f64fe3cd66b7530d95df066b00bdd777", + "revisionTime": "2019-08-01T21:37:55Z" + }, + { + "checksumSHA1": "0pcLJsUQUaBdPXM5LuL9uFeuETs=", + "path": "github.com/google/go-cmp/cmp/internal/function", + "revision": "6d8cafd2f64fe3cd66b7530d95df066b00bdd777", + "revisionTime": "2019-08-01T21:37:55Z" + }, + { + "checksumSHA1": "ZNN1jJuHnBCpo21lSv25VvkotIM=", + "path": "github.com/google/go-cmp/cmp/internal/value", + "revision": "6d8cafd2f64fe3cd66b7530d95df066b00bdd777", + "revisionTime": "2019-08-01T21:37:55Z" + }, { "checksumSHA1": "V/53BpqgOkSDZCX6snQCAkdO2fM=", "path": "github.com/googleapis/gax-go", "revision": "da06d194a00e19ce00d9011a13931c3f6f6887c7", "revisionTime": "2016-11-07T00:24:06Z" }, + { + "checksumSHA1": "WZoHSeTnVjnPIX2+U1Otst5MUKw=", + "path": "github.com/googleapis/gax-go/v2", + "revision": "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2", + "revisionTime": "2019-05-13T18:38:25Z" + }, { "checksumSHA1": "P3zGmsNjW8m15a+nks4FdVpFKwE=", "path": "github.com/gopherjs/gopherjs/js", @@ -618,6 +690,12 @@ "revision": "6bb64b370b90e7ef1fa532be9e591a81c3493e00", "revisionTime": "2016-05-03T14:34:40Z" }, + { + "checksumSHA1": "UThRII2e7MEeIJ2sTHbCXC+4tKU=", + "path": "github.com/hashicorp/golang-lru/simplelru", + "revision": "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d", + 
"revisionTime": "2019-07-26T16:11:22Z" + }, { "checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=", "path": "github.com/hashicorp/serf/coordinate", @@ -913,10 +991,10 @@ "revisionTime": "2016-05-23T15:31:47Z" }, { - "checksumSHA1": "Bn333k9lTndxU3D6n/G5c+GMcYY=", + "checksumSHA1": "/7bZ0f2fM9AAsLf3nMca6Gtlm6E=", "path": "github.com/stretchr/testify/assert", - "revision": "8d64eb7173c7753d6419fd4a9caf057398611364", - "revisionTime": "2016-05-24T23:42:29Z" + "revision": "221dbe5ed46703ee255b1da0dec05086f5035f62", + "revisionTime": "2019-05-17T17:51:56Z" }, { "checksumSHA1": "P9FJpir2c4G5PA46qEkaWy3l60U=", @@ -1098,6 +1176,108 @@ "revision": "6d7457066b9b62f64b9a884659d89ad5c5ad5173", "revisionTime": "2019-01-28T07:28:38Z" }, + { + "checksumSHA1": "w+WRj7WpdItd5iR7PcaQQKMrVB0=", + "path": "go.opencensus.io", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "KLZy3Nh+8JlI04JmBa/Jc8fxrVQ=", + "path": "go.opencensus.io/internal", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "Dw3rpna1DwTa7TCzijInKcU49g4=", + "path": "go.opencensus.io/internal/tagencoding", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "r6fbtPwxK4/TYUOWc7y0hXdAG4Q=", + "path": "go.opencensus.io/metric/metricdata", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "kWj13srwY1SH5KgFecPhEfHnzVc=", + "path": "go.opencensus.io/metric/metricproducer", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "kZAPvdijG2qWdS00Vt2NS4kH02k=", + "path": "go.opencensus.io/plugin/ocgrpc", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "Ur+xijNXCbNHR8Q5VjW1czSAabo=", + 
"path": "go.opencensus.io/plugin/ochttp", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "UZhIoErIy1tKLmVT/5huwlp6KFQ=", + "path": "go.opencensus.io/plugin/ochttp/propagation/b3", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "q+y8X+5nDONIlJlxfkv+OtA18ds=", + "path": "go.opencensus.io/resource", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "Cc4tRuW0IjlfAFY8BcdfMDqG0R8=", + "path": "go.opencensus.io/stats", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "oIo4NRi6AVCfcwVfHzCXAsoZsdI=", + "path": "go.opencensus.io/stats/internal", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "vN9GN1vwD4RU/3ld2tKK00K0i94=", + "path": "go.opencensus.io/stats/view", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "AoqL/neZwl05Fv08vcXXlhbY12g=", + "path": "go.opencensus.io/tag", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "0O3djqX4bcg5O9LZdcinEoYeQKs=", + "path": "go.opencensus.io/trace", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "JkvEb8oMEFjic5K/03Tyr5Lok+w=", + "path": "go.opencensus.io/trace/internal", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": "FHJParRi8f1GHO7Cx+lk3bMWBq0=", + "path": "go.opencensus.io/trace/propagation", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, + { + "checksumSHA1": 
"UHbxxaMqpEPsubh8kPwzSlyEwqI=", + "path": "go.opencensus.io/trace/tracestate", + "revision": "b4a14686f0a98096416fe1b4cb848e384fb2b22b", + "revisionTime": "2019-07-13T07:22:01Z" + }, { "checksumSHA1": "FwW3Vv4jW0Nv7V2SZC7x/Huj5M4=", "path": "golang.org/x/crypto/argon2", @@ -1177,34 +1357,34 @@ "revisionTime": "2018-11-14T21:44:15Z" }, { - "checksumSHA1": "4q+J7KldqFG28gkuEdHTyHgNcz4=", + "checksumSHA1": "uRlaEkyMCxyjj57KBa81GEfvCwg=", "path": "golang.org/x/oauth2", - "revision": "04e1573abc896e70388bd387a69753c378d46466", - "revisionTime": "2016-07-30T22:43:56Z" + "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", + "revisionTime": "2019-05-07T23:52:07Z" }, { - "checksumSHA1": "Ml9aUD6MMJOwU2ZndUEWjQZA6cs=", + "checksumSHA1": "UmEcak5EiFA6UpbMnlfkQzHyw3M=", "path": "golang.org/x/oauth2/google", - "revision": "04e1573abc896e70388bd387a69753c378d46466", - "revisionTime": "2016-07-30T22:43:56Z" + "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", + "revisionTime": "2019-05-07T23:52:07Z" }, { - "checksumSHA1": "D3v/aqfB9swlaZcSksCoF+lbOqo=", + "checksumSHA1": "+9KSfsjsC3F2CldDDb+Dt+d/H3Q=", "path": "golang.org/x/oauth2/internal", - "revision": "04e1573abc896e70388bd387a69753c378d46466", - "revisionTime": "2016-07-30T22:43:56Z" + "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", + "revisionTime": "2019-05-07T23:52:07Z" }, { - "checksumSHA1": "A/8+i+ZrWYF+ihbus3fjWVi7u6I=", + "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=", "path": "golang.org/x/oauth2/jws", - "revision": "04e1573abc896e70388bd387a69753c378d46466", - "revisionTime": "2016-07-30T22:43:56Z" + "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", + "revisionTime": "2019-05-07T23:52:07Z" }, { - "checksumSHA1": "McqNj0/805YfYQJQGomeB0s+EcU=", + "checksumSHA1": "HGS6ig1GfcE2CBHBsi965ZVn9Xw=", "path": "golang.org/x/oauth2/jwt", - "revision": "04e1573abc896e70388bd387a69753c378d46466", - "revisionTime": "2016-07-30T22:43:56Z" + "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", + 
"revisionTime": "2019-05-07T23:52:07Z" }, { "checksumSHA1": "1CmUDjhZlyKZcbLYlWI7cRzK3fI=", @@ -1309,64 +1489,108 @@ "revisionTime": "2016-10-28T04:02:39Z" }, { - "checksumSHA1": "I1JSeU5OMapl+4s2VrnBkMon3Bw=", + "checksumSHA1": "FhzGDPlkW5SaQGtSgKnjQAiYVk0=", "path": "google.golang.org/api/gensupport", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "BWKmb7kGYbfbvXO6E7tCpTh9zKE=", + "checksumSHA1": "YIDE68w/xMptf6Nu9hHiOwXOvho=", "path": "google.golang.org/api/googleapi", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { "checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=", "path": "google.golang.org/api/googleapi/internal/uritemplates", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=", "path": "google.golang.org/api/googleapi/transport", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "nefIfmUzE2DJD4Tpodz+WHQLfeE=", + "checksumSHA1": "6Tg4dDJKzoSrAA5beVknvnjluOU=", "path": "google.golang.org/api/internal", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": 
"2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=", + "checksumSHA1": "zh9AcT6oNvhnOqb7w7njY48TkvI=", "path": "google.golang.org/api/iterator", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "7u58UYArY+urG47WScM0HFdBahs=", + "checksumSHA1": "XdTB13Pxzd95rhckAEBpCeMp69M=", "path": "google.golang.org/api/iterator/testing", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "kEQSHsbkLxyIijEvp5W2SF3uqsU=", + "checksumSHA1": "2AyxThTPscWdy49fGsU2tg0Uyw8=", "path": "google.golang.org/api/option", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "xygm9BwoCg7vc0PPgAPdxNKJ38c=", + "checksumSHA1": "Aka6Sle3vs6xGP70PADl9lAlZIE=", "path": "google.golang.org/api/storage/v1", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { - "checksumSHA1": "ztaquTYXuYLb5Kc6mtF64yrsA7E=", + "checksumSHA1": "hOQM3ns9t81o566ge8UNFEtoXX8=", "path": "google.golang.org/api/transport", - "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb", - "revisionTime": "2016-12-12T20:09:13Z" + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": 
"2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "XeonlHuXpmHUQDqIK2qJ/DSKg0o=", + "path": "google.golang.org/api/transport/grpc", + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "WzZfHJ4G6jO/qf3n6DI9a9awJQk=", + "path": "google.golang.org/api/transport/http", + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "sJcKCvjPtoysqyelsB2CQzC5oQI=", + "path": "google.golang.org/api/transport/http/internal/propagation", + "revision": "02490b97dff7cfde1995bd77de808fd27053bc87", + "revisionTime": "2019-06-24T17:16:18Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" }, { "checksumSHA1": "buWXkeU6VNtym88sZ7lKJvsCVXk=", @@ -1433,10 +1657,34 @@ "versionExact": "v1.5.0" }, { - "checksumSHA1": "MgYFT27I9gfAtSVBpGVqkCYOj3U=", + "checksumSHA1": "GlPZsxfa/OYvumlfU8+2j4cVai8=", + "path": "google.golang.org/genproto/googleapis/api/annotations", + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" + }, + { + "checksumSHA1": "nTQH9H1cWFc4Ft8sJylUT9ANl/Y=", + "path": "google.golang.org/genproto/googleapis/iam/v1", + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" + }, + { + "checksumSHA1": "EOkBjXBkCQcsEf9fk2KOQZcJO08=", + "path": "google.golang.org/genproto/googleapis/rpc/code", + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" + }, + { + "checksumSHA1": "dU5fToNngC22+3DsebkdYv+T3jE=", "path": "google.golang.org/genproto/googleapis/rpc/status", - "revision": "b5d43981345bdb2c233eb4bf3277847b48c6fdc6", - "revisionTime": "2018-11-09T15:42:31Z" + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" + 
}, + { + "checksumSHA1": "F1znYp6CXz3gZ0WGdy89d7jZgP4=", + "path": "google.golang.org/genproto/googleapis/type/expr", + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" }, { "checksumSHA1": "O6SQTcVdhL+4betKp/7ketCc/AU=", @@ -1462,6 +1710,22 @@ "version": "v1.16.0", "versionExact": "v1.16.0" }, + { + "checksumSHA1": "ZD8cJs3NtFy3pzofoTThBvVVdKU=", + "path": "google.golang.org/grpc/balancer/grpclb", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "CWf3yHL+DCM8pZETYCGA70C4JGM=", + "path": "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, { "checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=", "path": "google.golang.org/grpc/balancer/roundrobin", @@ -1494,6 +1758,70 @@ "version": "v1.16.0", "versionExact": "v1.16.0" }, + { + "checksumSHA1": "RqDVFWVRXNIzSEge/L8JSMskEME=", + "path": "google.golang.org/grpc/credentials/alts", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "qAUIOU0aukDblUKBw9Pbjzc+nW8=", + "path": "google.golang.org/grpc/credentials/alts/internal", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "PTVv5w1hd88sHf2TJbctBasS4ck=", + "path": "google.golang.org/grpc/credentials/alts/internal/authinfo", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "/s6U8ulRJiogFjFygs450dOeIoI=", + "path": "google.golang.org/grpc/credentials/alts/internal/conn", 
+ "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "znhrvWfbdiviJiZpekYHOi4TRmw=", + "path": "google.golang.org/grpc/credentials/alts/internal/handshaker", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "CliKuySSTAK7m5iZuEA3fRiLHjg=", + "path": "google.golang.org/grpc/credentials/alts/internal/handshaker/service", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "3/WS7uTk/B23ijy0PoHmIS/A76M=", + "path": "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, + { + "checksumSHA1": "KreBPF6lZnpT8psfiyRson0C9lI=", + "path": "google.golang.org/grpc/credentials/google", + "revision": "2e463a05d100327ca47ac218281906921038fd95", + "revisionTime": "2018-10-23T17:37:47Z", + "version": "v1.16.0", + "versionExact": "v1.16.0" + }, { "checksumSHA1": "QbufP1o0bXrtd5XecqdRCK/Vl0M=", "path": "google.golang.org/grpc/credentials/oauth",