diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index 0abdab9522a..8c81154b8c1 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -88,9 +88,6 @@ import ( ) const ( - backupTimestampFormat = "2006-01-02.150405" - manifestFileName = "MANIFEST" - // operationTimeout is the timeout for individual operations like fetching // the master position. This does not impose an overall timeout on // long-running processes like taking the backup. It only applies to @@ -154,7 +151,6 @@ func main() { }() // Open connection backup storage. - backupDir := fmt.Sprintf("%v/%v", *initKeyspace, *initShard) backupStorage, err := backupstorage.GetBackupStorage() if err != nil { log.Errorf("Can't get backup storage: %v", err) @@ -168,13 +164,14 @@ func main() { // Try to take a backup, if it's been long enough since the last one. // Skip pruning if backup wasn't fully successful. We don't want to be // deleting things if the backup process is not healthy. + backupDir := mysqlctl.GetBackupDir(*initKeyspace, *initShard) doBackup, err := shouldBackup(ctx, topoServer, backupStorage, backupDir) if err != nil { log.Errorf("Can't take backup: %v", err) exit.Return(1) } if doBackup { - if err := takeBackup(ctx, topoServer, backupStorage, backupDir); err != nil { + if err := takeBackup(ctx, topoServer, backupStorage); err != nil { log.Errorf("Failed to take backup: %v", err) exit.Return(1) } @@ -187,7 +184,7 @@ func main() { } } -func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage, backupDir string) error { +func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error { // This is an imaginary tablet alias. 
The value doesn't matter for anything, // except that we generate a random UID to ensure the target backup // directory is unique if multiple vtbackup instances are launched for the @@ -249,6 +246,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back TopoServer: topoServer, Keyspace: *initKeyspace, Shard: *initShard, + TabletAlias: topoproto.TabletAliasString(tabletAlias), } // In initial_backup mode, just take a backup of this empty database. if *initialBackup { @@ -265,15 +263,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil { return fmt.Errorf("can't initialize database: %v", err) } + backupParams.BackupTime = time.Now() // Now we're ready to take the backup. - name := backupName(time.Now(), tabletAlias) - if err := mysqlctl.Backup(ctx, backupDir, name, backupParams); err != nil { + if err := mysqlctl.Backup(ctx, backupParams); err != nil { return fmt.Errorf("backup failed: %v", err) } log.Info("Initial backup successful.") return nil } + backupDir := mysqlctl.GetBackupDir(*initKeyspace, *initShard) log.Infof("Restoring latest backup from directory %v", backupDir) params := mysqlctl.RestoreParams{ Cnf: mycnf, @@ -284,12 +283,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back LocalMetadata: map[string]string{}, DeleteBeforeRestore: true, DbName: dbName, - Dir: backupDir, + Keyspace: *initKeyspace, + Shard: *initShard, } - restorePos, err := mysqlctl.Restore(ctx, params) + backupManifest, err := mysqlctl.Restore(ctx, params) + var restorePos mysql.Position switch err { case nil: log.Infof("Successfully restored from backup at replication position %v", restorePos) + // if err is nil, we expect backupManifest to be non-nil + restorePos = backupManifest.Position case mysqlctl.ErrNoBackup: // There is no backup found, but we may be taking the initial backup of a shard if !*allowFirstBackup { @@ -339,7 
+342,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back // Remember the time when we fetched the master position, not when we caught // up to it, so the timestamp on our backup is honest (assuming we make it // to the goal position). - backupTime := time.Now() + backupParams.BackupTime = time.Now() // Wait for replication to catch up. waitStartTime := time.Now() @@ -380,8 +383,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } // Now we can take a new backup. - name := backupName(backupTime, tabletAlias) - if err := mysqlctl.Backup(ctx, backupDir, name, backupParams); err != nil { + if err := mysqlctl.Backup(ctx, backupParams); err != nil { return fmt.Errorf("error taking backup: %v", err) } @@ -490,10 +492,6 @@ func retryOnError(ctx context.Context, fn func() error) error { } } -func backupName(backupTime time.Time, tabletAlias *topodatapb.TabletAlias) string { - return fmt.Sprintf("%v.%v", backupTime.UTC().Format(backupTimestampFormat), topoproto.TabletAliasString(tabletAlias)) -} - func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage, backupDir string) error { if *minRetentionTime == 0 { log.Info("Pruning of old backups is disabled.") @@ -542,7 +540,7 @@ func parseBackupTime(name string) (time.Time, error) { if len(parts) != 3 { return time.Time{}, fmt.Errorf("backup name not in expected format (date.time.tablet-alias): %v", name) } - backupTime, err := time.Parse(backupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1])) + backupTime, err := time.Parse(mysqlctl.BackupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1])) if err != nil { return time.Time{}, fmt.Errorf("can't parse timestamp from backup %q: %v", name, err) } diff --git a/go/vt/logutil/proto3.go b/go/vt/logutil/proto3.go index 950a74ec664..43fd959dd82 100644 --- a/go/vt/logutil/proto3.go +++ b/go/vt/logutil/proto3.go @@ -20,28 +20,29 @@ import ( "time" logutilpb 
"vitess.io/vitess/go/vt/proto/logutil" + vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) // This file contains a few functions to help with proto3. -// ProtoToTime converts a logutilpb.Time to a time.Time. +// ProtoToTime converts a vttimepb.Time to a time.Time. // proto3 will eventually support timestamps, at which point we'll retire // this. // // A nil pointer is like the empty timestamp. -func ProtoToTime(ts *logutilpb.Time) time.Time { +func ProtoToTime(ts *vttimepb.Time) time.Time { if ts == nil { // treat nil like the empty Timestamp - return time.Unix(0, 0).UTC() + return time.Time{} } return time.Unix(ts.Seconds, int64(ts.Nanoseconds)).UTC() } -// TimeToProto converts the time.Time to a logutilpb.Time. -func TimeToProto(t time.Time) *logutilpb.Time { +// TimeToProto converts the time.Time to a vttimepb.Time. +func TimeToProto(t time.Time) *vttimepb.Time { seconds := t.Unix() nanos := int64(t.Sub(time.Unix(seconds, 0))) - return &logutilpb.Time{ + return &vttimepb.Time{ Seconds: seconds, Nanoseconds: int32(nanos), } diff --git a/go/vt/logutil/proto3_test.go b/go/vt/logutil/proto3_test.go index 185450f3b20..47b858c1dea 100644 --- a/go/vt/logutil/proto3_test.go +++ b/go/vt/logutil/proto3_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/golang/protobuf/proto" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/proto/vttime" ) const ( @@ -39,43 +39,43 @@ func utcDate(year, month, day int) time.Time { } var tests = []struct { - pt *logutilpb.Time + pt *vttime.Time t time.Time }{ // The timestamp representing the Unix epoch date. - {pt: &logutilpb.Time{Seconds: 0, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: 0, Nanoseconds: 0}, t: utcDate(1970, 1, 1)}, // The smallest representable timestamp with non-negative nanos. - {pt: &logutilpb.Time{Seconds: math.MinInt64, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: math.MinInt64, Nanoseconds: 0}, t: time.Unix(math.MinInt64, 0).UTC()}, // The earliest valid timestamp. 
- {pt: &logutilpb.Time{Seconds: minValidSeconds, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: minValidSeconds, Nanoseconds: 0}, t: utcDate(1, 1, 1)}, // The largest representable timestamp with nanos in range. - {pt: &logutilpb.Time{Seconds: math.MaxInt64, Nanoseconds: 1e9 - 1}, + {pt: &vttime.Time{Seconds: math.MaxInt64, Nanoseconds: 1e9 - 1}, t: time.Unix(math.MaxInt64, 1e9-1).UTC()}, // The largest valid timestamp. - {pt: &logutilpb.Time{Seconds: maxValidSeconds - 1, Nanoseconds: 1e9 - 1}, + {pt: &vttime.Time{Seconds: maxValidSeconds - 1, Nanoseconds: 1e9 - 1}, t: time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, // The smallest invalid timestamp that is larger than the valid range. - {pt: &logutilpb.Time{Seconds: maxValidSeconds, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: maxValidSeconds, Nanoseconds: 0}, t: time.Unix(maxValidSeconds, 0).UTC()}, // A date before the epoch. - {pt: &logutilpb.Time{Seconds: -281836800, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: -281836800, Nanoseconds: 0}, t: utcDate(1961, 1, 26)}, // A date after the epoch. - {pt: &logutilpb.Time{Seconds: 1296000000, Nanoseconds: 0}, + {pt: &vttime.Time{Seconds: 1296000000, Nanoseconds: 0}, t: utcDate(2011, 1, 26)}, // A date after the epoch, in the middle of the day. 
- {pt: &logutilpb.Time{Seconds: 1296012345, Nanoseconds: 940483}, + {pt: &vttime.Time{Seconds: 1296012345, Nanoseconds: 940483}, t: time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, } diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 41ebc861182..6d11c85eea9 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -19,6 +19,7 @@ package mysqlctl import ( "errors" "flag" + "fmt" "os" "path/filepath" "strings" @@ -46,6 +47,8 @@ const ( // RestoreState is the name of the sentinel file used to detect whether a previous restore // terminated abnormally RestoreState = "restore_in_progress" + // BackupTimestampFormat is the format in which we save BackupTime and FinishedTime + BackupTimestampFormat = "2006-01-02.150405" ) const ( @@ -89,14 +92,17 @@ var ( // - uses the BackupStorage service to store a new backup // - shuts down Mysqld during the backup // - remember if we were replicating, restore the exact same state -func Backup(ctx context.Context, dir, name string, params BackupParams) error { +func Backup(ctx context.Context, params BackupParams) error { + + backupDir := GetBackupDir(params.Keyspace, params.Shard) + name := fmt.Sprintf("%v.%v", params.BackupTime.UTC().Format(BackupTimestampFormat), params.TabletAlias) // Start the backup with the BackupStorage. bs, err := backupstorage.GetBackupStorage() if err != nil { return vterrors.Wrap(err, "unable to get backup storage") } defer bs.Close() - bh, err := bs.StartBackup(ctx, dir, name) + bh, err := bs.StartBackup(ctx, backupDir, name) if err != nil { return vterrors.Wrap(err, "StartBackup failed") } @@ -215,88 +221,81 @@ func removeExistingFiles(cnf *Mycnf) error { // Restore is the main entry point for backup restore. If there is no // appropriate backup on the BackupStorage, Restore logs an error // and returns ErrNoBackup. Any other error is returned. 
-func Restore(ctx context.Context, params RestoreParams) (mysql.Position, error) { - - // extract params - cnf := params.Cnf - mysqld := params.Mysqld - logger := params.Logger - localMetadata := params.LocalMetadata - deleteBeforeRestore := params.DeleteBeforeRestore - dbName := params.DbName - dir := params.Dir +func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) { - rval := mysql.Position{} - - if !deleteBeforeRestore { - logger.Infof("Restore: Checking if a restore is in progress") - if !RestoreWasInterrupted(cnf) { - logger.Infof("Restore: No %v file found, checking no existing data is present", RestoreState) + if !params.DeleteBeforeRestore { + params.Logger.Infof("Restore: Checking if a restore is in progress") + if !RestoreWasInterrupted(params.Cnf) { + params.Logger.Infof("Restore: No %v file found, checking no existing data is present", RestoreState) // Wait for mysqld to be ready, in case it was launched in parallel with us. - if err := mysqld.Wait(ctx, cnf); err != nil { - return mysql.Position{}, err + if err := params.Mysqld.Wait(ctx, params.Cnf); err != nil { + return nil, err } - ok, err := checkNoDB(ctx, mysqld, dbName) + ok, err := checkNoDB(ctx, params.Mysqld, params.DbName) if err != nil { - return mysql.Position{}, err + return nil, err } if !ok { - logger.Infof("Auto-restore is enabled, but mysqld already contains data. Assuming vttablet was just restarted.") - if err = PopulateMetadataTables(mysqld, localMetadata, dbName); err == nil { + params.Logger.Infof("Auto-restore is enabled, but mysqld already contains data. 
Assuming vttablet was just restarted.") + if err = PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName); err == nil { err = ErrExistingDB } - return mysql.Position{}, err + return nil, err } } } // find the right backup handle: most recent one, with a MANIFEST - logger.Infof("Restore: looking for a suitable backup to restore") + params.Logger.Infof("Restore: looking for a suitable backup to restore") bs, err := backupstorage.GetBackupStorage() if err != nil { - return mysql.Position{}, err + return nil, err } defer bs.Close() - bhs, err := bs.ListBackups(ctx, dir) + // Backups are stored in a directory structure that starts with + // / + backupDir := GetBackupDir(params.Keyspace, params.Shard) + bhs, err := bs.ListBackups(ctx, backupDir) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "ListBackups failed") + return nil, vterrors.Wrap(err, "ListBackups failed") } if len(bhs) == 0 { // There are no backups (not even broken/incomplete ones). - logger.Errorf("no backup to restore on BackupStorage for directory %v. Starting up empty.", dir) + params.Logger.Errorf("no backup to restore on BackupStorage for directory %v. Starting up empty.", backupDir) // Wait for mysqld to be ready, in case it was launched in parallel with us. - if err = mysqld.Wait(ctx, cnf); err != nil { - logger.Errorf("mysqld is not running: %v", err) - return mysql.Position{}, err + if err = params.Mysqld.Wait(ctx, params.Cnf); err != nil { + params.Logger.Errorf("mysqld is not running: %v", err) + return nil, err } // Since this is an empty database make sure we start replication at the beginning - if err := mysqld.ResetReplication(ctx); err != nil { - logger.Errorf("error resetting slave replication: %v. Continuing", err) + if err := params.Mysqld.ResetReplication(ctx); err != nil { + params.Logger.Errorf("error resetting slave replication: %v. 
Continuing", err) } - if err := PopulateMetadataTables(mysqld, localMetadata, dbName); err != nil { - logger.Errorf("error populating metadata tables: %v. Continuing", err) + if err := PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName); err != nil { + params.Logger.Errorf("error populating metadata tables: %v. Continuing", err) } // Always return ErrNoBackup - return mysql.Position{}, ErrNoBackup + return nil, ErrNoBackup } - bh, err := FindBackupToRestore(ctx, cnf, mysqld, logger, dir, bhs) + bh, err := FindBackupToRestore(ctx, params, bhs) if err != nil { - return rval, err + return nil, err } re, err := GetRestoreEngine(ctx, bh) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "Failed to find restore engine") + return nil, vterrors.Wrap(err, "Failed to find restore engine") } - if rval, err = re.ExecuteRestore(ctx, params, bh); err != nil { - return rval, err + manifest, err := re.ExecuteRestore(ctx, params, bh) + if err != nil { + return nil, err } // mysqld needs to be running in order for mysql_upgrade to work. @@ -306,41 +305,45 @@ func Restore(ctx context.Context, params RestoreParams) (mysql.Position, error) // is executed. And since with --skip-grant-tables anyone can connect to MySQL // without password, we are passing --skip-networking to greatly reduce the set // of those who can connect. - logger.Infof("Restore: starting mysqld for mysql_upgrade") + params.Logger.Infof("Restore: starting mysqld for mysql_upgrade") // Note Start will use dba user for waiting, this is fine, it will be allowed. 
- err = mysqld.Start(context.Background(), cnf, "--skip-grant-tables", "--skip-networking") + err = params.Mysqld.Start(context.Background(), params.Cnf, "--skip-grant-tables", "--skip-networking") if err != nil { - return mysql.Position{}, err + return nil, err } - logger.Infof("Restore: running mysql_upgrade") - if err := mysqld.RunMysqlUpgrade(); err != nil { - return mysql.Position{}, vterrors.Wrap(err, "mysql_upgrade failed") + params.Logger.Infof("Restore: running mysql_upgrade") + if err := params.Mysqld.RunMysqlUpgrade(); err != nil { + return nil, vterrors.Wrap(err, "mysql_upgrade failed") } + // Add backupTime and restorePosition to LocalMetadata + params.LocalMetadata["RestoredBackupTime"] = manifest.BackupTime + params.LocalMetadata["RestorePosition"] = mysql.EncodePosition(manifest.Position) + // Populate local_metadata before starting without --skip-networking, // so it's there before we start announcing ourselves. - logger.Infof("Restore: populating local_metadata") - err = PopulateMetadataTables(mysqld, localMetadata, dbName) + params.Logger.Infof("Restore: populating local_metadata") + err = PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName) if err != nil { - return mysql.Position{}, err + return nil, err } // The MySQL manual recommends restarting mysqld after running mysql_upgrade, // so that any changes made to system tables take effect. 
- logger.Infof("Restore: restarting mysqld after mysql_upgrade") - err = mysqld.Shutdown(context.Background(), cnf, true) + params.Logger.Infof("Restore: restarting mysqld after mysql_upgrade") + err = params.Mysqld.Shutdown(context.Background(), params.Cnf, true) if err != nil { - return mysql.Position{}, err + return nil, err } - err = mysqld.Start(context.Background(), cnf) + err = params.Mysqld.Start(context.Background(), params.Cnf) if err != nil { - return mysql.Position{}, err + return nil, err } - if err = removeStateFile(cnf); err != nil { - return mysql.Position{}, err + if err = removeStateFile(params.Cnf); err != nil { + return nil, err } - return rval, nil + return manifest, nil } diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index e65ecd97c55..1daa3135fe8 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -23,9 +23,9 @@ import ( "fmt" "os" "path/filepath" + "time" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -56,8 +56,13 @@ type BackupParams struct { HookExtraEnv map[string]string // TopoServer, Keyspace and Shard are used to discover master tablet TopoServer *topo.Server - Keyspace string - Shard string + // Keyspace and Shard are used to infer the directory where backups should be stored + Keyspace string + Shard string + // TabletAlias is used along with backupTime to construct the backup name + TabletAlias string + // BackupTime is the time at which the backup is being started + BackupTime time.Time } // RestoreParams is the struct that holds all params passed to ExecuteRestore @@ -76,15 +81,20 @@ type RestoreParams struct { // restoring. 
This is always set to false when starting a tablet with -restore_from_backup, // but is set to true when executing a RestoreFromBackup command on an already running vttablet DeleteBeforeRestore bool - // Name of the managed database / schema + // DbName is the name of the managed database / schema DbName string - // Directory location to search for a usable backup - Dir string + // Keyspace and Shard are used to infer the directory where backups are stored + Keyspace string + Shard string + // StartTime: if non-zero, look for a backup that was taken at or before this time + // Otherwise, find the most recent backup + StartTime time.Time } // RestoreEngine is the interface to restore a backup with a given engine. +// Returns the manifest of a backup if successful, otherwise returns an error type RestoreEngine interface { - ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (mysql.Position, error) + ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) } // BackupRestoreEngine is a combination of BackupEngine and RestoreEngine. @@ -169,6 +179,9 @@ type BackupManifest struct { // Position is the replication position at which the backup was taken. Position mysql.Position + // BackupTime is when the backup was taken in UTC time (RFC 3339 format) + BackupTime string + // FinishedTime is the time (in RFC 3339 format, UTC) at which the backup finished, if known. // Some backups may not set this field if they were created before the field was added. FinishedTime string @@ -177,23 +190,39 @@ type BackupManifest struct { // FindBackupToRestore returns a selected candidate backup to be restored. // It returns the most recent backup that is complete, meaning it has a valid // MANIFEST file. 
-func FindBackupToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, dir string, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, error) { +func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, error) { var bh backupstorage.BackupHandle var index int + // if a StartTime is provided in params, then find a backup that was taken at or before that time + checkBackupTime := !params.StartTime.IsZero() + backupDir := GetBackupDir(params.Keyspace, params.Shard) for index = len(bhs) - 1; index >= 0; index-- { bh = bhs[index] // Check that the backup MANIFEST exists and can be successfully decoded. - _, err := GetBackupManifest(ctx, bh) + bm, err := GetBackupManifest(ctx, bh) if err != nil { - log.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), dir, err) + params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err) continue } - logger.Infof("Restore: found backup %v %v to restore", bh.Directory(), bh.Name()) - break + var backupTime time.Time + if checkBackupTime { + backupTime, err = time.Parse(time.RFC3339, bm.BackupTime) + if err != nil { + params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err) + continue + } + } + if !checkBackupTime /* not snapshot */ || backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) { + params.Logger.Infof("Restore: found backup %v %v to restore", bh.Directory(), bh.Name()) + break + } } if index < 0 { + if checkBackupTime { + params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat)) + } // There is at least one attempted backup, but none could be read. // This implies there is data we ought to have, so it's not safe to start // up empty. 
@@ -256,3 +285,9 @@ func RestoreWasInterrupted(cnf *Mycnf) bool { _, err := os.Stat(name) return err == nil } + +// GetBackupDir returns the directory where backups for the +// given keyspace/shard are (or will be) stored +func GetBackupDir(keyspace, shard string) string { + return fmt.Sprintf("%v/%v", keyspace, shard) +} diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index 30222b697c4..5dc3b3fcbfb 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -35,7 +35,6 @@ import ( "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo" @@ -241,28 +240,18 @@ func findFilesToBackup(cnf *Mycnf) ([]FileEntry, error) { // and an overall error. func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { - // extract all params from BackupParams - cnf := params.Cnf - mysqld := params.Mysqld - logger := params.Logger - backupConcurrency := params.Concurrency - hookExtraEnv := params.HookExtraEnv - topoServer := params.TopoServer - keyspace := params.Keyspace - shard := params.Shard - - logger.Infof("Hook: %v, Compress: %v", *backupStorageHook, *backupStorageCompress) + params.Logger.Infof("Hook: %v, Compress: %v", *backupStorageHook, *backupStorageCompress) // Save initial state so we can restore. slaveStartRequired := false sourceIsMaster := false readOnly := true var replicationPosition mysql.Position - semiSyncMaster, semiSyncSlave := mysqld.SemiSyncEnabled() + semiSyncMaster, semiSyncSlave := params.Mysqld.SemiSyncEnabled() // See if we need to restart replication after backup. 
- logger.Infof("getting current replication status") - slaveStatus, err := mysqld.SlaveStatus() + params.Logger.Infof("getting current replication status") + slaveStatus, err := params.Mysqld.SlaveStatus() switch err { case nil: slaveStartRequired = slaveStatus.SlaveRunning() @@ -274,7 +263,7 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP } // get the read-only flag - readOnly, err = mysqld.IsReadOnly() + readOnly, err = params.Mysqld.IsReadOnly() if err != nil { return false, vterrors.Wrap(err, "can't get read-only status") } @@ -282,47 +271,47 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP // get the replication position if sourceIsMaster { if !readOnly { - logger.Infof("turning master read-only before backup") - if err = mysqld.SetReadOnly(true); err != nil { + params.Logger.Infof("turning master read-only before backup") + if err = params.Mysqld.SetReadOnly(true); err != nil { return false, vterrors.Wrap(err, "can't set read-only status") } } - replicationPosition, err = mysqld.MasterPosition() + replicationPosition, err = params.Mysqld.MasterPosition() if err != nil { return false, vterrors.Wrap(err, "can't get master position") } } else { - if err = mysqld.StopSlave(hookExtraEnv); err != nil { + if err = params.Mysqld.StopSlave(params.HookExtraEnv); err != nil { return false, vterrors.Wrapf(err, "can't stop slave") } var slaveStatus mysql.SlaveStatus - slaveStatus, err = mysqld.SlaveStatus() + slaveStatus, err = params.Mysqld.SlaveStatus() if err != nil { return false, vterrors.Wrap(err, "can't get slave status") } replicationPosition = slaveStatus.Position } - logger.Infof("using replication position: %v", replicationPosition) + params.Logger.Infof("using replication position: %v", replicationPosition) // shutdown mysqld - err = mysqld.Shutdown(ctx, cnf, true) + err = params.Mysqld.Shutdown(ctx, params.Cnf, true) if err != nil { return false, vterrors.Wrap(err, "can't shutdown mysqld") 
} // Backup everything, capture the error. - backupErr := be.backupFiles(ctx, cnf, mysqld, logger, bh, replicationPosition, backupConcurrency, hookExtraEnv) + backupErr := be.backupFiles(ctx, params, bh, replicationPosition) usable := backupErr == nil // Try to restart mysqld, use background context in case we timed out the original context - err = mysqld.Start(context.Background(), cnf) + err = params.Mysqld.Start(context.Background(), params.Cnf) if err != nil { return usable, vterrors.Wrap(err, "can't restart mysqld") } // And set read-only mode - logger.Infof("resetting mysqld read-only to %v", readOnly) - if err := mysqld.SetReadOnly(readOnly); err != nil { + params.Logger.Infof("resetting mysqld read-only to %v", readOnly) + if err := params.Mysqld.SetReadOnly(readOnly); err != nil { return usable, err } @@ -330,21 +319,21 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP if semiSyncMaster || semiSyncSlave { // Only do this if one of them was on, since both being off could mean // the plugin isn't even loaded, and the server variables don't exist. 
- logger.Infof("restoring semi-sync settings from before backup: master=%v, slave=%v", + params.Logger.Infof("restoring semi-sync settings from before backup: master=%v, slave=%v", semiSyncMaster, semiSyncSlave) - err := mysqld.SetSemiSyncEnabled(semiSyncMaster, semiSyncSlave) + err := params.Mysqld.SetSemiSyncEnabled(semiSyncMaster, semiSyncSlave) if err != nil { return usable, err } } if slaveStartRequired { - logger.Infof("restarting mysql replication") - if err := mysqld.StartSlave(hookExtraEnv); err != nil { + params.Logger.Infof("restarting mysql replication") + if err := params.Mysqld.StartSlave(params.HookExtraEnv); err != nil { return usable, vterrors.Wrap(err, "cannot restart slave") } // this should be quick, but we might as well just wait - if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { + if err := WaitForSlaveStart(params.Mysqld, slaveStartDeadline); err != nil { return usable, vterrors.Wrap(err, "slave is not restarting") } @@ -360,7 +349,7 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP remoteCtx, remoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) defer remoteCancel() - masterPos, err := getMasterPosition(remoteCtx, tmc, topoServer, keyspace, shard) + masterPos, err := getMasterPosition(remoteCtx, tmc, params.TopoServer, params.Keyspace, params.Shard) // If we are unable to get master position, return error. if err != nil { return usable, err @@ -370,7 +359,7 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP if err := ctx.Err(); err != nil { return usable, err } - status, err := mysqld.SlaveStatus() + status, err := params.Mysqld.SlaveStatus() if err != nil { return usable, err } @@ -387,16 +376,17 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP } // backupFiles finds the list of files to backup, and creates the backup. 
-func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, replicationPosition mysql.Position, backupConcurrency int, hookExtraEnv map[string]string) (finalErr error) { +func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, replicationPosition mysql.Position) (finalErr error) { + // Get the files to backup. - fes, err := findFilesToBackup(cnf) + fes, err := findFilesToBackup(params.Cnf) if err != nil { return vterrors.Wrap(err, "can't find files to backup") } - logger.Infof("found %v files to backup", len(fes)) + params.Logger.Infof("found %v files to backup", len(fes)) // Backup with the provided concurrency. - sema := sync2.NewSemaphore(backupConcurrency, 0) + sema := sync2.NewSemaphore(params.Concurrency, 0) rec := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} for i := range fes { @@ -414,7 +404,7 @@ func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysq // Backup the individual file. name := fmt.Sprintf("%v", i) - rec.RecordError(be.backupFile(ctx, cnf, mysqld, logger, bh, &fes[i], name, hookExtraEnv)) + rec.RecordError(be.backupFile(ctx, params, bh, &fes[i], name)) }(i) } @@ -440,6 +430,7 @@ func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysq BackupManifest: BackupManifest{ BackupMethod: builtinBackupEngineName, Position: replicationPosition, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), FinishedTime: time.Now().UTC().Format(time.RFC3339), }, @@ -460,9 +451,9 @@ func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, mysq } // backupFile backs up an individual file. 
-func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fe *FileEntry, name string, hookExtraEnv map[string]string) (finalErr error) { +func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, fe *FileEntry, name string) (finalErr error) { // Open the source file for reading. - source, err := fe.open(cnf, true) + source, err := fe.open(params.Cnf, true) if err != nil { return err } @@ -473,7 +464,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql return err } - logger.Infof("Backing up file: %v", fe.Name) + params.Logger.Infof("Backing up file: %v", fe.Name) // Open the destination file for writing, and a buffer. wc, err := bh.AddFile(ctx, name, fi.Size()) if err != nil { @@ -483,7 +474,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql if rerr := wc.Close(); rerr != nil { if finalErr != nil { // We already have an error, just log this one. 
- logger.Errorf2(rerr, "failed to close file %v,%v", name, fe.Name) + params.Logger.Errorf2(rerr, "failed to close file %v,%v", name, fe.Name) } else { finalErr = rerr } @@ -500,7 +491,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql var wait hook.WaitFunc if *backupStorageHook != "" { h := hook.NewHook(*backupStorageHook, []string{"-operation", "write"}) - h.ExtraEnv = hookExtraEnv + h.ExtraEnv = params.HookExtraEnv pipe, wait, _, err = h.ExecuteAsWritePipe(writer) if err != nil { return vterrors.Wrapf(err, "'%v' hook returned error", *backupStorageHook) @@ -540,7 +531,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql } stderr, err := wait() if stderr != "" { - logger.Infof("'%v' hook returned stderr: %v", *backupStorageHook, stderr) + params.Logger.Infof("'%v' hook returned stderr: %v", *backupStorageHook, stderr) } if err != nil { return vterrors.Wrapf(err, "'%v' returned error", *backupStorageHook) @@ -560,45 +551,39 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, cnf *Mycnf, mysql // ExecuteRestore restores from a backup. 
If the restore is successful // we return the position from which replication should start // otherwise an error is returned -func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (mysql.Position, error) { - - cnf := params.Cnf - mysqld := params.Mysqld - logger := params.Logger - restoreConcurrency := params.Concurrency - hookExtraEnv := params.HookExtraEnv +func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) { - zeroPosition := mysql.Position{} var bm builtinBackupManifest if err := getBackupManifestInto(ctx, bh, &bm); err != nil { - return zeroPosition, err + return nil, err } // mark restore as in progress - if err := createStateFile(cnf); err != nil { - return zeroPosition, err + if err := createStateFile(params.Cnf); err != nil { + return nil, err } - if err := prepareToRestore(ctx, cnf, mysqld, logger); err != nil { - return zeroPosition, err + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + return nil, err } - logger.Infof("Restore: copying %v files", len(bm.FileEntries)) + params.Logger.Infof("Restore: copying %v files", len(bm.FileEntries)) - if err := be.restoreFiles(context.Background(), cnf, bh, bm.FileEntries, bm.TransformHook, !bm.SkipCompress, restoreConcurrency, hookExtraEnv, logger); err != nil { + if err := be.restoreFiles(context.Background(), params, bh, bm); err != nil { // don't delete the file here because that is how we detect an interrupted restore - return zeroPosition, vterrors.Wrap(err, "failed to restore files") + return nil, vterrors.Wrap(err, "failed to restore files") } - logger.Infof("Restore: returning replication position %v", bm.Position) - return bm.Position, nil + params.Logger.Infof("Restore: returning replication position %v", bm.Position) + return &bm.BackupManifest, nil } // restoreFiles will copy all the files from the 
BackupStorage to the // right place. -func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, transformHook string, compress bool, restoreConcurrency int, hookExtraEnv map[string]string, logger logutil.Logger) error { - sema := sync2.NewSemaphore(restoreConcurrency, 0) +func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { + fes := bm.FileEntries + sema := sync2.NewSemaphore(params.Concurrency, 0) rec := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} for i := range fes { @@ -616,8 +601,8 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, cnf *Mycnf, bh // And restore the file. name := fmt.Sprintf("%v", i) - logger.Infof("Copying file %v: %v", name, fes[i].Name) - err := be.restoreFile(ctx, cnf, bh, &fes[i], transformHook, compress, name, hookExtraEnv) + params.Logger.Infof("Copying file %v: %v", name, fes[i].Name) + err := be.restoreFile(ctx, params, bh, &fes[i], bm.TransformHook, !bm.SkipCompress, name) if err != nil { rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fes[i].Name)) } @@ -628,7 +613,7 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, cnf *Mycnf, bh } // restoreFile restores an individual file. -func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, fe *FileEntry, transformHook string, compress bool, name string, hookExtraEnv map[string]string) (finalErr error) { +func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, fe *FileEntry, transformHook string, compress bool, name string) (finalErr error) { // Open the source file for reading. 
source, err := bh.ReadFile(ctx, name) if err != nil { @@ -637,7 +622,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh b defer source.Close() // Open the destination file for writing. - dstFile, err := fe.open(cnf, false) + dstFile, err := fe.open(params.Cnf, false) if err != nil { return vterrors.Wrap(err, "can't open destination file for writing") } @@ -666,7 +651,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, cnf *Mycnf, bh b var wait hook.WaitFunc if transformHook != "" { h := hook.NewHook(transformHook, []string{"-operation", "read"}) - h.ExtraEnv = hookExtraEnv + h.ExtraEnv = params.HookExtraEnv reader, wait, _, err = h.ExecuteAsReadPipe(reader) if err != nil { return vterrors.Wrapf(err, "'%v' hook returned error", transformHook) diff --git a/go/vt/mysqlctl/mycnf_test.go b/go/vt/mysqlctl/mycnf_test.go index 0298b7f8713..d938c068b18 100644 --- a/go/vt/mysqlctl/mycnf_test.go +++ b/go/vt/mysqlctl/mycnf_test.go @@ -86,7 +86,6 @@ func TestMycnf(t *testing.T) { // 3. go test // 4. \rm $VTROOT/vthook/make_mycnf // 5. Add No Prefix back -//lint:ignore U1000 Test for Mycnf hook changes func NoTestMycnfHook(t *testing.T) { os.Setenv("MYSQL_FLAVOR", "MariaDB") uid := uint32(11111) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 6774918733d..a52b792e753 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -285,12 +285,9 @@ func (mysqld *Mysqld) ResetReplication(ctx context.Context) error { // // Array indices for the results of SHOW PROCESSLIST. 
const ( - //lint:ignore U1000 needed for correct indexing of result columns colConnectionID = iota - //lint:ignore U1000 needed for correct indexing of result columns colUsername colClientAddr - //lint:ignore U1000 needed for correct indexing of result columns colDbName colCommand ) diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index 7b01ed88178..8c89c1de069 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -122,16 +122,12 @@ func closeFile(wc io.WriteCloser, fileName string, logger logutil.Logger, finalE // ExecuteBackup returns a boolean that indicates if the backup is usable, // and an overall error. func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) { - // extract all params from BackupParams - cnf := params.Cnf - mysqld := params.Mysqld - logger := params.Logger if *xtrabackupUser == "" { return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.") } // use a mysql connection to detect flavor at runtime - conn, err := mysqld.GetDbaConnection() + conn, err := params.Mysqld.GetDbaConnection() if conn != nil && err == nil { defer conn.Close() } @@ -144,7 +140,7 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara return false, vterrors.Wrap(err, "unable to obtain master position") } flavor := pos.GTIDSet.Flavor() - logger.Infof("Detected MySQL flavor: %v", flavor) + params.Logger.Infof("Detected MySQL flavor: %v", flavor) backupFileName := be.backupFileName() numStripes := int(*xtrabackupStripes) @@ -154,19 +150,19 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara // do not write the MANIFEST unless all files were closed successfully, // maintaining the contract that a MANIFEST file should only exist if the // backup was created successfully. 
- logger.Infof("Starting backup with %v stripe(s)", numStripes) - replicationPosition, err := be.backupFiles(ctx, cnf, logger, bh, backupFileName, numStripes, flavor) + params.Logger.Infof("Starting backup with %v stripe(s)", numStripes) + replicationPosition, err := be.backupFiles(ctx, params, bh, backupFileName, numStripes, flavor) if err != nil { return false, err } // open the MANIFEST - logger.Infof("Writing backup MANIFEST") + params.Logger.Infof("Writing backup MANIFEST") mwc, err := bh.AddFile(ctx, backupManifestFileName, 0) if err != nil { return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } - defer closeFile(mwc, backupManifestFileName, logger, &finalErr) + defer closeFile(mwc, backupManifestFileName, params.Logger, &finalErr) // JSON-encode and write the MANIFEST bm := &xtraBackupManifest{ @@ -174,6 +170,7 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara BackupManifest: BackupManifest{ BackupMethod: xtrabackupEngineName, Position: replicationPosition, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), FinishedTime: time.Now().UTC().Format(time.RFC3339), }, @@ -194,19 +191,19 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara return false, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) } - logger.Infof("Backup completed") + params.Logger.Infof("Backup completed") return true, nil } -func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger logutil.Logger, bh backupstorage.BackupHandle, backupFileName string, numStripes int, flavor string) (replicationPosition mysql.Position, finalErr error) { - backupProgram := path.Join(*xtrabackupEnginePath, xtrabackupBinaryName) +func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, backupFileName string, numStripes int, flavor string) (replicationPosition mysql.Position, finalErr error) { - flagsToExec := 
[]string{"--defaults-file=" + cnf.path, + backupProgram := path.Join(*xtrabackupEnginePath, xtrabackupBinaryName) + flagsToExec := []string{"--defaults-file=" + params.Cnf.path, "--backup", - "--socket=" + cnf.SocketFile, + "--socket=" + params.Cnf.SocketFile, "--slave-info", "--user=" + *xtrabackupUser, - "--target-dir=" + cnf.TmpDir, + "--target-dir=" + params.Cnf.TmpDir, } if *xtrabackupStreamMode != "" { flagsToExec = append(flagsToExec, "--stream="+*xtrabackupStreamMode) @@ -224,7 +221,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger // a timeout on the final Close() step. addFilesCtx, cancelAddFiles := context.WithCancel(ctx) defer cancelAddFiles() - destFiles, err := addStripeFiles(addFilesCtx, bh, backupFileName, numStripes, logger) + destFiles, err := addStripeFiles(addFilesCtx, bh, backupFileName, numStripes, params.Logger) if err != nil { return replicationPosition, vterrors.Wrapf(err, "cannot create backup file %v", backupFileName) } @@ -238,7 +235,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger timer.Stop() return case <-timer.C: - logger.Errorf("Timed out waiting for Close() on backup file to complete") + params.Logger.Errorf("Timed out waiting for Close() on backup file to complete") // Cancelling the Context that was originally passed to bh.AddFile() // should hopefully cause Close() calls on the file that AddFile() // returned to abort. 
If the underlying implementation doesn't @@ -254,7 +251,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger if numStripes > 1 { filename = stripeFileName(backupFileName, i) } - closeFile(file, filename, logger, &finalErr) + closeFile(file, filename, params.Logger, &finalErr) } }() @@ -307,7 +304,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger capture := false for scanner.Scan() { line := scanner.Text() - logger.Infof("xtrabackup stderr: %s", line) + params.Logger.Infof("xtrabackup stderr: %s", line) // Wait until we see the first line of the binlog position. // Then capture all subsequent lines. We need multiple lines since @@ -321,7 +318,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger fmt.Fprintln(stderrBuilder, line) } if err := scanner.Err(); err != nil { - logger.Errorf("error reading from xtrabackup stderr: %v", err) + params.Logger.Errorf("error reading from xtrabackup stderr: %v", err) } }() @@ -358,7 +355,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger return replicationPosition, vterrors.Wrap(err, "xtrabackup failed with error") } - replicationPosition, rerr := findReplicationPosition(sterrOutput, flavor, logger) + replicationPosition, rerr := findReplicationPosition(sterrOutput, flavor, params.Logger) if rerr != nil { return replicationPosition, vterrors.Wrap(rerr, "backup failed trying to find replication position") } @@ -367,38 +364,33 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, cnf *Mycnf, logger } // ExecuteRestore restores from a backup. Any error is returned. 
-func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (mysql.Position, error) { - - cnf := params.Cnf - mysqld := params.Mysqld - logger := params.Logger +func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) { - zeroPosition := mysql.Position{} var bm xtraBackupManifest if err := getBackupManifestInto(ctx, bh, &bm); err != nil { - return zeroPosition, err + return nil, err } // mark restore as in progress - if err := createStateFile(cnf); err != nil { - return zeroPosition, err + if err := createStateFile(params.Cnf); err != nil { + return nil, err } - if err := prepareToRestore(ctx, cnf, mysqld, logger); err != nil { - return zeroPosition, err + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + return nil, err } // copy / extract files - logger.Infof("Restore: Extracting files from %v", bm.FileName) + params.Logger.Infof("Restore: Extracting files from %v", bm.FileName) - if err := be.restoreFromBackup(ctx, cnf, bh, bm, logger); err != nil { + if err := be.restoreFromBackup(ctx, params.Cnf, bh, bm, params.Logger); err != nil { // don't delete the file here because that is how we detect an interrupted restore - return zeroPosition, err + return nil, err } // now find the slave position and return that - logger.Infof("Restore: returning replication position %v", bm.Position) - return bm.Position, nil + params.Logger.Infof("Restore: returning replication position %v", bm.Position) + return &bm.BackupManifest, nil } func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, bm xtraBackupManifest, logger logutil.Logger) error { diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 5806897eb9b..16f34fa049c 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ 
-5,8 +5,10 @@ package logutil import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" math "math" + + proto "github.com/golang/protobuf/proto" + vttime "vitess.io/vitess/go/vt/proto/vttime" ) // Reference imports to suppress errors if they are not otherwise used. @@ -56,72 +58,23 @@ func (Level) EnumDescriptor() ([]byte, []int) { return fileDescriptor_31f5dd3702a8edf9, []int{0} } -// Time represents a time stamp in nanoseconds. In go, use logutil library -// to convert times. -type Time struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanoseconds int32 `protobuf:"varint,2,opt,name=nanoseconds,proto3" json:"nanoseconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Time) Reset() { *m = Time{} } -func (m *Time) String() string { return proto.CompactTextString(m) } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_31f5dd3702a8edf9, []int{0} -} - -func (m *Time) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Time.Unmarshal(m, b) -} -func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Time.Marshal(b, m, deterministic) -} -func (m *Time) XXX_Merge(src proto.Message) { - xxx_messageInfo_Time.Merge(m, src) -} -func (m *Time) XXX_Size() int { - return xxx_messageInfo_Time.Size(m) -} -func (m *Time) XXX_DiscardUnknown() { - xxx_messageInfo_Time.DiscardUnknown(m) -} - -var xxx_messageInfo_Time proto.InternalMessageInfo - -func (m *Time) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Time) GetNanoseconds() int32 { - if m != nil { - return m.Nanoseconds - } - return 0 -} - // Event is a single logging event type Event struct { - Time *Time `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` - Level Level `protobuf:"varint,2,opt,name=level,proto3,enum=logutil.Level" 
json:"level,omitempty"` - File string `protobuf:"bytes,3,opt,name=file,proto3" json:"file,omitempty"` - Line int64 `protobuf:"varint,4,opt,name=line,proto3" json:"line,omitempty"` - Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Time *vttime.Time `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + Level Level `protobuf:"varint,2,opt,name=level,proto3,enum=logutil.Level" json:"level,omitempty"` + File string `protobuf:"bytes,3,opt,name=file,proto3" json:"file,omitempty"` + Line int64 `protobuf:"varint,4,opt,name=line,proto3" json:"line,omitempty"` + Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_31f5dd3702a8edf9, []int{1} + return fileDescriptor_31f5dd3702a8edf9, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { @@ -142,7 +95,7 @@ func (m *Event) XXX_DiscardUnknown() { var xxx_messageInfo_Event proto.InternalMessageInfo -func (m *Event) GetTime() *Time { +func (m *Event) GetTime() *vttime.Time { if m != nil { return m.Time } @@ -179,29 +132,26 @@ func (m *Event) GetValue() string { func init() { proto.RegisterEnum("logutil.Level", Level_name, Level_value) - proto.RegisterType((*Time)(nil), "logutil.Time") proto.RegisterType((*Event)(nil), "logutil.Event") } func init() { proto.RegisterFile("logutil.proto", fileDescriptor_31f5dd3702a8edf9) } var fileDescriptor_31f5dd3702a8edf9 = []byte{ - // 260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xfb, 0x40, - 0x10, 0xc5, 
0xff, 0xdb, 0x64, 0xff, 0xb1, 0x13, 0x5a, 0xc2, 0xe0, 0x21, 0xc7, 0x58, 0x8a, 0x04, - 0x0f, 0x09, 0x54, 0xf0, 0x6e, 0x25, 0x4a, 0xa1, 0x24, 0xb0, 0x0a, 0x82, 0xb7, 0xaa, 0x63, 0x59, - 0xd8, 0x66, 0xc5, 0x6c, 0xf7, 0x63, 0xf8, 0x99, 0x25, 0x93, 0x46, 0xbc, 0xbd, 0xf7, 0x7b, 0xc3, - 0x9b, 0x61, 0x60, 0x66, 0xec, 0xfe, 0xe8, 0xb4, 0x29, 0x3e, 0xbf, 0xac, 0xb3, 0x18, 0x9d, 0xec, - 0x62, 0x0d, 0xe1, 0x93, 0x3e, 0x10, 0xa6, 0x10, 0x75, 0xf4, 0x66, 0xdb, 0xf7, 0x2e, 0x15, 0x99, - 0xc8, 0x03, 0x35, 0x5a, 0xcc, 0x20, 0x6e, 0x77, 0xad, 0x1d, 0xd3, 0x49, 0x26, 0x72, 0xa9, 0xfe, - 0xa2, 0xc5, 0xb7, 0x00, 0x59, 0x79, 0x6a, 0x1d, 0x5e, 0x40, 0xe8, 0xf4, 0x81, 0xb8, 0x22, 0x5e, - 0xcd, 0x8a, 0x71, 0x69, 0xbf, 0x42, 0x71, 0x84, 0x4b, 0x90, 0x86, 0x3c, 0x19, 0x2e, 0x9a, 0xaf, - 0xe6, 0xbf, 0x33, 0xdb, 0x9e, 0xaa, 0x21, 0x44, 0x84, 0xf0, 0x43, 0x1b, 0x4a, 0x83, 0x4c, 0xe4, - 0x53, 0xc5, 0xba, 0x67, 0x46, 0xb7, 0x94, 0x86, 0x7c, 0x1f, 0x6b, 0x3c, 0x07, 0xe9, 0x77, 0xe6, - 0x48, 0xa9, 0xe4, 0xc1, 0xc1, 0x5c, 0xdd, 0x80, 0xe4, 0x36, 0x3c, 0x83, 0x70, 0x53, 0xdf, 0x37, - 0xc9, 0x3f, 0x8c, 0x21, 0x7a, 0xbe, 0x55, 0xf5, 0xa6, 0x7e, 0x48, 0x04, 0x4e, 0x41, 0x56, 0x4a, - 0x35, 0x2a, 0x99, 0xf4, 0xfc, 0xae, 0xa9, 0x1f, 0x9b, 0x6d, 0x95, 0x04, 0xeb, 0xcb, 0x97, 0xa5, - 0xd7, 0x8e, 0xba, 0xae, 0xd0, 0xb6, 0x1c, 0x54, 0xb9, 0xb7, 0xa5, 0x77, 0x25, 0x7f, 0xad, 0x3c, - 0x9d, 0xfa, 0xfa, 0x9f, 0xed, 0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x27, 0x02, 0xa1, 0x99, - 0x55, 0x01, 0x00, 0x00, + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0x41, 0x4b, 0x03, 0x31, + 0x10, 0x85, 0x4d, 0x77, 0x63, 0xed, 0x54, 0xcb, 0x32, 0x78, 0x08, 0x9e, 0x82, 0x14, 0x59, 0x3c, + 0x6c, 0xa0, 0x82, 0x77, 0x95, 0x55, 0x0a, 0x65, 0x17, 0xa2, 0x20, 0x78, 0x53, 0x18, 0x4b, 0x20, + 0x6d, 0xc4, 0xa6, 0xf9, 0x17, 0xfe, 0x67, 0xd9, 0x49, 0x7b, 0x7b, 0xef, 0x7b, 0x8f, 0xc7, 0x0c, + 0x5c, 0xf8, 0xb0, 0xde, 0x47, 0xe7, 0x9b, 0x9f, 0xdf, 0x10, 0x03, 0x8e, 
0x0f, 0xf6, 0x0a, 0xa2, + 0xdb, 0x50, 0x86, 0xd7, 0x7f, 0x02, 0x64, 0x9b, 0x68, 0x1b, 0x51, 0x43, 0x39, 0x70, 0x25, 0xb4, + 0xa8, 0xa7, 0x8b, 0xf3, 0x26, 0x45, 0xae, 0xbd, 0xb9, 0x0d, 0x59, 0x4e, 0x70, 0x0e, 0xd2, 0x53, + 0x22, 0xaf, 0x46, 0x5a, 0xd4, 0xb3, 0xc5, 0xac, 0x39, 0xee, 0xaf, 0x06, 0x6a, 0x73, 0x88, 0x08, + 0xe5, 0xb7, 0xf3, 0xa4, 0x0a, 0x2d, 0xea, 0x89, 0x65, 0x3d, 0x30, 0xef, 0xb6, 0xa4, 0x4a, 0x2d, + 0xea, 0xc2, 0xb2, 0xc6, 0x4b, 0x90, 0xe9, 0xd3, 0xef, 0x49, 0x49, 0x2e, 0x66, 0x73, 0x7b, 0x0f, + 0x92, 0xd7, 0xf0, 0x0c, 0xca, 0x65, 0xf7, 0xdc, 0x57, 0x27, 0x38, 0x85, 0xf1, 0xfb, 0x83, 0xed, + 0x96, 0xdd, 0x4b, 0x25, 0x70, 0x02, 0xb2, 0xb5, 0xb6, 0xb7, 0xd5, 0x68, 0xe0, 0x4f, 0x7d, 0xf7, + 0xda, 0xaf, 0xda, 0xaa, 0x78, 0xbc, 0xf9, 0x98, 0x27, 0x17, 0x69, 0xb7, 0x6b, 0x5c, 0x30, 0x59, + 0x99, 0x75, 0x30, 0x29, 0x1a, 0xfe, 0xd3, 0x1c, 0x4e, 0xfd, 0x3a, 0x65, 0x7b, 0xf7, 0x1f, 0x00, + 0x00, 0xff, 0xff, 0xdd, 0xfa, 0x9b, 0x9a, 0x1c, 0x01, 0x00, 0x00, } diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index e3f8c175d05..aae8f87896c 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -5,8 +5,10 @@ package topodata import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" math "math" + + proto "github.com/golang/protobuf/proto" + vttime "vitess.io/vitess/go/vt/proto/vttime" ) // Reference imports to suppress errors if they are not otherwise used. @@ -20,6 +22,34 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// KeyspaceType describes the type of the keyspace +type KeyspaceType int32 + +const ( + // NORMAL is the default value + KeyspaceType_NORMAL KeyspaceType = 0 + // SNAPSHOT is when we are creating a snapshot keyspace + KeyspaceType_SNAPSHOT KeyspaceType = 1 +) + +var KeyspaceType_name = map[int32]string{ + 0: "NORMAL", + 1: "SNAPSHOT", +} + +var KeyspaceType_value = map[string]int32{ + "NORMAL": 0, + "SNAPSHOT": 1, +} + +func (x KeyspaceType) String() string { + return proto.EnumName(KeyspaceType_name, int32(x)) +} + +func (KeyspaceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{0} +} + // KeyspaceIdType describes the type of the sharding key for a // range-based sharded keyspace. type KeyspaceIdType int32 @@ -52,7 +82,7 @@ func (x KeyspaceIdType) String() string { } func (KeyspaceIdType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_52c350cb619f972e, []int{0} + return fileDescriptor_52c350cb619f972e, []int{1} } // TabletType represents the type of a given tablet. @@ -123,7 +153,7 @@ func (x TabletType) String() string { } func (TabletType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_52c350cb619f972e, []int{1} + return fileDescriptor_52c350cb619f972e, []int{2} } // KeyRange describes a range of sharding keys, when range-based @@ -671,10 +701,22 @@ type Keyspace struct { ShardingColumnType KeyspaceIdType `protobuf:"varint,2,opt,name=sharding_column_type,json=shardingColumnType,proto3,enum=topodata.KeyspaceIdType" json:"sharding_column_type,omitempty"` // ServedFrom will redirect the appropriate traffic to // another keyspace. 
- ServedFroms []*Keyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ServedFroms []*Keyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` + // keyspace_type will determine how this keyspace is treated by + // vtgate / vschema. Normal keyspaces are routable by + // any query. Snapshot keyspaces are only accessible + // by explicit addresssing or by calling "use keyspace" first + KeyspaceType KeyspaceType `protobuf:"varint,5,opt,name=keyspace_type,json=keyspaceType,proto3,enum=topodata.KeyspaceType" json:"keyspace_type,omitempty"` + // base_keyspace is the base keyspace from which a snapshot + // keyspace is created. empty for normal keyspaces + BaseKeyspace string `protobuf:"bytes,6,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` + // snapshot_time (in UTC) is a property of snapshot + // keyspaces which tells us what point in time + // the snapshot is of + SnapshotTime *vttime.Time `protobuf:"bytes,7,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Keyspace) Reset() { *m = Keyspace{} } @@ -723,6 +765,27 @@ func (m *Keyspace) GetServedFroms() []*Keyspace_ServedFrom { return nil } +func (m *Keyspace) GetKeyspaceType() KeyspaceType { + if m != nil { + return m.KeyspaceType + } + return KeyspaceType_NORMAL +} + +func (m *Keyspace) GetBaseKeyspace() string { + if m != nil { + return m.BaseKeyspace + } + return "" +} + +func (m *Keyspace) GetSnapshotTime() *vttime.Time { + if m != nil { + return m.SnapshotTime + } + return nil +} + // ServedFrom indicates a relationship between a TabletType and the // keyspace name that's serving it. 
type Keyspace_ServedFrom struct { @@ -1246,6 +1309,7 @@ func (m *CellsAlias) GetCells() []string { } func init() { + proto.RegisterEnum("topodata.KeyspaceType", KeyspaceType_name, KeyspaceType_value) proto.RegisterEnum("topodata.KeyspaceIdType", KeyspaceIdType_name, KeyspaceIdType_value) proto.RegisterEnum("topodata.TabletType", TabletType_name, TabletType_value) proto.RegisterType((*KeyRange)(nil), "topodata.KeyRange") @@ -1273,82 +1337,88 @@ func init() { func init() { proto.RegisterFile("topodata.proto", fileDescriptor_52c350cb619f972e) } var fileDescriptor_52c350cb619f972e = []byte{ - // 1218 bytes of a gzipped FileDescriptorProto + // 1314 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xe1, 0x6e, 0x1b, 0x45, - 0x10, 0xe6, 0xec, 0xb3, 0x63, 0x8f, 0x1d, 0xe7, 0xba, 0xa4, 0xd5, 0xe9, 0xa0, 0x22, 0xb2, 0x54, - 0x11, 0x15, 0xe1, 0xa0, 0xb4, 0x85, 0xa8, 0x12, 0x52, 0x5d, 0xc7, 0xa5, 0x69, 0x1a, 0xc7, 0x5a, - 0x3b, 0x82, 0xf2, 0xe7, 0x74, 0xf1, 0x6d, 0xd2, 0x53, 0xce, 0xb7, 0xee, 0xee, 0x26, 0x92, 0x79, - 0x05, 0x7e, 0x00, 0x7f, 0x79, 0x03, 0x1e, 0x81, 0x27, 0xe0, 0x39, 0xe0, 0x49, 0xd0, 0xce, 0xde, - 0xd9, 0x67, 0xbb, 0x2d, 0x29, 0xca, 0xbf, 0x99, 0xdd, 0x99, 0xb9, 0x99, 0x6f, 0xe6, 0x9b, 0xb5, - 0xa1, 0xa1, 0xf8, 0x84, 0x87, 0x81, 0x0a, 0x5a, 0x13, 0xc1, 0x15, 0x27, 0x95, 0x4c, 0x6f, 0xee, - 0x42, 0xe5, 0x90, 0x4d, 0x69, 0x90, 0x9c, 0x33, 0xb2, 0x09, 0x25, 0xa9, 0x02, 0xa1, 0x5c, 0x6b, - 0xcb, 0xda, 0xae, 0x53, 0xa3, 0x10, 0x07, 0x8a, 0x2c, 0x09, 0xdd, 0x02, 0x9e, 0x69, 0xb1, 0xf9, - 0x00, 0x6a, 0xc3, 0xe0, 0x34, 0x66, 0xaa, 0x1d, 0x47, 0x81, 0x24, 0x04, 0xec, 0x11, 0x8b, 0x63, - 0xf4, 0xaa, 0x52, 0x94, 0xb5, 0xd3, 0x65, 0x64, 0x9c, 0xd6, 0xa9, 0x16, 0x9b, 0x7f, 0xda, 0x50, - 0x36, 0x5e, 0xe4, 0x0b, 0x28, 0x05, 0xda, 0x13, 0x3d, 0x6a, 0xbb, 0xb7, 0x5b, 0xb3, 0xec, 0x72, - 0x61, 0xa9, 0xb1, 0x21, 0x1e, 0x54, 0x5e, 0x73, 0xa9, 0x92, 0x60, 0xcc, 0x30, 0x5c, 0x95, 0xce, - 0x74, 0xb2, 0x07, 0x95, 0x09, 
0x17, 0xca, 0x1f, 0x07, 0x13, 0xd7, 0xde, 0x2a, 0x6e, 0xd7, 0x76, - 0xef, 0x2e, 0xc7, 0x6a, 0xf5, 0xb9, 0x50, 0x47, 0xc1, 0xa4, 0x9b, 0x28, 0x31, 0xa5, 0x6b, 0x13, - 0xa3, 0xe9, 0xa8, 0x17, 0x6c, 0x2a, 0x27, 0xc1, 0x88, 0xb9, 0x25, 0x13, 0x35, 0xd3, 0x11, 0x86, - 0xd7, 0x81, 0x08, 0xdd, 0x32, 0x5e, 0x18, 0x85, 0xec, 0x40, 0xf5, 0x82, 0x4d, 0x7d, 0xa1, 0x91, - 0x72, 0xd7, 0x30, 0x71, 0x32, 0xff, 0x58, 0x86, 0x21, 0x86, 0x31, 0x68, 0x6e, 0x83, 0xad, 0xa6, - 0x13, 0xe6, 0x56, 0xb6, 0xac, 0xed, 0xc6, 0xee, 0xe6, 0x72, 0x62, 0xc3, 0xe9, 0x84, 0x51, 0xb4, - 0x20, 0xdb, 0xe0, 0x84, 0xa7, 0xbe, 0xae, 0xc8, 0xe7, 0x57, 0x4c, 0x88, 0x28, 0x64, 0x6e, 0x15, - 0xbf, 0xdd, 0x08, 0x4f, 0x7b, 0xc1, 0x98, 0x1d, 0xa7, 0xa7, 0xa4, 0x05, 0xb6, 0x0a, 0xce, 0xa5, - 0x0b, 0x58, 0xac, 0xb7, 0x52, 0xec, 0x30, 0x38, 0x97, 0xa6, 0x52, 0xb4, 0x23, 0xf7, 0xa0, 0x31, - 0x9e, 0xca, 0x37, 0xb1, 0x3f, 0x83, 0xb0, 0x8e, 0x71, 0xd7, 0xf1, 0xf4, 0x79, 0x86, 0xe3, 0x5d, - 0x00, 0x63, 0xa6, 0xe1, 0x71, 0xd7, 0xb7, 0xac, 0xed, 0x12, 0xad, 0xe2, 0x89, 0x46, 0xcf, 0x7b, - 0x0c, 0xf5, 0x3c, 0x8a, 0xba, 0xb9, 0x17, 0x6c, 0x9a, 0xf6, 0x5b, 0x8b, 0x1a, 0xb2, 0xab, 0x20, - 0xbe, 0x34, 0x1d, 0x2a, 0x51, 0xa3, 0x3c, 0x2e, 0xec, 0x59, 0xde, 0x37, 0x50, 0x9d, 0x25, 0xf5, - 0x5f, 0x8e, 0xd5, 0x9c, 0xe3, 0x0b, 0xbb, 0x52, 0x74, 0xec, 0x17, 0x76, 0xa5, 0xe6, 0xd4, 0x9b, - 0xbf, 0x95, 0xa1, 0x34, 0xc0, 0x2e, 0xec, 0x41, 0x7d, 0x1c, 0x48, 0xc5, 0x84, 0x7f, 0x8d, 0x09, - 0xaa, 0x19, 0x53, 0x33, 0xa5, 0x0b, 0xfd, 0x2b, 0x5c, 0xa3, 0x7f, 0xdf, 0x42, 0x5d, 0x32, 0x71, - 0xc5, 0x42, 0x5f, 0x37, 0x49, 0xba, 0xc5, 0x65, 0xcc, 0x31, 0xa3, 0xd6, 0x00, 0x6d, 0xb0, 0x9b, - 0x35, 0x39, 0x93, 0x25, 0x79, 0x02, 0xeb, 0x92, 0x5f, 0x8a, 0x11, 0xf3, 0x71, 0x7e, 0x64, 0x3a, - 0xa0, 0x9f, 0xac, 0xf8, 0xa3, 0x11, 0xca, 0xb4, 0x2e, 0xe7, 0x8a, 0x24, 0xcf, 0x60, 0x43, 0x61, - 0x35, 0xfe, 0x88, 0x27, 0x4a, 0xf0, 0x58, 0xba, 0xe5, 0xe5, 0x21, 0x37, 0x31, 0x4c, 0xd1, 0x1d, - 0x63, 0x45, 0x1b, 0x2a, 0xaf, 0x4a, 0x72, 0x1f, 0x6e, 0x45, 0xd2, 
0x4f, 0x61, 0xd3, 0x29, 0x46, - 0xc9, 0x39, 0x4e, 0x70, 0x85, 0x6e, 0x44, 0xf2, 0x08, 0xcf, 0x07, 0xe6, 0xd8, 0x7b, 0x05, 0x30, - 0x2f, 0x88, 0x3c, 0x82, 0x5a, 0x9a, 0x01, 0x4e, 0xb2, 0xf5, 0x9e, 0x49, 0x06, 0x35, 0x93, 0x75, - 0x53, 0xf5, 0x12, 0x90, 0x6e, 0x61, 0xab, 0xa8, 0x9b, 0x8a, 0x8a, 0xf7, 0xbb, 0x05, 0xb5, 0x5c, - 0xb1, 0xd9, 0x8a, 0xb0, 0x66, 0x2b, 0x62, 0x81, 0x94, 0x85, 0x77, 0x91, 0xb2, 0xf8, 0x4e, 0x52, - 0xda, 0xd7, 0x68, 0xea, 0x1d, 0x28, 0x63, 0xa2, 0xd2, 0x2d, 0x61, 0x6e, 0xa9, 0xe6, 0xfd, 0x61, - 0xc1, 0xfa, 0x02, 0x8a, 0x37, 0x5a, 0x3b, 0xf9, 0x12, 0xc8, 0x69, 0x1c, 0x8c, 0x2e, 0xe2, 0x48, - 0x2a, 0x3d, 0x50, 0x26, 0x05, 0x1b, 0x4d, 0x6e, 0xe5, 0x6e, 0x30, 0xa8, 0xd4, 0x59, 0x9e, 0x09, - 0xfe, 0x13, 0x4b, 0x70, 0x37, 0x55, 0x68, 0xaa, 0xcd, 0x38, 0x51, 0x72, 0xca, 0xcd, 0xbf, 0x0a, - 0xb8, 0xb9, 0x0d, 0x3a, 0x5f, 0xc1, 0x26, 0x02, 0x12, 0x25, 0xe7, 0xfe, 0x88, 0xc7, 0x97, 0xe3, - 0x04, 0xd7, 0x49, 0xca, 0x34, 0x92, 0xdd, 0x75, 0xf0, 0x4a, 0x6f, 0x14, 0xf2, 0x62, 0xd5, 0x03, - 0xeb, 0x2c, 0x60, 0x9d, 0xee, 0x02, 0x88, 0xf8, 0x8d, 0x03, 0x33, 0xe3, 0x4b, 0xb1, 0xb0, 0xe6, - 0x27, 0x33, 0xa6, 0x9c, 0x09, 0x3e, 0x96, 0xab, 0xab, 0x38, 0x8b, 0x91, 0x92, 0xe5, 0x99, 0xe0, - 0xe3, 0x8c, 0x2c, 0x5a, 0x96, 0xde, 0x65, 0x36, 0x76, 0x5a, 0xbd, 0x59, 0xe8, 0xf3, 0x43, 0x55, - 0x5c, 0x1c, 0x2a, 0x83, 0x67, 0xf3, 0x67, 0x0b, 0x1c, 0xc3, 0x3f, 0x36, 0x89, 0xa3, 0x51, 0xa0, - 0x22, 0x9e, 0x90, 0x47, 0x50, 0x4a, 0x78, 0xc8, 0xf4, 0x86, 0xd1, 0xc5, 0x7c, 0xb6, 0x44, 0xb9, - 0x9c, 0x69, 0xab, 0xc7, 0x43, 0x46, 0x8d, 0xb5, 0xf7, 0x04, 0x6c, 0xad, 0xea, 0x3d, 0x95, 0x96, - 0x70, 0x9d, 0x3d, 0xa5, 0xe6, 0x4a, 0xf3, 0x04, 0x1a, 0xe9, 0x17, 0xce, 0x98, 0x60, 0xc9, 0x88, - 0xe9, 0xf7, 0x35, 0xd7, 0x4c, 0x94, 0x3f, 0x78, 0x9b, 0x35, 0x7f, 0xb1, 0x80, 0x60, 0xdc, 0xc5, - 0x29, 0xbf, 0x89, 0xd8, 0xe4, 0x21, 0xdc, 0x79, 0x73, 0xc9, 0xc4, 0xd4, 0x2c, 0x97, 0x11, 0xf3, - 0xc3, 0x48, 0xea, 0xaf, 0x18, 0xb2, 0x56, 0xe8, 0x26, 0xde, 0x0e, 0xcc, 0xe5, 0x7e, 0x7a, 0xd7, - 0xfc, 
0xc7, 0x86, 0xda, 0x40, 0x5c, 0xcd, 0x66, 0xf8, 0x3b, 0x80, 0x49, 0x20, 0x54, 0xa4, 0x31, - 0xcd, 0x60, 0xff, 0x3c, 0x07, 0xfb, 0xdc, 0x74, 0x36, 0x4f, 0xfd, 0xcc, 0x9e, 0xe6, 0x5c, 0xdf, - 0x49, 0x86, 0xc2, 0x07, 0x93, 0xa1, 0xf8, 0x3f, 0xc8, 0xd0, 0x86, 0x5a, 0x8e, 0x0c, 0x29, 0x17, - 0xb6, 0xde, 0x5e, 0x47, 0x8e, 0x0e, 0x30, 0xa7, 0x83, 0xf7, 0xb7, 0x05, 0xb7, 0x56, 0x4a, 0xd4, - 0xac, 0xc8, 0xbd, 0x47, 0xef, 0x67, 0xc5, 0xfc, 0x21, 0x22, 0x1d, 0x70, 0x30, 0x4b, 0x5f, 0x64, - 0x03, 0x65, 0x08, 0x52, 0xcb, 0xd7, 0xb5, 0x38, 0x71, 0x74, 0x43, 0x2e, 0xe8, 0x92, 0xf4, 0xe1, - 0xb6, 0x09, 0xb2, 0xfc, 0x20, 0x99, 0x47, 0xf1, 0xd3, 0xa5, 0x48, 0x8b, 0xef, 0xd1, 0xc7, 0x72, - 0xe5, 0x4c, 0x7a, 0xfe, 0x4d, 0x30, 0xfe, 0x3d, 0x0f, 0x46, 0xba, 0x25, 0x0f, 0xa1, 0xd2, 0x61, - 0x71, 0x7c, 0x90, 0x9c, 0x71, 0xfd, 0x63, 0x08, 0x71, 0x11, 0x7e, 0x10, 0x86, 0x82, 0x49, 0x99, - 0x4e, 0xfd, 0xba, 0x39, 0x6d, 0x9b, 0x43, 0x4d, 0x09, 0xc1, 0xb9, 0x4a, 0x03, 0xa2, 0x9c, 0x2e, - 0x8a, 0x26, 0x80, 0x0e, 0x26, 0xcd, 0x0f, 0x8a, 0xb7, 0xae, 0x9b, 0xfb, 0xbb, 0xd0, 0x58, 0x1c, - 0x12, 0x52, 0x85, 0xd2, 0x49, 0x6f, 0xd0, 0x1d, 0x3a, 0x1f, 0x11, 0x80, 0xf2, 0xc9, 0x41, 0x6f, - 0xf8, 0xf5, 0x43, 0xc7, 0xd2, 0xc7, 0x4f, 0x5f, 0x0d, 0xbb, 0x03, 0xa7, 0x70, 0xff, 0x57, 0x0b, - 0x60, 0x5e, 0x21, 0xa9, 0xc1, 0xda, 0x49, 0xef, 0xb0, 0x77, 0xfc, 0x7d, 0xcf, 0xb8, 0x1c, 0xb5, - 0x07, 0xc3, 0x2e, 0x75, 0x2c, 0x7d, 0x41, 0xbb, 0xfd, 0x97, 0x07, 0x9d, 0xb6, 0x53, 0xd0, 0x17, - 0x74, 0xff, 0xb8, 0xf7, 0xf2, 0x95, 0x53, 0xc4, 0x58, 0xed, 0x61, 0xe7, 0xb9, 0x11, 0x07, 0xfd, - 0x36, 0xed, 0x3a, 0x36, 0x71, 0xa0, 0xde, 0xfd, 0xa1, 0xdf, 0xa5, 0x07, 0x47, 0xdd, 0xde, 0xb0, - 0xfd, 0xd2, 0x29, 0x69, 0x9f, 0xa7, 0xed, 0xce, 0xe1, 0x49, 0xdf, 0x29, 0x9b, 0x60, 0x83, 0xe1, - 0x31, 0xed, 0x3a, 0x6b, 0x5a, 0xd9, 0xa7, 0xed, 0x83, 0x5e, 0x77, 0xdf, 0xa9, 0x78, 0x05, 0xc7, - 0x7a, 0xba, 0x07, 0x1b, 0x11, 0x6f, 0x5d, 0x45, 0x8a, 0x49, 0x69, 0xfe, 0x33, 0xfc, 0x78, 0x2f, - 0xd5, 0x22, 0xbe, 0x63, 0xa4, 0x9d, 0x73, 
0xbe, 0x73, 0xa5, 0x76, 0xf0, 0x76, 0x27, 0x6b, 0xd5, - 0x69, 0x19, 0xf5, 0x07, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x89, 0xf4, 0xf5, 0x28, 0x73, 0x0c, + 0x10, 0xee, 0xd9, 0x67, 0xe7, 0x3c, 0x3e, 0x27, 0xd7, 0x25, 0xad, 0x4e, 0x07, 0x15, 0x91, 0x51, + 0x85, 0x15, 0x84, 0x03, 0x69, 0x0b, 0x51, 0x11, 0x52, 0x5d, 0xc7, 0xa5, 0x69, 0x12, 0xc7, 0x5a, + 0x3b, 0x82, 0xf2, 0xe7, 0x74, 0xb1, 0x37, 0xe9, 0x29, 0xe7, 0x3b, 0xf7, 0x76, 0x63, 0xc9, 0xbc, + 0x02, 0x3f, 0x80, 0xbf, 0xbc, 0x01, 0x8f, 0xc0, 0xbb, 0xf0, 0x07, 0x9e, 0x04, 0xed, 0xec, 0xdd, + 0xf9, 0x6c, 0xb7, 0x25, 0x45, 0xf9, 0xb7, 0x33, 0x3b, 0x33, 0x37, 0xf3, 0xcd, 0xcc, 0xb7, 0x36, + 0xac, 0x8b, 0x68, 0x12, 0x8d, 0x3c, 0xe1, 0x35, 0x27, 0x71, 0x24, 0x22, 0x62, 0xa4, 0xb2, 0x03, + 0xc2, 0x1f, 0x33, 0xa5, 0xad, 0xef, 0x82, 0x71, 0xc8, 0x66, 0xd4, 0x0b, 0x2f, 0x18, 0xd9, 0x84, + 0x12, 0x17, 0x5e, 0x2c, 0x6c, 0x6d, 0x4b, 0x6b, 0x98, 0x54, 0x09, 0xc4, 0x82, 0x22, 0x0b, 0x47, + 0x76, 0x01, 0x75, 0xf2, 0x58, 0x7f, 0x00, 0xd5, 0x81, 0x77, 0x16, 0x30, 0xd1, 0x0a, 0x7c, 0x8f, + 0x13, 0x02, 0xfa, 0x90, 0x05, 0x01, 0x7a, 0x55, 0x28, 0x9e, 0xa5, 0xd3, 0x95, 0xaf, 0x9c, 0x6a, + 0x54, 0x1e, 0xeb, 0x7f, 0xea, 0x50, 0x56, 0x5e, 0xe4, 0x33, 0x28, 0x79, 0xd2, 0x13, 0x3d, 0xaa, + 0xbb, 0x77, 0x9a, 0x59, 0xa6, 0xb9, 0xb0, 0x54, 0xd9, 0x10, 0x07, 0x8c, 0x57, 0x11, 0x17, 0xa1, + 0x37, 0x66, 0x18, 0xae, 0x42, 0x33, 0x99, 0xec, 0x81, 0x31, 0x89, 0x62, 0xe1, 0x8e, 0xbd, 0x89, + 0xad, 0x6f, 0x15, 0x1b, 0xd5, 0xdd, 0x7b, 0xcb, 0xb1, 0x9a, 0xbd, 0x28, 0x16, 0xc7, 0xde, 0xa4, + 0x13, 0x8a, 0x78, 0x46, 0xd7, 0x26, 0x4a, 0x92, 0x51, 0x2f, 0xd9, 0x8c, 0x4f, 0xbc, 0x21, 0xb3, + 0x4b, 0x2a, 0x6a, 0x2a, 0x23, 0x0c, 0xaf, 0xbc, 0x78, 0x64, 0x97, 0xf1, 0x42, 0x09, 0x64, 0x07, + 0x2a, 0x97, 0x6c, 0xe6, 0xc6, 0x12, 0x29, 0x7b, 0x0d, 0x13, 0x27, 0xf3, 0x8f, 0xa5, 0x18, 0x62, + 0x18, 0x85, 0x66, 0x03, 0x74, 0x31, 0x9b, 0x30, 0xdb, 0xd8, 0xd2, 0x1a, 0xeb, 0xbb, 0x9b, 0xcb, + 0x89, 0x0d, 0x66, 0x13, 0x46, 0xd1, 0x82, 0x34, 0xc0, 0x1a, 0x9d, 0xb9, 0xb2, 
0x22, 0x37, 0x9a, + 0xb2, 0x38, 0xf6, 0x47, 0xcc, 0xae, 0xe0, 0xb7, 0xd7, 0x47, 0x67, 0x5d, 0x6f, 0xcc, 0x4e, 0x12, + 0x2d, 0x69, 0x82, 0x2e, 0xbc, 0x0b, 0x6e, 0x03, 0x16, 0xeb, 0xac, 0x14, 0x3b, 0xf0, 0x2e, 0xb8, + 0xaa, 0x14, 0xed, 0xc8, 0x7d, 0x58, 0x1f, 0xcf, 0xf8, 0xeb, 0xc0, 0xcd, 0x20, 0x34, 0x31, 0x6e, + 0x0d, 0xb5, 0xcf, 0x53, 0x1c, 0xef, 0x01, 0x28, 0x33, 0x09, 0x8f, 0x5d, 0xdb, 0xd2, 0x1a, 0x25, + 0x5a, 0x41, 0x8d, 0x44, 0xcf, 0x79, 0x0c, 0x66, 0x1e, 0x45, 0xd9, 0xdc, 0x4b, 0x36, 0x4b, 0xfa, + 0x2d, 0x8f, 0x12, 0xb2, 0xa9, 0x17, 0x5c, 0xa9, 0x0e, 0x95, 0xa8, 0x12, 0x1e, 0x17, 0xf6, 0x34, + 0xe7, 0x6b, 0xa8, 0x64, 0x49, 0xfd, 0x97, 0x63, 0x25, 0xe7, 0xf8, 0x42, 0x37, 0x8a, 0x96, 0xfe, + 0x42, 0x37, 0xaa, 0x96, 0x59, 0xff, 0xad, 0x0c, 0xa5, 0x3e, 0x76, 0x61, 0x0f, 0xcc, 0xb1, 0xc7, + 0x05, 0x8b, 0xdd, 0x6b, 0x4c, 0x50, 0x55, 0x99, 0xaa, 0x29, 0x5d, 0xe8, 0x5f, 0xe1, 0x1a, 0xfd, + 0xfb, 0x16, 0x4c, 0xce, 0xe2, 0x29, 0x1b, 0xb9, 0xb2, 0x49, 0xdc, 0x2e, 0x2e, 0x63, 0x8e, 0x19, + 0x35, 0xfb, 0x68, 0x83, 0xdd, 0xac, 0xf2, 0xec, 0xcc, 0xc9, 0x13, 0xa8, 0xf1, 0xe8, 0x2a, 0x1e, + 0x32, 0x17, 0xe7, 0x87, 0x27, 0x03, 0xfa, 0xe1, 0x8a, 0x3f, 0x1a, 0xe1, 0x99, 0x9a, 0x7c, 0x2e, + 0x70, 0xf2, 0x0c, 0x36, 0x04, 0x56, 0xe3, 0x0e, 0xa3, 0x50, 0xc4, 0x51, 0xc0, 0xed, 0xf2, 0xf2, + 0x90, 0xab, 0x18, 0xaa, 0xe8, 0xb6, 0xb2, 0xa2, 0xeb, 0x22, 0x2f, 0x72, 0xb2, 0x0d, 0xb7, 0x7d, + 0xee, 0x26, 0xb0, 0xc9, 0x14, 0xfd, 0xf0, 0x02, 0x27, 0xd8, 0xa0, 0x1b, 0x3e, 0x3f, 0x46, 0x7d, + 0x5f, 0xa9, 0x9d, 0x97, 0x00, 0xf3, 0x82, 0xc8, 0x23, 0xa8, 0x26, 0x19, 0xe0, 0x24, 0x6b, 0xef, + 0x98, 0x64, 0x10, 0xd9, 0x59, 0x36, 0x55, 0x92, 0x00, 0xb7, 0x0b, 0x5b, 0x45, 0xd9, 0x54, 0x14, + 0x9c, 0xdf, 0x35, 0xa8, 0xe6, 0x8a, 0x4d, 0x29, 0x42, 0xcb, 0x28, 0x62, 0x61, 0x29, 0x0b, 0x6f, + 0x5b, 0xca, 0xe2, 0x5b, 0x97, 0x52, 0xbf, 0x46, 0x53, 0xef, 0x42, 0x19, 0x13, 0xe5, 0x76, 0x09, + 0x73, 0x4b, 0x24, 0xe7, 0x0f, 0x0d, 0x6a, 0x0b, 0x28, 0xde, 0x68, 0xed, 0xe4, 0x73, 0x20, 0x67, + 0x81, 0x37, 0xbc, 
0x0c, 0x7c, 0x2e, 0xe4, 0x40, 0xa9, 0x14, 0x74, 0x34, 0xb9, 0x9d, 0xbb, 0xc1, + 0xa0, 0x5c, 0x66, 0x79, 0x1e, 0x47, 0x3f, 0xb1, 0x10, 0xb9, 0xc9, 0xa0, 0x89, 0x94, 0xed, 0x44, + 0xc9, 0x2a, 0xd7, 0xff, 0x2a, 0x22, 0x73, 0x2b, 0x74, 0xbe, 0x80, 0x4d, 0x04, 0xc4, 0x0f, 0x2f, + 0xdc, 0x61, 0x14, 0x5c, 0x8d, 0x43, 0xa4, 0x93, 0x64, 0xd3, 0x48, 0x7a, 0xd7, 0xc6, 0x2b, 0xc9, + 0x28, 0xe4, 0xc5, 0xaa, 0x07, 0xd6, 0x59, 0xc0, 0x3a, 0xed, 0x05, 0x10, 0xf1, 0x1b, 0x07, 0x6a, + 0xc6, 0x97, 0x62, 0x61, 0xcd, 0x4f, 0xb2, 0x4d, 0x39, 0x8f, 0xa3, 0x31, 0x5f, 0xa5, 0xe2, 0x34, + 0x46, 0xb2, 0x2c, 0xcf, 0xe2, 0x68, 0x9c, 0x2e, 0x8b, 0x3c, 0x73, 0xf2, 0x0d, 0xd4, 0xd2, 0x4e, + 0xab, 0x34, 0x4a, 0x98, 0xc6, 0xdd, 0xd5, 0x10, 0x98, 0x84, 0x79, 0x99, 0x93, 0xc8, 0x27, 0x50, + 0x3b, 0xf3, 0x38, 0x73, 0xb3, 0xd9, 0x51, 0xbc, 0x6d, 0x4a, 0x65, 0x86, 0xd0, 0x97, 0x50, 0xe3, + 0xa1, 0x37, 0xe1, 0xaf, 0x22, 0xe1, 0xca, 0xe7, 0x2f, 0xa1, 0x70, 0xb3, 0x39, 0x15, 0xf8, 0x1a, + 0x0e, 0xfc, 0x31, 0xa3, 0x66, 0x6a, 0x22, 0x25, 0xe7, 0x2a, 0xdd, 0x05, 0x99, 0xe3, 0xcd, 0xce, + 0x43, 0x7e, 0xd2, 0x8b, 0x8b, 0x93, 0xae, 0x9a, 0x5c, 0xff, 0x59, 0x03, 0x4b, 0x91, 0x02, 0x9b, + 0x04, 0xfe, 0xd0, 0x13, 0x7e, 0x14, 0x92, 0x47, 0x50, 0x0a, 0xa3, 0x11, 0x93, 0xb4, 0x27, 0x11, + 0xfe, 0x78, 0x89, 0x07, 0x72, 0xa6, 0xcd, 0x6e, 0x34, 0x62, 0x54, 0x59, 0x3b, 0x4f, 0x40, 0x97, + 0xa2, 0x24, 0xcf, 0xa4, 0x84, 0xeb, 0x90, 0xa7, 0x98, 0x0b, 0xf5, 0x53, 0x58, 0x4f, 0xbe, 0x70, + 0xce, 0x62, 0x16, 0x0e, 0x99, 0x7c, 0xf4, 0x73, 0x13, 0x86, 0xe7, 0xf7, 0xa6, 0xd8, 0xfa, 0x2f, + 0x1a, 0x10, 0x8c, 0xbb, 0xb8, 0x7a, 0x37, 0x11, 0x9b, 0x3c, 0x84, 0xbb, 0xaf, 0xaf, 0x58, 0x3c, + 0x53, 0x8c, 0x37, 0x64, 0xee, 0xc8, 0xe7, 0xf2, 0x2b, 0x8a, 0x41, 0x0c, 0xba, 0x89, 0xb7, 0x7d, + 0x75, 0xb9, 0x9f, 0xdc, 0xd5, 0xff, 0xd1, 0xa1, 0xda, 0x8f, 0xa7, 0xd9, 0xd8, 0x7c, 0x07, 0x30, + 0xf1, 0x62, 0xe1, 0x4b, 0x4c, 0x53, 0xd8, 0x3f, 0xcd, 0xc1, 0x3e, 0x37, 0xcd, 0x26, 0xb4, 0x97, + 0xda, 0xd3, 0x9c, 0xeb, 0x5b, 0x37, 0xb4, 0xf0, 0xde, 
0x1b, 0x5a, 0xfc, 0x1f, 0x1b, 0xda, 0x82, + 0x6a, 0x6e, 0x43, 0x93, 0x05, 0xdd, 0x7a, 0x73, 0x1d, 0xb9, 0x1d, 0x85, 0xf9, 0x8e, 0x3a, 0x7f, + 0x6b, 0x70, 0x7b, 0xa5, 0x44, 0xb9, 0x15, 0xb9, 0x47, 0xf2, 0xdd, 0x5b, 0x31, 0x7f, 0x1d, 0x49, + 0x1b, 0x2c, 0xcc, 0xd2, 0x8d, 0xd3, 0x81, 0x52, 0x0b, 0x52, 0xcd, 0xd7, 0xb5, 0x38, 0x71, 0x74, + 0x83, 0x2f, 0xc8, 0x9c, 0xf4, 0xe0, 0x8e, 0x0a, 0xb2, 0xfc, 0x4a, 0xaa, 0x97, 0xfa, 0xa3, 0xa5, + 0x48, 0x8b, 0x8f, 0xe4, 0x07, 0x7c, 0x45, 0xc7, 0x1d, 0xf7, 0x26, 0x36, 0xfe, 0x1d, 0xaf, 0x58, + 0x42, 0xdd, 0x87, 0x60, 0xb4, 0x59, 0x10, 0x1c, 0x84, 0xe7, 0x91, 0xfc, 0x85, 0x86, 0xb8, 0xc4, + 0xae, 0x37, 0x1a, 0xc5, 0x8c, 0xf3, 0x64, 0xea, 0x6b, 0x4a, 0xdb, 0x52, 0x4a, 0xb9, 0x12, 0x71, + 0x14, 0x89, 0x24, 0x20, 0x9e, 0x13, 0xa2, 0xa8, 0x03, 0xc8, 0x60, 0x5c, 0xfd, 0xca, 0x79, 0x23, + 0xdd, 0x6c, 0x37, 0xc0, 0xcc, 0xf3, 0x27, 0x01, 0x28, 0x77, 0x4f, 0xe8, 0x71, 0xeb, 0xc8, 0xba, + 0x45, 0x4c, 0x30, 0xfa, 0xdd, 0x56, 0xaf, 0xff, 0xfc, 0x64, 0x60, 0x69, 0xdb, 0xbb, 0xb0, 0xbe, + 0x38, 0x4e, 0xa4, 0x02, 0xa5, 0xd3, 0x6e, 0xbf, 0x33, 0xb0, 0x6e, 0x49, 0xb7, 0xd3, 0x83, 0xee, + 0xe0, 0xab, 0x87, 0x96, 0x26, 0xd5, 0x4f, 0x5f, 0x0e, 0x3a, 0x7d, 0xab, 0xb0, 0xfd, 0xab, 0x06, + 0x30, 0xc7, 0x82, 0x54, 0x61, 0xed, 0xb4, 0x7b, 0xd8, 0x3d, 0xf9, 0xbe, 0xab, 0x5c, 0x8e, 0x5b, + 0xfd, 0x41, 0x87, 0x5a, 0x9a, 0xbc, 0xa0, 0x9d, 0xde, 0xd1, 0x41, 0xbb, 0x65, 0x15, 0xe4, 0x05, + 0xdd, 0x3f, 0xe9, 0x1e, 0xbd, 0xb4, 0x8a, 0x18, 0xab, 0x35, 0x68, 0x3f, 0x57, 0xc7, 0x7e, 0xaf, + 0x45, 0x3b, 0x96, 0x4e, 0x2c, 0x30, 0x3b, 0x3f, 0xf4, 0x3a, 0xf4, 0xe0, 0xb8, 0xd3, 0x1d, 0xb4, + 0x8e, 0xac, 0x92, 0xf4, 0x79, 0xda, 0x6a, 0x1f, 0x9e, 0xf6, 0xac, 0xb2, 0x0a, 0xd6, 0x1f, 0x9c, + 0xd0, 0x8e, 0xb5, 0x26, 0x85, 0x7d, 0xda, 0x3a, 0xe8, 0x76, 0xf6, 0x2d, 0xc3, 0x29, 0x58, 0xda, + 0xd3, 0x3d, 0xd8, 0xf0, 0xa3, 0xe6, 0xd4, 0x17, 0x8c, 0x73, 0xf5, 0x47, 0xe7, 0xc7, 0xfb, 0x89, + 0xe4, 0x47, 0x3b, 0xea, 0xb4, 0x73, 0x11, 0xed, 0x4c, 0xc5, 0x0e, 0xde, 0xee, 0xa4, 0x4d, 
0x3d, + 0x2b, 0xa3, 0xfc, 0xe0, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x5c, 0xc8, 0x21, 0x3e, 0x0d, 0x00, 0x00, } diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index 3f403b8b458..1c82260e1f3 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -5,8 +5,9 @@ package vschema import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" math "math" + + proto "github.com/golang/protobuf/proto" query "vitess.io/vitess/go/vt/proto/query" ) @@ -115,12 +116,13 @@ func (m *RoutingRule) GetToTables() []string { // Keyspace is the vschema for a keyspace. type Keyspace struct { // If sharded is false, vindexes and tables are ignored. - Sharded bool `protobuf:"varint,1,opt,name=sharded,proto3" json:"sharded,omitempty"` - Vindexes map[string]*Vindex `protobuf:"bytes,2,rep,name=vindexes,proto3" json:"vindexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Tables map[string]*Table `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Sharded bool `protobuf:"varint,1,opt,name=sharded,proto3" json:"sharded,omitempty"` + Vindexes map[string]*Vindex `protobuf:"bytes,2,rep,name=vindexes,proto3" json:"vindexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tables map[string]*Table `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` } func (m *Keyspace) Reset() { *m = Keyspace{} } @@ -169,6 +171,13 @@ func (m *Keyspace) GetTables() map[string]*Table { return nil } +func (m *Keyspace) GetRequireExplicitRouting() bool { + if m != nil { + return m.RequireExplicitRouting + } + return false +} + // Vindex is the vindex info for a Keyspace. type Vindex struct { // The type must match one of the predefined @@ -552,46 +561,48 @@ func init() { func init() { proto.RegisterFile("vschema.proto", fileDescriptor_3f6849254fea3e77) } var fileDescriptor_3f6849254fea3e77 = []byte{ - // 643 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xdd, 0x6e, 0xd3, 0x30, - 0x14, 0x56, 0xda, 0x35, 0x6b, 0x4f, 0xd6, 0x0e, 0xac, 0x6d, 0x84, 0x4e, 0xd3, 0xaa, 0x68, 0x40, - 0xe1, 0xa2, 0x95, 0x3a, 0x21, 0x41, 0xd1, 0x10, 0x63, 0xe2, 0x62, 0x62, 0x12, 0x28, 0x9b, 0x76, - 0xc1, 0x4d, 0xe5, 0xb5, 0x66, 0x8b, 0xd6, 0xc6, 0x99, 0xed, 0x04, 0xf2, 0x28, 0xdc, 0xf2, 0x06, - 0x3c, 0x0f, 0x2f, 0x83, 0xe2, 0x9f, 0xcc, 0xe9, 0xca, 0x9d, 0x3f, 0x9f, 0xf3, 0x7d, 0xe7, 0xf3, - 0xb1, 0x7d, 0xa0, 0x9d, 0xf1, 0xe9, 0x0d, 0x59, 0xe0, 0x41, 0xc2, 0xa8, 0xa0, 0x68, 0x5d, 0xc3, - 0xae, 0x77, 0x97, 0x12, 0x96, 0xab, 0xdd, 0x60, 0x0c, 0x1b, 0x21, 0x4d, 0x45, 0x14, 0x5f, 0x87, - 0xe9, 0x9c, 0x70, 0xf4, 0x0a, 0x1a, 0xac, 0x58, 0xf8, 0x4e, 0xaf, 0xde, 0xf7, 0x46, 0x5b, 0x03, - 0x23, 0x62, 0x65, 0x85, 0x2a, 0x25, 0x38, 0x05, 0xcf, 0xda, 0x45, 0x7b, 0x00, 0xdf, 0x19, 0x5d, - 0x4c, 0x04, 0xbe, 0x9a, 0x13, 0xdf, 0xe9, 0x39, 0xfd, 0x56, 0xd8, 0x2a, 0x76, 0x2e, 0x8a, 0x0d, - 0xb4, 0x0b, 0x2d, 0x41, 0x55, 0x90, 0xfb, 0xb5, 0x5e, 0xbd, 0xdf, 0x0a, 0x9b, 0x82, 0xca, 0x18, - 0x0f, 0xfe, 0xd4, 0xa0, 0xf9, 0x99, 0xe4, 0x3c, 0xc1, 0x53, 0x82, 0x7c, 0x58, 0xe7, 0x37, 0x98, - 0xcd, 0xc8, 0x4c, 0xaa, 0x34, 0x43, 0x03, 0xd1, 0x3b, 0x68, 0x66, 0x51, 0x3c, 0x23, 0x3f, 0xb5, - 0x84, 0x37, 0xda, 0x2f, 0x0d, 0x1a, 0xfa, 0xe0, 0x52, 0x67, 0x7c, 0x8a, 0x05, 0xcb, 0xc3, 0x92, - 0x80, 0x5e, 0x83, 
0xab, 0xab, 0xd7, 0x25, 0x75, 0xef, 0x21, 0x55, 0xb9, 0x51, 0x44, 0x9d, 0xdc, - 0x3d, 0x83, 0x76, 0x45, 0x11, 0x3d, 0x82, 0xfa, 0x2d, 0xc9, 0xf5, 0x01, 0x8b, 0x25, 0x7a, 0x06, - 0x8d, 0x0c, 0xcf, 0x53, 0xe2, 0xd7, 0x7a, 0x4e, 0xdf, 0x1b, 0x6d, 0x96, 0xc2, 0x8a, 0x18, 0xaa, - 0xe8, 0xb8, 0xf6, 0xc6, 0xe9, 0x9e, 0x82, 0x67, 0x15, 0x59, 0xa1, 0x75, 0x50, 0xd5, 0xea, 0x94, - 0x5a, 0x92, 0x66, 0x49, 0x05, 0xbf, 0x1d, 0x70, 0x55, 0x01, 0x84, 0x60, 0x4d, 0xe4, 0x89, 0x69, - 0xba, 0x5c, 0xa3, 0x43, 0x70, 0x13, 0xcc, 0xf0, 0xc2, 0x74, 0x6a, 0x77, 0xc9, 0xd5, 0xe0, 0xab, - 0x8c, 0xea, 0xc3, 0xaa, 0x54, 0xb4, 0x05, 0x0d, 0xfa, 0x23, 0x26, 0xcc, 0xaf, 0x4b, 0x25, 0x05, - 0xba, 0x6f, 0xc1, 0xb3, 0x92, 0x57, 0x98, 0xde, 0xb2, 0x4d, 0xb7, 0x6c, 0x93, 0xbf, 0x6a, 0xd0, - 0x50, 0xf7, 0xbf, 0xca, 0xe3, 0x7b, 0xd8, 0x9c, 0xd2, 0x79, 0xba, 0x88, 0x27, 0x4b, 0xd7, 0xba, - 0x5d, 0x9a, 0x3d, 0x91, 0x71, 0xdd, 0xc8, 0xce, 0xd4, 0x42, 0x84, 0xa3, 0x23, 0xe8, 0xe0, 0x54, - 0xd0, 0x49, 0x14, 0x4f, 0x19, 0x59, 0x90, 0x58, 0x48, 0xdf, 0xde, 0x68, 0xa7, 0xa4, 0x1f, 0xa7, - 0x82, 0x9e, 0x9a, 0x68, 0xd8, 0xc6, 0x36, 0x44, 0x2f, 0x61, 0x5d, 0x09, 0x72, 0x7f, 0x4d, 0x96, - 0xdd, 0x5c, 0x2a, 0x1b, 0x9a, 0x38, 0xda, 0x01, 0x37, 0x89, 0xe2, 0x98, 0xcc, 0xfc, 0x86, 0xf4, - 0xaf, 0x11, 0x1a, 0xc3, 0x53, 0x7d, 0x82, 0x79, 0xc4, 0xc5, 0x04, 0xa7, 0xe2, 0x86, 0xb2, 0x48, - 0x60, 0x11, 0x65, 0xc4, 0x77, 0xe5, 0xeb, 0x7d, 0xa2, 0x12, 0xce, 0x22, 0x2e, 0x8e, 0xed, 0x70, - 0x70, 0x01, 0x1b, 0xf6, 0xe9, 0x8a, 0x1a, 0x2a, 0x55, 0xf7, 0x48, 0xa3, 0xa2, 0x73, 0x31, 0x5e, - 0x98, 0xe6, 0xca, 0x75, 0xf1, 0x47, 0x8c, 0xf5, 0xba, 0xfc, 0x4b, 0x06, 0x06, 0x27, 0xd0, 0xae, - 0x1c, 0xfa, 0xbf, 0xb2, 0x5d, 0x68, 0x72, 0x72, 0x97, 0x92, 0x78, 0x6a, 0xa4, 0x4b, 0x1c, 0x1c, - 0x81, 0x7b, 0x52, 0x2d, 0xee, 0x58, 0xc5, 0xf7, 0xf5, 0x55, 0x16, 0xac, 0xce, 0xc8, 0x1b, 0xa8, - 0x81, 0x72, 0x91, 0x27, 0x44, 0xdd, 0x6b, 0xf0, 0xd7, 0x01, 0x38, 0x67, 0xd9, 0xe5, 0xb9, 0x6c, - 0x26, 0xfa, 0x00, 0xad, 0x5b, 0xfd, 0xc5, 0xcc, 0x60, 
0x09, 0xca, 0x4e, 0xdf, 0xe7, 0x95, 0xff, - 0x50, 0x3f, 0xca, 0x7b, 0x12, 0x1a, 0x43, 0x9b, 0xa9, 0x51, 0x33, 0x51, 0xe3, 0x49, 0xfd, 0x8e, - 0xed, 0x55, 0xe3, 0x89, 0x87, 0x1b, 0xcc, 0x42, 0xdd, 0x2f, 0xd0, 0xa9, 0x0a, 0xaf, 0x78, 0xc0, - 0x2f, 0xaa, 0xbf, 0xee, 0xf1, 0x83, 0xd1, 0x60, 0xbd, 0xe9, 0x8f, 0xcf, 0xbf, 0x1d, 0x64, 0x91, - 0x20, 0x9c, 0x0f, 0x22, 0x3a, 0x54, 0xab, 0xe1, 0x35, 0x1d, 0x66, 0x62, 0x28, 0x67, 0xea, 0x50, - 0x73, 0xaf, 0x5c, 0x09, 0x0f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x0d, 0x93, 0x48, 0x89, - 0x05, 0x00, 0x00, + // 673 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcf, 0x4e, 0xdb, 0x4e, + 0x10, 0x96, 0x13, 0x62, 0x92, 0x31, 0x09, 0xbf, 0xdf, 0x0a, 0xa8, 0x1b, 0x84, 0x88, 0x2c, 0xda, + 0xa6, 0x3d, 0x24, 0x52, 0x50, 0x25, 0x9a, 0x8a, 0xaa, 0x14, 0x71, 0x40, 0x45, 0x6a, 0x65, 0x10, + 0x87, 0x5e, 0x2c, 0xe3, 0x6c, 0x61, 0x45, 0xe2, 0x35, 0xbb, 0x6b, 0x97, 0x3c, 0x4a, 0xaf, 0x7d, + 0xad, 0x3e, 0x42, 0x5f, 0xa2, 0xf2, 0xfe, 0x31, 0x1b, 0x48, 0x6f, 0x3b, 0x3b, 0xf3, 0x7d, 0xf3, + 0xed, 0xec, 0xcc, 0x40, 0xbb, 0xe0, 0xc9, 0x0d, 0x9e, 0xc5, 0x83, 0x8c, 0x51, 0x41, 0xd1, 0xaa, + 0x36, 0xbb, 0xde, 0x5d, 0x8e, 0xd9, 0x5c, 0xdd, 0x06, 0x63, 0x58, 0x0b, 0x69, 0x2e, 0x48, 0x7a, + 0x1d, 0xe6, 0x53, 0xcc, 0xd1, 0x1b, 0x68, 0xb0, 0xf2, 0xe0, 0x3b, 0xbd, 0x7a, 0xdf, 0x1b, 0x6d, + 0x0c, 0x0c, 0x89, 0x15, 0x15, 0xaa, 0x90, 0xe0, 0x14, 0x3c, 0xeb, 0x16, 0xed, 0x00, 0x7c, 0x67, + 0x74, 0x16, 0x89, 0xf8, 0x6a, 0x8a, 0x7d, 0xa7, 0xe7, 0xf4, 0x5b, 0x61, 0xab, 0xbc, 0xb9, 0x28, + 0x2f, 0xd0, 0x36, 0xb4, 0x04, 0x55, 0x4e, 0xee, 0xd7, 0x7a, 0xf5, 0x7e, 0x2b, 0x6c, 0x0a, 0x2a, + 0x7d, 0x3c, 0xf8, 0x53, 0x83, 0xe6, 0x67, 0x3c, 0xe7, 0x59, 0x9c, 0x60, 0xe4, 0xc3, 0x2a, 0xbf, + 0x89, 0xd9, 0x04, 0x4f, 0x24, 0x4b, 0x33, 0x34, 0x26, 0x7a, 0x0f, 0xcd, 0x82, 0xa4, 0x13, 0x7c, + 0xaf, 0x29, 0xbc, 0xd1, 0x6e, 0x25, 0xd0, 0xc0, 0x07, 0x97, 0x3a, 0xe2, 0x24, 0x15, 0x6c, 0x1e, + 0x56, 0x00, 0xf4, 0x16, 
0x5c, 0x9d, 0xbd, 0x2e, 0xa1, 0x3b, 0x4f, 0xa1, 0x4a, 0x8d, 0x02, 0xea, + 0x60, 0x74, 0x00, 0x3e, 0xc3, 0x77, 0x39, 0x61, 0x38, 0xc2, 0xf7, 0xd9, 0x94, 0x24, 0x44, 0x44, + 0x4c, 0x3d, 0xdb, 0x5f, 0x91, 0xf2, 0xb6, 0xb4, 0xff, 0x44, 0xbb, 0x75, 0x51, 0xba, 0x67, 0xd0, + 0x5e, 0xd0, 0x82, 0xfe, 0x83, 0xfa, 0x2d, 0x9e, 0xeb, 0xd2, 0x94, 0x47, 0xf4, 0x02, 0x1a, 0x45, + 0x3c, 0xcd, 0xb1, 0x5f, 0xeb, 0x39, 0x7d, 0x6f, 0xb4, 0x5e, 0x49, 0x52, 0xc0, 0x50, 0x79, 0xc7, + 0xb5, 0x03, 0xa7, 0x7b, 0x0a, 0x9e, 0x25, 0x6f, 0x09, 0xd7, 0xde, 0x22, 0x57, 0xa7, 0xe2, 0x92, + 0x30, 0x8b, 0x2a, 0xf8, 0xe5, 0x80, 0xab, 0x12, 0x20, 0x04, 0x2b, 0x62, 0x9e, 0x99, 0xef, 0x92, + 0x67, 0xb4, 0x0f, 0x6e, 0x16, 0xb3, 0x78, 0x66, 0x6a, 0xbc, 0xfd, 0x48, 0xd5, 0xe0, 0xab, 0xf4, + 0xea, 0x32, 0xa9, 0x50, 0xb4, 0x01, 0x0d, 0xfa, 0x23, 0xc5, 0xcc, 0xaf, 0x4b, 0x26, 0x65, 0x74, + 0xdf, 0x81, 0x67, 0x05, 0x2f, 0x11, 0xbd, 0x61, 0x8b, 0x6e, 0xd9, 0x22, 0x7f, 0xd6, 0xa0, 0xa1, + 0x3a, 0x67, 0x99, 0xc6, 0x0f, 0xb0, 0x9e, 0xd0, 0x69, 0x3e, 0x4b, 0xa3, 0x47, 0x0d, 0xb1, 0x59, + 0x89, 0x3d, 0x96, 0x7e, 0x5d, 0xc8, 0x4e, 0x62, 0x59, 0x98, 0xa3, 0x43, 0xe8, 0xc4, 0xb9, 0xa0, + 0x11, 0x49, 0x13, 0x86, 0x67, 0x38, 0x15, 0x52, 0xb7, 0x37, 0xda, 0xaa, 0xe0, 0x47, 0xb9, 0xa0, + 0xa7, 0xc6, 0x1b, 0xb6, 0x63, 0xdb, 0x44, 0xaf, 0x61, 0x55, 0x11, 0x72, 0x7f, 0x45, 0xa6, 0x5d, + 0x7f, 0x94, 0x36, 0x34, 0x7e, 0xb4, 0x05, 0x6e, 0x46, 0xd2, 0x14, 0x4f, 0xfc, 0x86, 0xd4, 0xaf, + 0x2d, 0x34, 0x86, 0xe7, 0xfa, 0x05, 0x53, 0xc2, 0x45, 0x14, 0xe7, 0xe2, 0x86, 0x32, 0x22, 0x62, + 0x41, 0x0a, 0xec, 0xbb, 0xb2, 0xb1, 0x9e, 0xa9, 0x80, 0x33, 0xc2, 0xc5, 0x91, 0xed, 0x0e, 0x2e, + 0x60, 0xcd, 0x7e, 0x5d, 0x99, 0x43, 0x85, 0xea, 0x1a, 0x69, 0xab, 0xac, 0x5c, 0x1a, 0xcf, 0x4c, + 0x71, 0xe5, 0xb9, 0x9c, 0x2e, 0x23, 0xbd, 0x2e, 0xa7, 0xd0, 0x98, 0xc1, 0x31, 0xb4, 0x17, 0x1e, + 0xfd, 0x4f, 0xda, 0x2e, 0x34, 0x39, 0xbe, 0xcb, 0x71, 0x9a, 0x18, 0xea, 0xca, 0x0e, 0x0e, 0xc1, + 0x3d, 0x5e, 0x4c, 0xee, 0x58, 0xc9, 0x77, 0xf5, 0x57, 0x96, 
0xa8, 0xce, 0xc8, 0x1b, 0xa8, 0x55, + 0x74, 0x31, 0xcf, 0xb0, 0xfa, 0xd7, 0xe0, 0xb7, 0x03, 0x70, 0xce, 0x8a, 0xcb, 0x73, 0x59, 0x4c, + 0xf4, 0x11, 0x5a, 0xb7, 0x7a, 0x38, 0xcd, 0x4a, 0x0a, 0xaa, 0x4a, 0x3f, 0xc4, 0x55, 0x13, 0xac, + 0x9b, 0xf2, 0x01, 0x84, 0xc6, 0xd0, 0xd6, 0xd3, 0x1a, 0xa9, 0xc5, 0xa6, 0xa6, 0x63, 0x73, 0xd9, + 0x62, 0xe3, 0xe1, 0x1a, 0xb3, 0xac, 0xee, 0x17, 0xe8, 0x2c, 0x12, 0x2f, 0x69, 0xe0, 0x57, 0x8b, + 0x53, 0xf7, 0xff, 0x93, 0xa5, 0x62, 0xf5, 0xf4, 0xa7, 0x97, 0xdf, 0xf6, 0x0a, 0x22, 0x30, 0xe7, + 0x03, 0x42, 0x87, 0xea, 0x34, 0xbc, 0xa6, 0xc3, 0x42, 0x0c, 0xe5, 0x36, 0x1e, 0x6a, 0xec, 0x95, + 0x2b, 0xcd, 0xfd, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa7, 0x99, 0x19, 0xc3, 0x05, 0x00, + 0x00, } diff --git a/go/vt/proto/vttime/time.pb.go b/go/vt/proto/vttime/time.pb.go new file mode 100644 index 00000000000..79cca2f65ee --- /dev/null +++ b/go/vt/proto/vttime/time.pb.go @@ -0,0 +1,88 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: time.proto + +package vttime + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Time represents a time stamp in nanoseconds. In go, use logutil library +// to convert times. 
+type Time struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanoseconds int32 `protobuf:"varint,2,opt,name=nanoseconds,proto3" json:"nanoseconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Time) Reset() { *m = Time{} } +func (m *Time) String() string { return proto.CompactTextString(m) } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_49a92d779a28c7fd, []int{0} +} + +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Time) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Time) GetNanoseconds() int32 { + if m != nil { + return m.Nanoseconds + } + return 0 +} + +func init() { + proto.RegisterType((*Time)(nil), "vttime.Time") +} + +func init() { proto.RegisterFile("time.proto", fileDescriptor_49a92d779a28c7fd) } + +var fileDescriptor_49a92d779a28c7fd = []byte{ + // 120 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc9, 0xcc, 0x4d, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0x2b, 0x01, 0xf1, 0x94, 0x9c, 0xb8, 0x58, + 0x42, 0x32, 0x73, 0x53, 0x85, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, + 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, 0x5c, 0x21, 0x05, 0x2e, 0xee, 0xbc, 0xc4, 0xbc, 0x7c, + 0x98, 0x2c, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0xb2, 0x90, 
0x93, 0x6a, 0x94, 0x72, 0x59, 0x66, + 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, + 0x0f, 0xb6, 0x4b, 0x1f, 0x62, 0x55, 0x12, 0x1b, 0x98, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xb4, 0x42, 0xf5, 0xf7, 0x87, 0x00, 0x00, 0x00, +} diff --git a/go/vt/topo/topoproto/keyspace.go b/go/vt/topo/topoproto/keyspace.go new file mode 100644 index 00000000000..e409f763fd7 --- /dev/null +++ b/go/vt/topo/topoproto/keyspace.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topoproto + +import ( + "fmt" + "strings" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// ParseKeyspaceType parses a string into a KeyspaceType +func ParseKeyspaceType(param string) (topodatapb.KeyspaceType, error) { + value, ok := topodatapb.KeyspaceType_value[strings.ToUpper(param)] + if !ok { + // default + return topodatapb.KeyspaceType_NORMAL, fmt.Errorf("unknown keyspace type: %v", value) + } + return topodatapb.KeyspaceType(value), nil +} diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go index 97dd1263c04..c5bd52811d4 100644 --- a/go/vt/topo/topoproto/tablet.go +++ b/go/vt/topo/topoproto/tablet.go @@ -34,8 +34,8 @@ import ( // This file contains the topodata.Tablet utility functions. const ( - // Default name for databases is the prefix plus keyspace - vtDbPrefix = "vt_" + // VtDbPrefix + keyspace is the default name for databases. 
+ VtDbPrefix = "vt_" ) // cache the conversion from tablet type enum to lower case string. @@ -258,7 +258,7 @@ func TabletDbName(tablet *topodatapb.Tablet) string { if tablet.Keyspace == "" { return "" } - return vtDbPrefix + tablet.Keyspace + return VtDbPrefix + tablet.Keyspace } // TabletIsAssigned returns if this tablet is assigned to a keyspace and shard. diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 94cb8744a61..128799fb2b1 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -120,11 +120,14 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/wrangler" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/vttime" ) var ( @@ -282,8 +285,8 @@ var commands = []commandGroup{ { "Keyspaces", []command{ {"CreateKeyspace", commandCreateKeyspace, - "[-sharding_column_name=name] [-sharding_column_type=type] [-served_from=tablettype1:ks1,tablettype2:ks2,...] [-force] ", - "Creates the specified keyspace."}, + "[-sharding_column_name=name] [-sharding_column_type=type] [-served_from=tablettype1:ks1,tablettype2:ks2,...] [-force] [-keyspace_type=type] [-base_keyspace=base_keyspace] [-snapshot_time=time] ", + "Creates the specified keyspace. keyspace_type can be NORMAL or SNAPSHOT. For a SNAPSHOT keyspace you must specify the name of a base_keyspace, and a snapshot_time in UTC, in RFC3339 time format, e.g. 2006-01-02T15:04:05+00:00"}, {"DeleteKeyspace", commandDeleteKeyspace, "[-recursive] ", "Deletes the specified keyspace. In recursive mode, it also recursively deletes all shards in the keyspace. 
Otherwise, there must be no shards left in the keyspace."}, @@ -1156,7 +1159,7 @@ func commandExecuteHook(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandCreateShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "Proceeds with the command even if the keyspace already exists") + force := subFlags.Bool("force", false, "Proceeds with the command even if the shard already exists") parent := subFlags.Bool("parent", false, "Creates the parent keyspace if it doesn't already exist") if err := subFlags.Parse(args); err != nil { return err @@ -1552,6 +1555,9 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags var servedFrom flagutil.StringMapValue subFlags.Var(&servedFrom, "served_from", "Specifies a comma-separated list of dbtype:keyspace pairs used to serve traffic") + keyspaceType := subFlags.String("keyspace_type", "", "Specifies the type of the keyspace") + baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") + timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") if err := subFlags.Parse(args); err != nil { return err } @@ -1564,9 +1570,40 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return err } + ktype := topodatapb.KeyspaceType_NORMAL + if *keyspaceType != "" { + kt, err := topoproto.ParseKeyspaceType(*keyspaceType) + if err != nil { + wr.Logger().Infof("error parsing keyspace type %v, defaulting to NORMAL", *keyspaceType) + } else { + ktype = kt + } + } + + var snapshotTime *vttime.Time + if ktype == topodatapb.KeyspaceType_SNAPSHOT { + if *baseKeyspace == "" { + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "base_keyspace must be specified while creating a snapshot keyspace") + } + if _, err := wr.TopoServer().GetKeyspace(ctx, *baseKeyspace); err != nil { + return 
vterrors.Wrapf(err, "Cannot find base_keyspace: %v", *baseKeyspace) + } + // process snapshot_time + if *timestampStr == "" { + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "snapshot_time must be specified when creating a snapshot keyspace") + } + timeTime, err := time.Parse(time.RFC3339, *timestampStr) + if err != nil { + return err + } + snapshotTime = logutil.TimeToProto(timeTime) + } ki := &topodatapb.Keyspace{ ShardingColumnName: *shardingColumnName, ShardingColumnType: kit, + KeyspaceType: ktype, + BaseKeyspace: *baseKeyspace, + SnapshotTime: snapshotTime, } if len(servedFrom) > 0 { for name, value := range servedFrom { @@ -1590,7 +1627,36 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags err = wr.TopoServer().EnsureVSchema(ctx, keyspace) } - return err + if err != nil { + return err + } + + if ktype == topodatapb.KeyspaceType_SNAPSHOT { + // copy vschema from base keyspace + vs, err := wr.TopoServer().GetVSchema(ctx, *baseKeyspace) + if err != nil { + wr.Logger().Infof("error from GetVSchema for base_keyspace: %v, %v", *baseKeyspace, err) + if topo.IsErrType(err, topo.NoNode) { + vs = &vschemapb.Keyspace{ + Sharded: false, + Tables: make(map[string]*vschemapb.Table), + Vindexes: make(map[string]*vschemapb.Vindex), + RequireExplicitRouting: true, + } + } else { + return err + } + } else { + // SNAPSHOT keyspaces are excluded from global routing. 
+ vs.RequireExplicitRouting = true + } + if err := wr.TopoServer().SaveVSchema(ctx, keyspace, vs); err != nil { + wr.Logger().Infof("error from SaveVSchema %v:%v", vs, err) + return err + } + return wr.TopoServer().RebuildSrvVSchema(ctx, []string{} /* cells */) + } + return nil } func commandDeleteKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index 57d4e77a0e3..e387faf3f08 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) func compactJSON(in []byte) string { @@ -50,9 +51,29 @@ func TestAPI(t *testing.T) { // Populate topo. Remove ServedTypes from shards to avoid ordering issues. ts.CreateKeyspace(ctx, "ks1", &topodatapb.Keyspace{ShardingColumnName: "shardcol"}) ts.CreateShard(ctx, "ks1", "-80") - ts.CreateShard(ctx, "ks1", "80-") + // SaveVSchema to test that creating a snapshot keyspace copies VSchema + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "name1": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "table1": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "column1", + Name: "name1", + }, + }, + }, + }, + } + ts.SaveVSchema(ctx, "ks1", vs) + tablet1 := topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "cell1", Uid: 100}, Keyspace: "ks1", @@ -109,15 +130,24 @@ func TestAPI(t *testing.T) { table := []struct { method, path, body, want string }{ + // Create snapshot keyspace using API + {"POST", "vtctl/", `["CreateKeyspace", "-keyspace_type=SNAPSHOT", "-base_keyspace=ks1", "-snapshot_time=2006-01-02T15:04:05+00:00", "ks3"]`, `{ + "Error": "", + "Output": "" + }`}, + // Cells {"GET", "cells", "", `["cell1","cell2"]`}, // Keyspaces - {"GET", "keyspaces", "", `["ks1"]`}, + {"GET", 
"keyspaces", "", `["ks1", "ks3"]`}, {"GET", "keyspaces/ks1", "", `{ "sharding_column_name": "shardcol", "sharding_column_type": 0, - "served_froms": [] + "served_froms": [], + "keyspace_type":0, + "base_keyspace":"", + "snapshot_time":null }`}, {"GET", "keyspaces/nonexistent", "", "404 page not found"}, {"POST", "keyspaces/ks1?action=TestKeyspaceAction", "", `{ @@ -254,7 +284,15 @@ func TestAPI(t *testing.T) { // vtctl RunCommand {"POST", "vtctl/", `["GetKeyspace","ks1"]`, `{ "Error": "", - "Output": "{\n \"sharding_column_name\": \"shardcol\",\n \"sharding_column_type\": 0,\n \"served_froms\": [\n ]\n}\n\n" + "Output": "{\n \"sharding_column_name\": \"shardcol\",\n \"sharding_column_type\": 0,\n \"served_froms\": [\n ],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null\n}\n\n" + }`}, + {"POST", "vtctl/", `["GetKeyspace","ks3"]`, `{ + "Error": "", + "Output": "{\n \"sharding_column_name\": \"\",\n \"sharding_column_type\": 0,\n \"served_froms\": [\n ],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n }\n}\n\n" + }`}, + {"POST", "vtctl/", `["GetVSchema","ks3"]`, `{ + "Error": "", + "Output": "{\n \"sharded\": true,\n \"vindexes\": {\n \"name1\": {\n \"type\": \"hash\"\n }\n },\n \"tables\": {\n \"table1\": {\n \"columnVindexes\": [\n {\n \"column\": \"column1\",\n \"name\": \"name1\"\n }\n ]\n }\n },\n \"requireExplicitRouting\": true\n}\n\n" }`}, {"POST", "vtctl/", `["GetKeyspace","does_not_exist"]`, `{ "Error": "node doesn't exist: keyspaces/does_not_exist/Keyspace", diff --git a/go/vt/vtgate/gateway/gateway.go b/go/vt/vtgate/gateway/gateway.go index 7b458332fac..7350ed82676 100644 --- a/go/vt/vtgate/gateway/gateway.go +++ b/go/vt/vtgate/gateway/gateway.go @@ -106,7 +106,7 @@ func GetCreator() Creator { // Note it has the same name as the Gateway's interface method, as it // just calls it. 
func WaitForTablets(gw Gateway, tabletTypesToWait []topodatapb.TabletType) error { - log.Infof("Gateway waiting for serving tablets...") + log.Infof("Gateway waiting for serving tablets of types %v ...", tabletTypesToWait) ctx, cancel := context.WithTimeout(context.Background(), *initialTabletTimeout) defer cancel() @@ -119,7 +119,7 @@ func WaitForTablets(gw Gateway, tabletTypesToWait []topodatapb.TabletType) error // In this scenario, we were able to reach the // topology service, but some tablets may not be // ready. We just warn and keep going. - log.Warningf("Timeout waiting for all keyspaces / shards to have healthy tablets, may be in degraded mode") + log.Warningf("Timeout waiting for all keyspaces / shards to have healthy tablets of types %v, may be in degraded mode", tabletTypesToWait) err = nil default: // Nothing to do here, the caller will log.Fatalf. diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index 3cf5c9a2bb4..8fe3abdefb7 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -224,10 +224,14 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc if err != nil { return err } - if _, ok := vschema.uniqueVindexes[vname]; ok { - vschema.uniqueVindexes[vname] = nil - } else { - vschema.uniqueVindexes[vname] = vindex + + // If the keyspace requires explicit routing, don't include it in global routing + if !ks.RequireExplicitRouting { + if _, ok := vschema.uniqueVindexes[vname]; ok { + vschema.uniqueVindexes[vname] = nil + } else { + vschema.uniqueVindexes[vname] = vindex + } } ksvschema.Vindexes[vname] = vindex } @@ -326,10 +330,13 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc t.Ordered = colVindexSorted(t.ColumnVindexes) // Add the table to the map entries. 
- if _, ok := vschema.uniqueTables[tname]; ok { - vschema.uniqueTables[tname] = nil - } else { - vschema.uniqueTables[tname] = t + // If the keyspace requires explicit routing, don't include it in global routing + if !ks.RequireExplicitRouting { + if _, ok := vschema.uniqueTables[tname]; ok { + vschema.uniqueTables[tname] = nil + } else { + vschema.uniqueTables[tname] = t + } } ksvschema.Tables[tname] = t } diff --git a/go/vt/vttablet/tabletmanager/init_tablet.go b/go/vt/vttablet/tabletmanager/init_tablet.go index 47e5c00917f..a0d3d6413ea 100644 --- a/go/vt/vttablet/tabletmanager/init_tablet.go +++ b/go/vt/vttablet/tabletmanager/init_tablet.go @@ -151,6 +151,19 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error { log.Infof("Using detected machine hostname: %v To change this, fix your machine network configuration or override it with -tablet_hostname.", hostname) } + // if we are recovering from a snapshot we set initDbNameOverride + // but only if it not already set + if *initDbNameOverride == "" { + keyspaceInfo, err := agent.TopoServer.GetKeyspace(ctx, *initKeyspace) + if err != nil { + return vterrors.Wrapf(err, "Error getting keyspace: %v", *initKeyspace) + } + baseKeyspace := keyspaceInfo.Keyspace.BaseKeyspace + if baseKeyspace != "" { + *initDbNameOverride = topoproto.VtDbPrefix + baseKeyspace + } + } + // create and populate tablet record tablet := &topodatapb.Tablet{ Alias: agent.TabletAlias, @@ -195,7 +208,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error { // instance of a startup timeout). Upon running this code // again, we want to fix ShardReplication. if updateErr := topo.UpdateTabletReplicationData(ctx, agent.TopoServer, tablet); updateErr != nil { - return updateErr + return vterrors.Wrap(updateErr, "UpdateTabletReplicationData failed") } // Then overwrite everything, ignoring version mismatch. 
diff --git a/go/vt/vttablet/tabletmanager/orchestrator.go b/go/vt/vttablet/tabletmanager/orchestrator.go index 65e0b4141a5..a527d583a74 100644 --- a/go/vt/vttablet/tabletmanager/orchestrator.go +++ b/go/vt/vttablet/tabletmanager/orchestrator.go @@ -186,6 +186,9 @@ func (orc *orcClient) apiGet(pathParts ...string) ([]byte, error) { // Note that url.String() will URL-escape the path we gave it above. req, err := http.NewRequest("GET", url.String(), nil) + if err != nil { + return nil, err + } if *orcUser != "" { req.SetBasicAuth(*orcUser, *orcPassword) } diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index b197c8784c9..37b5150ea3a 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // This file handles the initial backup restore upon startup. @@ -83,7 +84,21 @@ func (agent *ActionAgent) restoreDataLocked(ctx context.Context, logger logutil. // Record local metadata values based on the original type. 
localMetadata := agent.getLocalMetadataValues(originalType) tablet := agent.Tablet() - dir := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) + + keyspace := tablet.Keyspace + keyspaceInfo, err := agent.TopoServer.GetKeyspace(ctx, keyspace) + if err != nil { + return err + } + // For a SNAPSHOT keyspace, we have to look for backups of BaseKeyspace + // so we will pass the BaseKeyspace in RestoreParams instead of tablet.Keyspace + if keyspaceInfo.KeyspaceType == topodatapb.KeyspaceType_SNAPSHOT { + if keyspaceInfo.BaseKeyspace == "" { + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Sprintf("snapshot keyspace %v has no base_keyspace set", tablet.Keyspace)) + } + keyspace = keyspaceInfo.BaseKeyspace + log.Infof("Using base_keyspace %v to restore keyspace %v", keyspace, tablet.Keyspace) + } params := mysqlctl.RestoreParams{ Cnf: agent.Cnf, @@ -94,14 +109,15 @@ func (agent *ActionAgent) restoreDataLocked(ctx context.Context, logger logutil. LocalMetadata: localMetadata, DeleteBeforeRestore: deleteBeforeRestore, DbName: topoproto.TabletDbName(tablet), - Dir: dir, + Keyspace: keyspace, + Shard: tablet.Shard, + StartTime: logutil.ProtoToTime(keyspaceInfo.SnapshotTime), } // Loop until a backup exists, unless we were told to give up immediately. - var pos mysql.Position - var err error + var backupManifest *mysqlctl.BackupManifest for { - pos, err = mysqlctl.Restore(ctx, params) + backupManifest, err = mysqlctl.Restore(ctx, params) if waitForBackupInterval == 0 { break } @@ -118,14 +134,19 @@ func (agent *ActionAgent) restoreDataLocked(ctx context.Context, logger logutil. } } + var pos mysql.Position + if backupManifest != nil { + pos = backupManifest.Position + } switch err { case nil: // Starting from here we won't be able to recover if we get stopped by a cancelled // context. Thus we use the background context to get through to the finish. - - // Reconnect to master. 
- if err := agent.startReplication(context.Background(), pos, originalType); err != nil { - return err + if keyspaceInfo.KeyspaceType == topodatapb.KeyspaceType_NORMAL { + // Reconnect to master only for "NORMAL" keyspaces + if err := agent.startReplication(context.Background(), pos, originalType); err != nil { + return err + } } case mysqlctl.ErrNoBackup: // No-op, starting with empty database. diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index 5ebf261a4e6..5c10ca512cb 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -97,8 +97,6 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) // now we can run the backup - dir := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) - name := fmt.Sprintf("%v.%v", time.Now().UTC().Format("2006-01-02.150405"), topoproto.TabletAliasString(tablet.Alias)) backupParams := mysqlctl.BackupParams{ Cnf: agent.Cnf, Mysqld: agent.MysqlDaemon, @@ -108,9 +106,11 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo TopoServer: agent.TopoServer, Keyspace: tablet.Keyspace, Shard: tablet.Shard, + TabletAlias: topoproto.TabletAliasString(tablet.Alias), + BackupTime: time.Now(), } - returnErr := mysqlctl.Backup(ctx, dir, name, backupParams) + returnErr := mysqlctl.Backup(ctx, backupParams) if engine.ShouldDrainForBackup() { bgCtx := context.Background() diff --git a/proto/logutil.proto b/proto/logutil.proto index d720752314e..1e244f3aec7 100644 --- a/proto/logutil.proto +++ b/proto/logutil.proto @@ -21,12 +21,7 @@ option go_package = "vitess.io/vitess/go/vt/proto/logutil"; package logutil; -// Time represents a time stamp in nanoseconds. In go, use logutil library -// to convert times. -message Time { - int64 seconds = 1; - int32 nanoseconds = 2; -} +import "time.proto"; // Level is the level of the log messages. 
enum Level { @@ -43,7 +38,7 @@ enum Level { // Event is a single logging event message Event { - Time time = 1; + vttime.Time time = 1; Level level = 2; string file = 3; int64 line = 4; diff --git a/proto/time.proto b/proto/time.proto new file mode 100644 index 00000000000..ff1a5151a93 --- /dev/null +++ b/proto/time.proto @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains a shared time data structure + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vttime"; + +package vttime; + +// Time represents a time stamp in nanoseconds. In go, use logutil library +// to convert times. +message Time { + int64 seconds = 1; + int32 nanoseconds = 2; +} + diff --git a/proto/topodata.proto b/proto/topodata.proto index 87758a1f930..6598a90dbd9 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -26,6 +26,8 @@ option java_package="io.vitess.proto"; package topodata; +import "time.proto"; + // KeyRange describes a range of sharding keys, when range-based // sharding is used. message KeyRange { @@ -33,6 +35,15 @@ message KeyRange { bytes end = 2; } +// KeyspaceType describes the type of the keyspace +enum KeyspaceType { + // NORMAL is the default value + NORMAL = 0; + + // SNAPSHOT is when we are creating a snapshot keyspace + SNAPSHOT = 1; +} + // KeyspaceIdType describes the type of the sharding key for a // range-based sharded keyspace. 
enum KeyspaceIdType { @@ -266,6 +277,21 @@ message Keyspace { // ServedFrom will redirect the appropriate traffic to // another keyspace. repeated ServedFrom served_froms = 4; + + // keyspace_type will determine how this keyspace is treated by + // vtgate / vschema. Normal keyspaces are routable by + // any query. Snapshot keyspaces are only accessible + // by explicit addressing or by calling "use keyspace" first + KeyspaceType keyspace_type = 5; + + // base_keyspace is the base keyspace from which a snapshot + // keyspace is created. empty for normal keyspaces + string base_keyspace = 6; + + // snapshot_time (in UTC) is a property of snapshot + // keyspaces which tells us what point in time + // the snapshot is of + vttime.Time snapshot_time = 7; } // ShardReplication describes the MySQL replication relationships diff --git a/proto/vschema.proto b/proto/vschema.proto index bc94d486113..a1fce0c68b8 100644 --- a/proto/vschema.proto +++ b/proto/vschema.proto @@ -43,6 +43,8 @@ message Keyspace { bool sharded = 1; map vindexes = 2; map tables = 3; + // If require_explicit_routing is true, vindexes and tables are not added to global routing + bool require_explicit_routing = 4; } // Vindex is the vindex info for a Keyspace. 
diff --git a/py/vtproto/logutil_pb2.py b/py/vtproto/logutil_pb2.py index 7f8d6d237e5..afa2bac4050 100644 --- a/py/vtproto/logutil_pb2.py +++ b/py/vtproto/logutil_pb2.py @@ -8,20 +8,22 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +import time_pb2 as time__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='logutil.proto', package='logutil', syntax='proto3', - serialized_pb=_b('\n\rlogutil.proto\x12\x07logutil\",\n\x04Time\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\x13\n\x0bnanoseconds\x18\x02 \x01(\x05\"n\n\x05\x45vent\x12\x1b\n\x04time\x18\x01 \x01(\x0b\x32\r.logutil.Time\x12\x1d\n\x05level\x18\x02 \x01(\x0e\x32\x0e.logutil.Level\x12\x0c\n\x04\x66ile\x18\x03 \x01(\t\x12\x0c\n\x04line\x18\x04 \x01(\x03\x12\r\n\x05value\x18\x05 \x01(\t*6\n\x05Level\x12\x08\n\x04INFO\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x12\x0b\n\x07\x43ONSOLE\x10\x03\x42&Z$vitess.io/vitess/go/vt/proto/logutilb\x06proto3') -) + serialized_options=_b('Z$vitess.io/vitess/go/vt/proto/logutil'), + serialized_pb=_b('\n\rlogutil.proto\x12\x07logutil\x1a\ntime.proto\"m\n\x05\x45vent\x12\x1a\n\x04time\x18\x01 \x01(\x0b\x32\x0c.vttime.Time\x12\x1d\n\x05level\x18\x02 \x01(\x0e\x32\x0e.logutil.Level\x12\x0c\n\x04\x66ile\x18\x03 \x01(\t\x12\x0c\n\x04line\x18\x04 \x01(\x03\x12\r\n\x05value\x18\x05 \x01(\t*6\n\x05Level\x12\x08\n\x04INFO\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x12\x0b\n\x07\x43ONSOLE\x10\x03\x42&Z$vitess.io/vitess/go/vt/proto/logutilb\x06proto3') + , + dependencies=[time__pb2.DESCRIPTOR,]) _LEVEL = _descriptor.EnumDescriptor( name='Level', @@ -31,25 +33,25 @@ values=[ _descriptor.EnumValueDescriptor( name='INFO', index=0, number=0, - options=None, + serialized_options=None, type=None), 
_descriptor.EnumValueDescriptor( name='WARNING', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='ERROR', index=2, number=2, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='CONSOLE', index=3, number=3, - options=None, + serialized_options=None, type=None), ], containing_type=None, - options=None, - serialized_start=184, - serialized_end=238, + serialized_options=None, + serialized_start=149, + serialized_end=203, ) _sym_db.RegisterEnumDescriptor(_LEVEL) @@ -61,44 +63,6 @@ -_TIME = _descriptor.Descriptor( - name='Time', - full_name='logutil.Time', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='seconds', full_name='logutil.Time.seconds', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='nanoseconds', full_name='logutil.Time.nanoseconds', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=26, - serialized_end=70, -) - - _EVENT = _descriptor.Descriptor( name='Event', full_name='logutil.Event', @@ -112,65 +76,57 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='level', full_name='logutil.Event.level', index=1, number=2, 
type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='file', full_name='logutil.Event.file', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='line', full_name='logutil.Event.line', index=3, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='logutil.Event.value', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=72, - serialized_end=182, + serialized_start=38, + serialized_end=147, ) -_EVENT.fields_by_name['time'].message_type = _TIME +_EVENT.fields_by_name['time'].message_type = time__pb2._TIME _EVENT.fields_by_name['level'].enum_type = _LEVEL -DESCRIPTOR.message_types_by_name['Time'] = _TIME DESCRIPTOR.message_types_by_name['Event'] = _EVENT DESCRIPTOR.enum_types_by_name['Level'] = _LEVEL _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Time = _reflection.GeneratedProtocolMessageType('Time', 
(_message.Message,), dict( - DESCRIPTOR = _TIME, - __module__ = 'logutil_pb2' - # @@protoc_insertion_point(class_scope:logutil.Time) - )) -_sym_db.RegisterMessage(Time) - Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict( DESCRIPTOR = _EVENT, __module__ = 'logutil_pb2' @@ -179,6 +135,5 @@ _sym_db.RegisterMessage(Event) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z$vitess.io/vitess/go/vt/proto/logutil')) +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/py/vtproto/time_pb2.py b/py/vtproto/time_pb2.py new file mode 100644 index 00000000000..af172b3c5d8 --- /dev/null +++ b/py/vtproto/time_pb2.py @@ -0,0 +1,77 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: time.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='time.proto', + package='vttime', + syntax='proto3', + serialized_options=_b('Z#vitess.io/vitess/go/vt/proto/vttime'), + serialized_pb=_b('\n\ntime.proto\x12\x06vttime\",\n\x04Time\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\x13\n\x0bnanoseconds\x18\x02 \x01(\x05\x42%Z#vitess.io/vitess/go/vt/proto/vttimeb\x06proto3') +) + + + + +_TIME = _descriptor.Descriptor( + name='Time', + full_name='vttime.Time', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seconds', full_name='vttime.Time.seconds', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='nanoseconds', full_name='vttime.Time.nanoseconds', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22, + serialized_end=66, +) + +DESCRIPTOR.message_types_by_name['Time'] = _TIME +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Time = _reflection.GeneratedProtocolMessageType('Time', (_message.Message,), dict( + DESCRIPTOR = _TIME, + __module__ = 'time_pb2' + # @@protoc_insertion_point(class_scope:vttime.Time) + )) +_sym_db.RegisterMessage(Time) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/py/vtproto/time_pb2_grpc.py b/py/vtproto/time_pb2_grpc.py new file mode 100644 index 00000000000..a89435267cb --- /dev/null +++ b/py/vtproto/time_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+import grpc + diff --git a/py/vtproto/topodata_pb2.py b/py/vtproto/topodata_pb2.py index 66ac9fea9b0..71becda9e16 100644 --- a/py/vtproto/topodata_pb2.py +++ b/py/vtproto/topodata_pb2.py @@ -13,6 +13,7 @@ _sym_db = _symbol_database.Default() +import time_pb2 as time__pb2 DESCRIPTOR = _descriptor.FileDescriptor( @@ -20,9 +21,33 @@ package='topodata', syntax='proto3', serialized_options=_b('\n\017io.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodata'), - serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\xb6\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x12\x16\n\x0emysql_hostname\x18\x0c \x01(\t\x12\x12\n\nmysql_port\x18\r \x01(\x05\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x0b\x10\x0c\"\xd3\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x12\x19\n\x11is_master_serving\x18\x07 
\x01(\x08\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a{\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\x12\x0e\n\x06\x66rozen\x18\x05 \x01(\x08J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06\"\xf5\x01\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\tJ\x04\x08\x03\x10\x04\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"i\n\x12ShardTabletControl\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x1e\n\x16query_service_disabled\x18\x03 \x01(\x08\"\xda\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x1a\xaf\x01\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 
\x03(\x0b\x32\x18.topodata.ShardReference\x12;\n\x15shard_tablet_controls\x18\x03 \x03(\x0b\x32\x1c.topodata.ShardTabletControl\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\tJ\x04\x08\x05\x10\x06\"6\n\x08\x43\x65llInfo\x12\x16\n\x0eserver_address\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tJ\x04\x08\x03\x10\x04\"\x1b\n\nCellsAlias\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x90\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\x0b\n\x07\x44RAINED\x10\x08\x1a\x02\x10\x01\x42\x38\n\x0fio.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodatab\x06proto3') + serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\x1a\ntime.proto\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\xb6\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x12\x16\n\x0emysql_hostname\x18\x0c \x01(\t\x12\x12\n\nmysql_port\x18\r \x01(\x05\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x0b\x10\x0c\"\xd3\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x12\x19\n\x11is_master_serving\x18\x07 \x01(\x08\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a{\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\x12\x0e\n\x06\x66rozen\x18\x05 \x01(\x08J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06\"\xe0\x02\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x12-\n\rkeyspace_type\x18\x05 \x01(\x0e\x32\x16.topodata.KeyspaceType\x12\x15\n\rbase_keyspace\x18\x06 \x01(\t\x12#\n\rsnapshot_time\x18\x07 \x01(\x0b\x32\x0c.vttime.Time\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\tJ\x04\x08\x03\x10\x04\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"i\n\x12ShardTabletControl\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x1e\n\x16query_service_disabled\x18\x03 \x01(\x08\"\xda\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x1a\xaf\x01\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x12;\n\x15shard_tablet_controls\x18\x03 \x03(\x0b\x32\x1c.topodata.ShardTabletControl\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\tJ\x04\x08\x05\x10\x06\"6\n\x08\x43\x65llInfo\x12\x16\n\x0eserver_address\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tJ\x04\x08\x03\x10\x04\"\x1b\n\nCellsAlias\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t*(\n\x0cKeyspaceType\x12\n\n\x06NORMAL\x10\x00\x12\x0c\n\x08SNAPSHOT\x10\x01*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x90\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\x0b\n\x07\x44RAINED\x10\x08\x1a\x02\x10\x01\x42\x38\n\x0fio.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodatab\x06proto3') + , + dependencies=[time__pb2.DESCRIPTOR,]) + +_KEYSPACETYPE = _descriptor.EnumDescriptor( + name='KeyspaceType', + full_name='topodata.KeyspaceType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + 
name='NORMAL', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SNAPSHOT', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=2377, + serialized_end=2417, ) +_sym_db.RegisterEnumDescriptor(_KEYSPACETYPE) +KeyspaceType = enum_type_wrapper.EnumTypeWrapper(_KEYSPACETYPE) _KEYSPACEIDTYPE = _descriptor.EnumDescriptor( name='KeyspaceIdType', full_name='topodata.KeyspaceIdType', @@ -44,8 +69,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2258, - serialized_end=2308, + serialized_start=2419, + serialized_end=2469, ) _sym_db.RegisterEnumDescriptor(_KEYSPACEIDTYPE) @@ -99,12 +124,14 @@ ], containing_type=None, serialized_options=_b('\020\001'), - serialized_start=2311, - serialized_end=2455, + serialized_start=2472, + serialized_end=2616, ) _sym_db.RegisterEnumDescriptor(_TABLETTYPE) TabletType = enum_type_wrapper.EnumTypeWrapper(_TABLETTYPE) +NORMAL = 0 +SNAPSHOT = 1 UNSET = 0 UINT64 = 1 BYTES = 2 @@ -154,8 +181,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=28, - serialized_end=66, + serialized_start=40, + serialized_end=78, ) @@ -192,8 +219,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=68, - serialized_end=108, + serialized_start=80, + serialized_end=120, ) @@ -230,8 +257,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=446, - serialized_end=492, + serialized_start=458, + serialized_end=504, ) _TABLET_TAGSENTRY = _descriptor.Descriptor( @@ -267,8 +294,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=494, - serialized_end=537, + serialized_start=506, + serialized_end=549, ) _TABLET = _descriptor.Descriptor( @@ -367,8 +394,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=111, - serialized_end=549, + serialized_start=123, + serialized_end=561, ) @@ -405,8 +432,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=830, - serialized_end=900, + serialized_start=842, + 
serialized_end=912, ) _SHARD_SOURCESHARD = _descriptor.Descriptor( @@ -463,8 +490,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=902, - serialized_end=1016, + serialized_start=914, + serialized_end=1028, ) _SHARD_TABLETCONTROL = _descriptor.Descriptor( @@ -514,8 +541,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1018, - serialized_end=1141, + serialized_start=1030, + serialized_end=1153, ) _SHARD = _descriptor.Descriptor( @@ -579,8 +606,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=552, - serialized_end=1147, + serialized_start=564, + serialized_end=1159, ) @@ -624,8 +651,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1301, - serialized_end=1389, + serialized_start=1420, + serialized_end=1508, ) _KEYSPACE = _descriptor.Descriptor( @@ -656,6 +683,27 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='keyspace_type', full_name='topodata.Keyspace.keyspace_type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='base_keyspace', full_name='topodata.Keyspace.base_keyspace', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='snapshot_time', full_name='topodata.Keyspace.snapshot_time', index=5, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), 
], extensions=[ ], @@ -668,8 +716,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1150, - serialized_end=1395, + serialized_start=1162, + serialized_end=1514, ) @@ -699,8 +747,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1465, - serialized_end=1516, + serialized_start=1584, + serialized_end=1635, ) _SHARDREPLICATION = _descriptor.Descriptor( @@ -729,8 +777,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1397, - serialized_end=1516, + serialized_start=1516, + serialized_end=1635, ) @@ -767,8 +815,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1518, - serialized_end=1587, + serialized_start=1637, + serialized_end=1706, ) @@ -812,8 +860,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1589, - serialized_end=1694, + serialized_start=1708, + serialized_end=1813, ) @@ -857,8 +905,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1915, - serialized_end=2090, + serialized_start=2034, + serialized_end=2209, ) _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( @@ -894,8 +942,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2092, - serialized_end=2165, + serialized_start=2211, + serialized_end=2284, ) _SRVKEYSPACE = _descriptor.Descriptor( @@ -945,8 +993,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1697, - serialized_end=2171, + serialized_start=1816, + serialized_end=2290, ) @@ -983,8 +1031,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2173, - serialized_end=2227, + serialized_start=2292, + serialized_end=2346, ) @@ -1014,8 +1062,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2229, - serialized_end=2256, + serialized_start=2348, + serialized_end=2375, ) _TABLET_PORTMAPENTRY.containing_type = _TABLET @@ -1040,6 +1088,8 @@ _KEYSPACE_SERVEDFROM.containing_type = _KEYSPACE _KEYSPACE.fields_by_name['sharding_column_type'].enum_type = _KEYSPACEIDTYPE _KEYSPACE.fields_by_name['served_froms'].message_type = _KEYSPACE_SERVEDFROM +_KEYSPACE.fields_by_name['keyspace_type'].enum_type = 
_KEYSPACETYPE +_KEYSPACE.fields_by_name['snapshot_time'].message_type = time__pb2._TIME _SHARDREPLICATION_NODE.fields_by_name['tablet_alias'].message_type = _TABLETALIAS _SHARDREPLICATION_NODE.containing_type = _SHARDREPLICATION _SHARDREPLICATION.fields_by_name['nodes'].message_type = _SHARDREPLICATION_NODE @@ -1065,6 +1115,7 @@ DESCRIPTOR.message_types_by_name['SrvKeyspace'] = _SRVKEYSPACE DESCRIPTOR.message_types_by_name['CellInfo'] = _CELLINFO DESCRIPTOR.message_types_by_name['CellsAlias'] = _CELLSALIAS +DESCRIPTOR.enum_types_by_name['KeyspaceType'] = _KEYSPACETYPE DESCRIPTOR.enum_types_by_name['KeyspaceIdType'] = _KEYSPACEIDTYPE DESCRIPTOR.enum_types_by_name['TabletType'] = _TABLETTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) diff --git a/py/vtproto/vschema_pb2.py b/py/vtproto/vschema_pb2.py index 94f613b8df1..90897a8f264 100644 --- a/py/vtproto/vschema_pb2.py +++ b/py/vtproto/vschema_pb2.py @@ -20,7 +20,7 @@ package='vschema', syntax='proto3', serialized_options=_b('Z$vitess.io/vitess/go/vt/proto/vschema'), - serialized_pb=_b('\n\rvschema.proto\x12\x07vschema\x1a\x0bquery.proto\"3\n\x0cRoutingRules\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.vschema.RoutingRule\"4\n\x0bRoutingRule\x12\x12\n\nfrom_table\x18\x01 \x01(\t\x12\x11\n\tto_tables\x18\x02 \x03(\t\"\xfe\x01\n\x08Keyspace\x12\x0f\n\x07sharded\x18\x01 \x01(\x08\x12\x31\n\x08vindexes\x18\x02 \x03(\x0b\x32\x1f.vschema.Keyspace.VindexesEntry\x12-\n\x06tables\x18\x03 \x03(\x0b\x32\x1d.vschema.Keyspace.TablesEntry\x1a@\n\rVindexesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0f.vschema.Vindex:\x02\x38\x01\x1a=\n\x0bTablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.vschema.Table:\x02\x38\x01\"\x81\x01\n\x06Vindex\x12\x0c\n\x04type\x18\x01 \x01(\t\x12+\n\x06params\x18\x02 \x03(\x0b\x32\x1b.vschema.Vindex.ParamsEntry\x12\r\n\x05owner\x18\x03 \x01(\t\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\xca\x01\n\x05Table\x12\x0c\n\x04type\x18\x01 \x01(\t\x12.\n\x0f\x63olumn_vindexes\x18\x02 \x03(\x0b\x32\x15.vschema.ColumnVindex\x12.\n\x0e\x61uto_increment\x18\x03 \x01(\x0b\x32\x16.vschema.AutoIncrement\x12 \n\x07\x63olumns\x18\x04 \x03(\x0b\x32\x0f.vschema.Column\x12\x0e\n\x06pinned\x18\x05 \x01(\t\x12!\n\x19\x63olumn_list_authoritative\x18\x06 \x01(\x08\"=\n\x0c\x43olumnVindex\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\"1\n\rAutoIncrement\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x10\n\x08sequence\x18\x02 \x01(\t\"1\n\x06\x43olumn\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x04type\x18\x02 \x01(\x0e\x32\x0b.query.Type\"\xb6\x01\n\nSrvVSchema\x12\x35\n\tkeyspaces\x18\x01 \x03(\x0b\x32\".vschema.SrvVSchema.KeyspacesEntry\x12,\n\rrouting_rules\x18\x02 \x01(\x0b\x32\x15.vschema.RoutingRules\x1a\x43\n\x0eKeyspacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.vschema.Keyspace:\x02\x38\x01\x42&Z$vitess.io/vitess/go/vt/proto/vschemab\x06proto3') + serialized_pb=_b('\n\rvschema.proto\x12\x07vschema\x1a\x0bquery.proto\"3\n\x0cRoutingRules\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.vschema.RoutingRule\"4\n\x0bRoutingRule\x12\x12\n\nfrom_table\x18\x01 \x01(\t\x12\x11\n\tto_tables\x18\x02 \x03(\t\"\xa0\x02\n\x08Keyspace\x12\x0f\n\x07sharded\x18\x01 \x01(\x08\x12\x31\n\x08vindexes\x18\x02 \x03(\x0b\x32\x1f.vschema.Keyspace.VindexesEntry\x12-\n\x06tables\x18\x03 \x03(\x0b\x32\x1d.vschema.Keyspace.TablesEntry\x12 \n\x18require_explicit_routing\x18\x04 \x01(\x08\x1a@\n\rVindexesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0f.vschema.Vindex:\x02\x38\x01\x1a=\n\x0bTablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.vschema.Table:\x02\x38\x01\"\x81\x01\n\x06Vindex\x12\x0c\n\x04type\x18\x01 \x01(\t\x12+\n\x06params\x18\x02 
\x03(\x0b\x32\x1b.vschema.Vindex.ParamsEntry\x12\r\n\x05owner\x18\x03 \x01(\t\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xca\x01\n\x05Table\x12\x0c\n\x04type\x18\x01 \x01(\t\x12.\n\x0f\x63olumn_vindexes\x18\x02 \x03(\x0b\x32\x15.vschema.ColumnVindex\x12.\n\x0e\x61uto_increment\x18\x03 \x01(\x0b\x32\x16.vschema.AutoIncrement\x12 \n\x07\x63olumns\x18\x04 \x03(\x0b\x32\x0f.vschema.Column\x12\x0e\n\x06pinned\x18\x05 \x01(\t\x12!\n\x19\x63olumn_list_authoritative\x18\x06 \x01(\x08\"=\n\x0c\x43olumnVindex\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\"1\n\rAutoIncrement\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x10\n\x08sequence\x18\x02 \x01(\t\"1\n\x06\x43olumn\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x04type\x18\x02 \x01(\x0e\x32\x0b.query.Type\"\xb6\x01\n\nSrvVSchema\x12\x35\n\tkeyspaces\x18\x01 \x03(\x0b\x32\".vschema.SrvVSchema.KeyspacesEntry\x12,\n\rrouting_rules\x18\x02 \x01(\x0b\x32\x15.vschema.RoutingRules\x1a\x43\n\x0eKeyspacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.vschema.Keyspace:\x02\x38\x01\x42&Z$vitess.io/vitess/go/vt/proto/vschemab\x06proto3') , dependencies=[query__pb2.DESCRIPTOR,]) @@ -129,8 +129,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=274, - serialized_end=338, + serialized_start=308, + serialized_end=372, ) _KEYSPACE_TABLESENTRY = _descriptor.Descriptor( @@ -166,8 +166,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=340, - serialized_end=401, + serialized_start=374, + serialized_end=435, ) _KEYSPACE = _descriptor.Descriptor( @@ -198,6 +198,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='require_explicit_routing', full_name='vschema.Keyspace.require_explicit_routing', index=3, + number=4, type=8, cpp_type=7, label=1, + 
has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -211,7 +218,7 @@ oneofs=[ ], serialized_start=147, - serialized_end=401, + serialized_end=435, ) @@ -248,8 +255,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=488, - serialized_end=533, + serialized_start=522, + serialized_end=567, ) _VINDEX = _descriptor.Descriptor( @@ -292,8 +299,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=404, - serialized_end=533, + serialized_start=438, + serialized_end=567, ) @@ -358,8 +365,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=536, - serialized_end=738, + serialized_start=570, + serialized_end=772, ) @@ -403,8 +410,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=740, - serialized_end=801, + serialized_start=774, + serialized_end=835, ) @@ -441,8 +448,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=803, - serialized_end=852, + serialized_start=837, + serialized_end=886, ) @@ -479,8 +486,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=854, - serialized_end=903, + serialized_start=888, + serialized_end=937, ) @@ -517,8 +524,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1021, - serialized_end=1088, + serialized_start=1055, + serialized_end=1122, ) _SRVVSCHEMA = _descriptor.Descriptor( @@ -554,8 +561,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=906, - serialized_end=1088, + serialized_start=940, + serialized_end=1122, ) _ROUTINGRULES.fields_by_name['rules'].message_type = _ROUTINGRULE diff --git a/test/config.json b/test/config.json index 0951f33fa22..fb48408558c 100644 --- a/test/config.json +++ b/test/config.json @@ -285,6 +285,15 @@ "RetryMax": 0, "Tags": [] }, + "recovery": { + "File": "recovery.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 4, + "RetryMax": 0, + "Tags": [] + }, "reparent": { "File": "reparent.py", 
"Args": [], @@ -345,6 +354,15 @@ "RetryMax": 0, "Tags": [] }, + "sharded_recovery": { + "File": "sharded_recovery.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 4, + "RetryMax": 0, + "Tags": [] + }, "tabletmanager": { "File": "tabletmanager.py", "Args": [], @@ -541,6 +559,15 @@ "RetryMax": 0, "Tags": [] }, + "xb_recovery": { + "File": "xb_recovery.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 4, + "RetryMax": 0, + "Tags": [] + }, "xtrabackup_xbstream": { "File": "xtrabackup_xbstream.py", "Args": [], diff --git a/test/recovery.py b/test/recovery.py new file mode 100755 index 00000000000..a319e5c7de8 --- /dev/null +++ b/test/recovery.py @@ -0,0 +1,534 @@ +#!/usr/bin/env python + +# Copyright 2019 The Vitess Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import datetime +import json +import logging +import os +import unittest + +import MySQLdb + +import environment +import tablet +import utils + +from mysql_flavor import mysql_flavor +from vtdb import vtgate_client + +use_xtrabackup = False +xtrabackup_args = [] +stream_mode = 'xbstream' + +# tablets +tablet_master = tablet.Tablet() +tablet_replica1 = tablet.Tablet() +tablet_replica2 = tablet.Tablet() +tablet_replica3 = tablet.Tablet() + +all_tablets = [tablet_master, tablet_replica1, tablet_replica2, tablet_replica3] + +def setUpModule(): + global xtrabackup_args + xtrabackup_args = ['-backup_engine_implementation', + 'xtrabackup', + '-xtrabackup_stream_mode', + stream_mode, + '-xtrabackup_user=vt_dba'] + try: + environment.topo_server().setup() + setup_procs = [t.init_mysql() for t in all_tablets] + utils.wait_procs(setup_procs) + except: + tearDownModule() + raise + +def tearDownModule(): + utils.required_teardown() + if utils.options.skip_teardown: + return + teardown_procs = [t.teardown_mysql() for t in all_tablets] + utils.wait_procs(teardown_procs, raise_on_error=False) + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + for t in all_tablets: + t.remove_tree() + +def get_connection(timeout=15.0): + protocol, endpoint = utils.vtgate.rpc_endpoint(python=True) + try: + return vtgate_client.connect(protocol, endpoint, timeout) + except Exception: + logging.exception('Connection to vtgate (timeout=%s) failed.', timeout) + raise + +class TestRecovery(unittest.TestCase): + + def setUp(self): + xtra_args = ['-enable_replication_reporter'] + if use_xtrabackup: + xtra_args.extend(xtrabackup_args) + tablet_master.init_tablet('replica', 'test_keyspace', '0', start=True, + supports_backups=True, + extra_args=xtra_args) + tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True, + supports_backups=True, + extra_args=xtra_args) + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0', + 
tablet_master.tablet_alias]) + + def tearDown(self): + for t in all_tablets: + t.kill_vttablet() + + tablet.Tablet.check_vttablet_count() + environment.topo_server().wipe() + for t in all_tablets: + t.reset_replication() + t.set_semi_sync_enabled(master=False, slave=False) + t.clean_dbs() + + for backup in self._list_backups(): + self._remove_backup(backup) + + _create_vt_insert_test = '''create table vt_insert_test ( + id bigint auto_increment, + msg varchar(64), + primary key (id) + ) Engine=InnoDB''' + + _vschema_json = '''{ + "tables": { + "vt_insert_test": {} + } +}''' + + + def _insert_data(self, t, index): + """Add a single row with value 'index' to the given tablet.""" + t.mquery( + 'vt_test_keyspace', + "insert into vt_insert_test (msg) values ('test %s')" % + index, write=True) + + def _check_data(self, t, count, msg): + """Check that the specified tablet has the expected number of rows.""" + timeout = 10 + while True: + try: + result = t.mquery( + 'vt_test_keyspace', 'select count(*) from vt_insert_test') + if result[0][0] == count: + break + except MySQLdb.DatabaseError: + # ignore exceptions, we'll just timeout (the tablet creation + # can take some time to replicate, and we get a 'table vt_insert_test + # does not exist exception in some rare cases) + logging.exception('exception waiting for data to replicate') + timeout = utils.wait_step(msg, timeout) + + def _restore(self, t, keyspace): + """Erase mysql/tablet dir, then start tablet with restore enabled.""" + self._reset_tablet_dir(t) + + # create a recovery keyspace + utils.run_vtctl(['CreateKeyspace', + '-keyspace_type=SNAPSHOT', + '-base_keyspace=test_keyspace', + '-snapshot_time', + datetime.utcnow().isoformat("T")+"Z", + keyspace]) + # set disable_active_reparents to true and enable_replication_reporter to false + # otherwise replication_reporter will try to restart replication + xtra_args = ['-disable_active_reparents', + '-enable_replication_reporter=false'] + 
xtra_args.extend(tablet.get_backup_storage_flags()) + if use_xtrabackup: + xtra_args.extend(xtrabackup_args) + + t.start_vttablet(wait_for_state='SERVING', + init_tablet_type='replica', + init_keyspace=keyspace, + init_shard='0', + supports_backups=True, + extra_args=xtra_args) + + def _reset_tablet_dir(self, t): + """Stop mysql, delete everything including tablet dir, restart mysql.""" + utils.wait_procs([t.teardown_mysql()]) + # Specify ignore_options because we want to delete the tree even + # if the test's -k / --keep-logs was specified on the command line. + t.remove_tree(ignore_options=True) + proc = t.init_mysql() + utils.wait_procs([proc]) + + def _list_backups(self): + """Get a list of backup names for the test shard.""" + backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() + + ['ListBackups', 'test_keyspace/0'], + mode=utils.VTCTL_VTCTL, trap_output=True) + return backups.splitlines() + + def _remove_backup(self, backup): + """Remove a named backup from the test shard.""" + utils.run_vtctl( + tablet.get_backup_storage_flags() + + ['RemoveBackup', 'test_keyspace/0', backup], + auto_log=True, mode=utils.VTCTL_VTCTL) + + def test_basic_recovery(self): + """Test recovery from backup flow. 
+ + test_recovery will: + - create a shard with master and replica1 only + - run InitShardMaster + - insert some data + - take a backup + - insert more data on the master + - create a recovery keyspace + - bring up tablet_replica2 in the new keyspace + - check that new tablet does not have data created after backup + - check that vtgate queries work correctly + + """ + + # insert data on master, wait for replica to get it + utils.run_vtctl(['ApplySchema', + '-sql', self._create_vt_insert_test, + 'test_keyspace'], + auto_log=True) + self._insert_data(tablet_master, 1) + self._check_data(tablet_replica1, 1, 'replica1 tablet getting data') + + master_pos = mysql_flavor().master_position(tablet_master) + # backup the replica + utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) + + # check that the backup shows up in the listing + backups = self._list_backups() + logging.debug('list of backups: %s', backups) + self.assertEqual(len(backups), 1) + self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias)) + # backup name is of format date.time.tablet_alias + strs = backups[0].split('.') + expectedTime = datetime.strptime(strs[0] + '.' + strs[1], '%Y-%m-%d.%H%M%S') + + # insert more data on the master + self._insert_data(tablet_master, 2) + + utils.run_vtctl(['ApplyVSchema', + '-vschema', self._vschema_json, + 'test_keyspace'], + auto_log=True) + + vs = utils.run_vtctl_json(['GetVSchema', 'test_keyspace']) + logging.debug('test_keyspace vschema: %s', str(vs)) + ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) + logging.debug('Serving keyspace before: %s', str(ks)) + vs = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj']) + logging.debug('Serving vschema before recovery: %s', str(vs)) + + # now bring up the recovery keyspace with 1 tablet, letting it restore from backup. 
+ self._restore(tablet_replica2, 'recovery_keyspace') + + vs = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj']) + logging.debug('Serving vschema after recovery: %s', str(vs)) + ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) + logging.debug('Serving keyspace after: %s', str(ks)) + vs = utils.run_vtctl_json(['GetVSchema', 'recovery_keyspace']) + logging.debug('recovery_keyspace vschema: %s', str(vs)) + + # check the new replica has only 1 row + self._check_data(tablet_replica2, 1, 'replica2 tablet should not have new data') + + # check that the restored replica has the right local_metadata + result = tablet_replica2.mquery('_vt', 'select * from local_metadata') + metadata = {} + for row in result: + metadata[row[0]] = row[1] + self.assertEqual(metadata['Alias'], 'test_nj-0000062346') + self.assertEqual(metadata['ClusterAlias'], 'recovery_keyspace.0') + self.assertEqual(metadata['DataCenter'], 'test_nj') + self.assertEqual(metadata['RestorePosition'], master_pos) + logging.debug('RestoredBackupTime: %s', str(metadata['RestoredBackupTime'])) + gotTime = datetime.strptime(metadata['RestoredBackupTime'], '%Y-%m-%dT%H:%M:%SZ') + self.assertEqual(gotTime, expectedTime) + + # update original 1st row in master + tablet_master.mquery( + 'vt_test_keyspace', + "update vt_insert_test set msg='new msg' where id=1", write=True) + + # verify that master has new value + result = tablet_master.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'new msg') + + # verify that restored replica has old value + result = tablet_replica2.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'test 1') + + # start vtgate + vtgate = utils.VtGate() + vtgate.start(tablets=[ + tablet_master, tablet_replica1, tablet_replica2 + ], tablet_types_to_wait='REPLICA') + utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1) + 
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_keyspace.0.replica', 1) + + # check that vtgate doesn't route queries to new tablet + vtgate_conn = get_connection() + cursor = vtgate_conn.cursor( + tablet_type='replica', keyspace=None, writable=True) + + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + cursor.execute('select msg from vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'new msg') + + # check that new keyspace is accessible by using ks.table + cursor.execute('select count(*) from recovery_keyspace.vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 1) + + cursor.execute('select msg from recovery_keyspace.vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'test 1') + + # check that new keyspace is accessible with 'use ks' + cursor.execute('use recovery_keyspace@replica', {}) + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 1) + + cursor.execute('select msg from recovery_keyspace.vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'test 1') + + # TODO check that new tablet is accessible with 'use ks:shard' + # this currently does not work through the python client, though it works from mysql client + #cursor.execute('use recovery_keyspace:0@replica', {}) + #cursor.execute('select count(*) from vt_insert_test', {}) + #result = 
cursor.fetchall() + #if not result: + #self.fail('Result cannot be null') + #else: + #self.assertEqual(result[0][0], 1) + + vtgate_conn.close() + tablet_replica2.kill_vttablet() + vtgate.kill() + + def test_multi_recovery(self): + """Test recovery from backup flow. + + test_multi_recovery will: + - create a shard with master and replica1 only + - run InitShardMaster + - insert some data + - take a backup + - insert more data on the master + - take another backup + - create a recovery keyspace after first backup + - bring up tablet_replica2 in the new keyspace + - check that new tablet does not have data created after backup1 + - create second recovery keyspace after second backup + - bring up tablet_replica3 in second keyspace + - check that new tablet has data created after backup1 but not data created after backup2 + - check that vtgate queries work correctly + + """ + + # insert data on master, wait for replica to get it + utils.run_vtctl(['ApplySchema', + '-sql', self._create_vt_insert_test, + 'test_keyspace'], + auto_log=True) + self._insert_data(tablet_master, 1) + self._check_data(tablet_replica1, 1, 'replica1 tablet getting data') + + # backup the replica + utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) + + # check that the backup shows up in the listing + backups = self._list_backups() + logging.debug('list of backups: %s', backups) + self.assertEqual(len(backups), 1) + self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias)) + + # insert more data on the master + self._insert_data(tablet_master, 2) + # wait for it to replicate + self._check_data(tablet_replica1, 2, 'replica1 tablet getting data') + + utils.run_vtctl(['ApplyVSchema', + '-vschema', self._vschema_json, + 'test_keyspace'], + auto_log=True) + + vs = utils.run_vtctl_json(['GetVSchema', 'test_keyspace']) + logging.debug('test_keyspace vschema: %s', str(vs)) + + # now bring up the other replica, letting it restore from backup. 
+ self._restore(tablet_replica2, 'recovery_ks1') + + # we are not asserting on the contents of vschema here + # because the later part of the test (vtgate) will fail + # if the vschema is not copied correctly from the base_keyspace + vs = utils.run_vtctl_json(['GetVSchema', 'recovery_ks1']) + logging.debug('recovery_ks1 vschema: %s', str(vs)) + + # check the new replica does not have the data + self._check_data(tablet_replica2, 1, 'replica2 tablet should not have new data') + + # update original 1st row in master + tablet_master.mquery( + 'vt_test_keyspace', + "update vt_insert_test set msg='new msg 1' where id=1", write=True) + + # verify that master has new value + result = tablet_master.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'new msg 1') + + # verify that restored replica has old value + result = tablet_replica2.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'test 1') + + # take another backup on the replica + utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) + + # insert more data on the master + self._insert_data(tablet_master, 3) + # wait for it to replicate + self._check_data(tablet_replica1, 3, 'replica1 tablet getting data') + + # now bring up the other replica, letting it restore from backup2. 
+ # this also validates that if there are multiple backups, the most recent one is used + self._restore(tablet_replica3, 'recovery_ks2') + + vs = utils.run_vtctl(['GetVSchema', 'recovery_ks2']) + logging.debug('recovery_ks2 vschema: %s', str(vs)) + + # check the new replica does not have the latest data + self._check_data(tablet_replica3, 2, 'replica3 tablet should not have new data') + + # update original 1st row in master again + tablet_master.mquery( + 'vt_test_keyspace', + "update vt_insert_test set msg='new msg 2' where id=1", write=True) + + # verify that master has new value + result = tablet_master.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'new msg 2') + + # verify that restored replica has correct value + result = tablet_replica3.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1') + self.assertEqual(result[0][0], 'new msg 1') + + # start vtgate + vtgate = utils.VtGate() + vtgate.start(tablets=all_tablets, tablet_types_to_wait='REPLICA') + utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_ks1.0.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_ks2.0.replica', 1) + + # check that vtgate doesn't route queries to new tablet + vtgate_conn = get_connection() + cursor = vtgate_conn.cursor( + tablet_type='replica', keyspace=None, writable=True) + + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 3) + + cursor.execute('select msg from vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'new msg 2') + + # check that new keyspace is accessible by using ks.table + cursor.execute('select count(*) from recovery_ks1.vt_insert_test', 
{}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 1) + + cursor.execute('select msg from recovery_ks1.vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'test 1') + + # check that new keyspace is accessible by using ks.table + cursor.execute('select count(*) from recovery_ks2.vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + cursor.execute('select msg from recovery_ks2.vt_insert_test where id=1', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 'new msg 1') + + # TODO check that new tablet is accessible with 'use ks:shard' + # this currently does not work through the python client, though it works from mysql client + #cursor.execute('use recovery_ks1:0@replica', {}) + #cursor.execute('select count(*) from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + #self.fail('Result cannot be null') + #else: + #self.assertEqual(result[0][0], 1) + + vtgate_conn.close() + vtgate.kill() + +if __name__ == '__main__': + utils.main() diff --git a/test/sharded_recovery.py b/test/sharded_recovery.py new file mode 100755 index 00000000000..26c93cb3693 --- /dev/null +++ b/test/sharded_recovery.py @@ -0,0 +1,648 @@ +#!/usr/bin/env python + +# Copyright 2019 The Vitess Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import json +import logging +import os +import unittest + +import MySQLdb + +import environment +import tablet +import utils + +from vtdb import vtgate_client + +# initial shard, covers everything +tablet_master = tablet.Tablet() +tablet_replica1 = tablet.Tablet() +tablet_rdonly = tablet.Tablet() +# to use for recovery keyspace +tablet_replica2 = tablet.Tablet() +tablet_replica3 = tablet.Tablet() + +# split shards +# range '' - 80 +shard_0_master = tablet.Tablet() +shard_0_replica = tablet.Tablet() +shard_0_rdonly = tablet.Tablet() +# range 80 - '' +shard_1_master = tablet.Tablet() +shard_1_replica = tablet.Tablet() +shard_1_rdonly = tablet.Tablet() + +all_tablets = [tablet_master, tablet_replica1, tablet_replica2, tablet_replica3, tablet_rdonly, + shard_0_master, shard_0_replica, shard_0_rdonly, shard_1_master, shard_1_replica, shard_1_rdonly] + +def setUpModule(): + try: + environment.topo_server().setup() + setup_procs = [t.init_mysql() for t in all_tablets] + utils.wait_procs(setup_procs) + except: + tearDownModule() + raise + +def tearDownModule(): + utils.required_teardown() + if utils.options.skip_teardown: + return + teardown_procs = [t.teardown_mysql() for t in all_tablets] + utils.wait_procs(teardown_procs, raise_on_error=False) + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + for t in all_tablets: + t.remove_tree() + +def get_connection(timeout=15.0): + protocol, endpoint = utils.vtgate.rpc_endpoint(python=True) + try: + return vtgate_client.connect(protocol, endpoint, timeout) + 
except Exception: + logging.exception('Connection to vtgate (timeout=%s) failed.', timeout) + raise + +class TestShardedRecovery(unittest.TestCase): + + def setUp(self): + xtra_args = ['-enable_replication_reporter'] + tablet_master.init_tablet('replica', 'test_keyspace', '0', start=True, + supports_backups=True, + extra_args=xtra_args) + tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True, + supports_backups=True, + extra_args=xtra_args) + tablet_rdonly.init_tablet('rdonly', 'test_keyspace', '0', start=True, + supports_backups=True, + extra_args=xtra_args) + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0', + tablet_master.tablet_alias]) + + def tearDown(self): + for t in all_tablets: + t.kill_vttablet() + + tablet.Tablet.check_vttablet_count() + environment.topo_server().wipe() + for t in all_tablets: + t.reset_replication() + t.set_semi_sync_enabled(master=False, slave=False) + t.clean_dbs() + + for shard in ['0', '-80', '80-']: + for backup in self._list_backups(shard): + self._remove_backup(backup, shard) + + _create_vt_insert_test = '''create table vt_insert_test ( + id bigint, + msg varchar(64), + primary key (id) + ) Engine=InnoDB''' + + _vschema_json = '''{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "vt_insert_test": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + } + } +}''' + + def _insert_data(self, t, index): + """Add a single row with value 'index' to the given tablet.""" + t.mquery( + 'vt_test_keyspace', + "insert into vt_insert_test (id, msg) values (%d, 'test %s')" % + (index, index), write=True) + + def _check_data(self, t, count, msg): + """Check that the specified tablet has the expected number of rows.""" + timeout = 10 + while True: + try: + result = t.mquery( + 'vt_test_keyspace', 'select count(*) from vt_insert_test') + if result[0][0] == count: + break + except MySQLdb.DatabaseError: + # ignore exceptions, we'll just timeout (the 
tablet creation + # can take some time to replicate, and we get a 'table vt_insert_test + # does not exist exception in some rare cases) + logging.exception('exception waiting for data to replicate') + timeout = utils.wait_step(msg, timeout) + + def _restore(self, t, keyspace, shard): + """Erase mysql/tablet dir, then start tablet with restore enabled.""" + self._reset_tablet_dir(t) + + # create a recovery keyspace + utils.run_vtctl(['CreateKeyspace', + '-keyspace_type=SNAPSHOT', + '-base_keyspace=test_keyspace', + '-snapshot_time', + datetime.datetime.utcnow().isoformat("T")+"Z", + keyspace]) + + # set disable_active_reparents to true and enable_replication_reporter to false + # otherwise replication_reporter will try to restart replication + xtra_args = ['-disable_active_reparents', + '-enable_replication_reporter=false'] + xtra_args.extend(tablet.get_backup_storage_flags()) + t.start_vttablet(wait_for_state='SERVING', + init_tablet_type='replica', + init_keyspace=keyspace, + init_shard=shard, + supports_backups=True, + extra_args=xtra_args) + + def _reset_tablet_dir(self, t): + """Stop mysql, delete everything including tablet dir, restart mysql.""" + utils.wait_procs([t.teardown_mysql()]) + # Specify ignore_options because we want to delete the tree even + # if the test's -k / --keep-logs was specified on the command line. 
+ t.remove_tree(ignore_options=True) + proc = t.init_mysql() + utils.wait_procs([proc]) + + def _list_backups(self, shard): + """Get a list of backup names for the test shard.""" + backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() + + ['ListBackups', 'test_keyspace/%s' % shard], + mode=utils.VTCTL_VTCTL, trap_output=True) + return backups.splitlines() + + def _remove_backup(self, backup, shard): + """Remove a named backup from the test shard.""" + utils.run_vtctl( + tablet.get_backup_storage_flags() + + ['RemoveBackup', 'test_keyspace/%s' % shard, backup], + auto_log=True, mode=utils.VTCTL_VTCTL) + + def test_unsharded_recovery_after_sharding(self): + """Test recovery from backup flow. + + test_recovery will: + - create a shard with master and replica1 only + - run InitShardMaster + - insert some data + - take a backup + - insert more data on the master + - perform a resharding + - create a recovery keyspace + - bring up tablet_replica2 in the new keyspace + - check that new tablet does not have data created after backup + - check that vtgate queries work correctly + + """ + + # insert data on master, wait for replica to get it + utils.run_vtctl(['ApplySchema', + '-sql', self._create_vt_insert_test, + 'test_keyspace'], + auto_log=True) + self._insert_data(tablet_master, 1) + self._check_data(tablet_replica1, 1, 'replica1 tablet getting data') + # insert more data on the master + self._insert_data(tablet_master, 2) + + # backup the replica + utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) + + # check that the backup shows up in the listing + backups = self._list_backups('0') + logging.debug('list of backups: %s', backups) + self.assertEqual(len(backups), 1) + self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias)) + + # insert more data on the master + self._insert_data(tablet_master, 3) + + utils.run_vtctl(['ApplyVSchema', + '-vschema', self._vschema_json, + 'test_keyspace'], + auto_log=True) + + # create the split 
shards + shard_0_master.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='-80', + tablet_index=0) + shard_0_replica.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='-80', + tablet_index=1) + shard_0_rdonly.init_tablet( + 'rdonly', + keyspace='test_keyspace', + shard='-80', + tablet_index=2) + shard_1_master.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='80-', + tablet_index=0) + shard_1_replica.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='80-', + tablet_index=1) + shard_1_rdonly.init_tablet( + 'rdonly', + keyspace='test_keyspace', + shard='80-', + tablet_index=2) + + for t in [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly]: + t.start_vttablet(wait_for_state=None, + binlog_use_v3_resharding_mode=True) + + for t in [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly]: + t.wait_for_vttablet_state('NOT_SERVING') + + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80', + shard_0_master.tablet_alias], auto_log=True) + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-', + shard_1_master.tablet_alias], auto_log=True) + + for t in [shard_0_replica, shard_1_replica]: + utils.wait_for_tablet_type(t.tablet_alias, 'replica') + + sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly] + for t in sharded_tablets: + t.wait_for_vttablet_state('SERVING') + + # we need to create the schema, and the worker will do data copying + for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'): + utils.run_vtctl(['CopySchemaShard', + 'test_keyspace/0', + keyspace_shard], + auto_log=True) + + utils.run_vtctl( + ['SplitClone', 'test_keyspace', '0', '-80,80-'], auto_log=True) + + utils.run_vtctl( + ['MigrateServedTypes', 'test_keyspace/0', 'rdonly'], auto_log=True) + utils.run_vtctl( + ['MigrateServedTypes', 'test_keyspace/0', 'replica'], 
auto_log=True) + # then serve master from the split shards + utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'], + auto_log=True) + + # remove the original tablets in the original shard + tablet.kill_tablets([tablet_master, tablet_replica1, tablet_rdonly]) + for t in [tablet_replica1, tablet_rdonly]: + utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True) + utils.run_vtctl(['DeleteTablet', '-allow_master', + tablet_master.tablet_alias], auto_log=True) + + # rebuild the serving graph, all mentions of the old shards should be gone + utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) + + # delete the original shard + utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True) + + # now bring up the recovery keyspace and a tablet, letting it restore from backup. + self._restore(tablet_replica2, 'recovery_keyspace', '0') + + # check the new replica does not have the data + self._check_data(tablet_replica2, 2, 'replica2 tablet should not have new data') + + # start vtgate + vtgate = utils.VtGate() + vtgate.start(tablets=[ + shard_0_master, shard_0_replica, shard_1_master, shard_1_replica, tablet_replica2 + ], tablet_types_to_wait='REPLICA') + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_keyspace.0.replica', 1) + + # check that vtgate doesn't route queries to new tablet + vtgate_conn = get_connection() + cursor = vtgate_conn.cursor( + tablet_type='replica', keyspace=None, writable=True) + + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 3) + + # check that new tablet is accessible by using ks.table + cursor.execute('select 
count(*) from recovery_keyspace.vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + # check that new tablet is accessible with 'use ks' + cursor.execute('use recovery_keyspace@replica', {}) + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + # TODO check that new tablet is accessible with 'use ks:shard' + # this currently does not work through the python client, though it works from mysql client + #cursor.execute('use recovery_keyspace:0@replica', {}) + #cursor.execute('select count(*) from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + #self.fail('Result cannot be null') + #else: + #self.assertEqual(result[0][0], 1) + + vtgate_conn.close() + tablet_replica2.kill_vttablet() + vtgate.kill() + + def test_sharded_recovery(self): + """Test recovery from backup flow. 
+ + test_recovery will: + - create a shard with master and replica1 only + - run InitShardMaster + - insert some data + - perform a resharding + - take a backup of both new shards + - insert more data on the masters of both shards + - create a recovery keyspace + - bring up tablet_replica2 and tablet_replica3 in the new keyspace + - check that new tablets do not have data created after backup + - check that vtgate queries work correctly + + """ + + # insert data on master, wait for replica to get it + utils.run_vtctl(['ApplySchema', + '-sql', self._create_vt_insert_test, + 'test_keyspace'], + auto_log=True) + self._insert_data(tablet_master, 1) + self._check_data(tablet_replica1, 1, 'replica1 tablet getting data') + # insert more data on the master + self._insert_data(tablet_master, 4) + + utils.run_vtctl(['ApplyVSchema', + '-vschema', self._vschema_json, + 'test_keyspace'], + auto_log=True) + + # create the split shards + shard_0_master.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='-80', + tablet_index=0) + shard_0_replica.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='-80', + tablet_index=1) + shard_0_rdonly.init_tablet( + 'rdonly', + keyspace='test_keyspace', + shard='-80', + tablet_index=2) + shard_1_master.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='80-', + tablet_index=0) + shard_1_replica.init_tablet( + 'replica', + keyspace='test_keyspace', + shard='80-', + tablet_index=1) + shard_1_rdonly.init_tablet( + 'rdonly', + keyspace='test_keyspace', + shard='80-', + tablet_index=2) + + for t in [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly]: + t.start_vttablet(wait_for_state=None, + binlog_use_v3_resharding_mode=True) + + for t in [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly]: + t.wait_for_vttablet_state('NOT_SERVING') + + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80', + 
shard_0_master.tablet_alias], auto_log=True) + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-', + shard_1_master.tablet_alias], auto_log=True) + + for t in [shard_0_replica, shard_1_replica]: + utils.wait_for_tablet_type(t.tablet_alias, 'replica') + + sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly, + shard_1_master, shard_1_replica, shard_1_rdonly] + for t in sharded_tablets: + t.wait_for_vttablet_state('SERVING') + + # we need to create the schema, and the worker will do data copying + for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'): + utils.run_vtctl(['CopySchemaShard', + 'test_keyspace/0', + keyspace_shard], + auto_log=True) + + utils.run_vtctl( + ['SplitClone', 'test_keyspace', '0', '-80,80-'], auto_log=True) + + utils.run_vtctl( + ['MigrateServedTypes', 'test_keyspace/0', 'rdonly'], auto_log=True) + utils.run_vtctl( + ['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True) + # then serve master from the split shards + utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'], + auto_log=True) + + # remove the original tablets in the original shard + tablet.kill_tablets([tablet_master, tablet_replica1, tablet_rdonly]) + for t in [tablet_replica1, tablet_rdonly]: + utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True) + utils.run_vtctl(['DeleteTablet', '-allow_master', + tablet_master.tablet_alias], auto_log=True) + + # rebuild the serving graph, all mentions of the old shards should be gone + utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) + + # delete the original shard + utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True) + + result = shard_0_master.mquery('vt_test_keyspace', "select count(*) from vt_insert_test") + shard_0_count = result[0][0] + logging.debug("Shard -80 has %d rows", shard_0_count) + shard_0_test_id = 0 + if shard_0_count > 0: + result = shard_0_master.mquery('vt_test_keyspace', "select id from vt_insert_test") + 
shard_0_test_id = result[0][0] + + result = shard_1_master.mquery('vt_test_keyspace', "select count(*) from vt_insert_test") + shard_1_count = result[0][0] + logging.debug("Shard 80- has %d rows", shard_1_count) + shard_1_test_id = 0 + if shard_1_count > 0: + result = shard_1_master.mquery('vt_test_keyspace', "select id from vt_insert_test") + shard_1_test_id = result[0][0] + + # backup the new shards + utils.run_vtctl(['Backup', shard_0_replica.tablet_alias], auto_log=True) + utils.run_vtctl(['Backup', shard_1_replica.tablet_alias], auto_log=True) + + # check that the backup shows up in the listing + backups = self._list_backups('-80') + logging.debug('list of backups: %s', backups) + self.assertEqual(len(backups), 1) + self.assertTrue(backups[0].endswith(shard_0_replica.tablet_alias)) + + backups = self._list_backups('80-') + logging.debug('list of backups: %s', backups) + self.assertEqual(len(backups), 1) + self.assertTrue(backups[0].endswith(shard_1_replica.tablet_alias)) + + # start vtgate + vtgate = utils.VtGate() + vtgate.start(tablets=[ + shard_0_master, shard_1_master + ], tablet_types_to_wait='MASTER') + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1) + + vtgate_conn = get_connection() + cursor = vtgate_conn.cursor( + tablet_type='master', keyspace=None, writable=True) + # insert more data on the masters + for i in [2, 3]: + cursor.execute('insert into vt_insert_test (id, msg) values (:id, :msg)', {'id': i, 'msg': 'test %s' % i}) + + vtgate_conn.close() + vtgate.kill() + + # now bring up the recovery keyspace and 2 tablets, letting it restore from backup. 
+ self._restore(tablet_replica2, 'recovery_keyspace', '-80') + self._restore(tablet_replica3, 'recovery_keyspace', '80-') + + # check the new replicas have the correct number of rows + self._check_data(tablet_replica2, shard_0_count, 'replica2 tablet should not have new data') + self._check_data(tablet_replica3, shard_1_count, 'replica3 tablet should not have new data') + + # start vtgate + vtgate = utils.VtGate() + vtgate.start(tablets=[ + shard_0_master, shard_0_replica, shard_1_master, shard_1_replica, tablet_replica2, tablet_replica3 + ], tablet_types_to_wait='REPLICA') + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_keyspace.-80.replica', 1) + utils.vtgate.wait_for_endpoints('recovery_keyspace.80-.replica', 1) + + # check that vtgate doesn't route queries to new tablet + vtgate_conn = get_connection() + cursor = vtgate_conn.cursor( + tablet_type='replica', keyspace=None, writable=True) + + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 4) + + # check that new keyspace is accessible by using ks.table + cursor.execute('select count(*) from recovery_keyspace.vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + # check that new keyspace is accessible with 'use ks' + cursor.execute('use recovery_keyspace@replica', {}) + cursor.execute('select count(*) from vt_insert_test', {}) + result = cursor.fetchall() + if not result: + self.fail('Result cannot be null') + else: + self.assertEqual(result[0][0], 2) + + # TODO check that new tablet is accessible with 'use ks:shard' + # this 
currently does not work through the python client, though it works from mysql client + #cursor.execute('use recovery_keyspace:-80@replica', {}) + #cursor.execute('select count(*) from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + # self.fail('Result cannot be null') + #else: + # self.assertEqual(result[0][0], shard_0_count) + #cursor.execute('select id from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + # self.fail('Result cannot be null') + #else: + # self.assertEqual(result[0][0], shard_0_test_id) + + #cursor.execute('use recovery_keyspace:80-@replica', {}) + #cursor.execute('select count(*) from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + # self.fail('Result cannot be null') + #else: + # self.assertEqual(result[0][0], shard_1_count) + #cursor.execute('use recovery_keyspace:80-@replica', {}) + #cursor.execute('select id from vt_insert_test', {}) + #result = cursor.fetchall() + #if not result: + # self.fail('Result cannot be null') + #else: + # self.assertEqual(result[0][0], shard_1_test_id) + + vtgate_conn.close() + tablet_replica2.kill_vttablet() + tablet_replica3.kill_vttablet() + vtgate.kill() + +if __name__ == '__main__': + utils.main() diff --git a/test/xb_recovery.py b/test/xb_recovery.py new file mode 100755 index 00000000000..d73483d707c --- /dev/null +++ b/test/xb_recovery.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright 2019 The Vitess Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Re-runs recovery.py with use_xtrabackup=True.""" + +import recovery +import utils + +if __name__ == '__main__': + recovery.use_xtrabackup = True + utils.main(recovery)