 	shardKsName      = fmt.Sprintf("%s/%s", keyspaceName, shardName)
 	dbCredentialFile string
 	shardName        = "0"
-	commonTabletArg  = []string{
-		"--vreplication_retry_delay", "1s",
-		"--degraded_threshold", "5s",
-		"--lock_tables_timeout", "5s",
-		"--watch_replication_stream",
-		"--enable_replication_reporter",
-		"--serving_state_grace_period", "1s",
-	}
+	commonTabletArg = getDefaultCommonArgs()
 
 	vtInsertTest = `
 	create table vt_insert_test (
@@ -1456,3 +1449,149 @@ func verifyTabletRestoreStats(t *testing.T, vars map[string]any) {
 
 	require.Contains(t, bd, "BackupStorage.File.File:Read")
 }
+
+func getDefaultCommonArgs() []string {
+	return []string{
+		"--vreplication_retry_delay", "1s",
+		"--degraded_threshold", "5s",
+		"--lock_tables_timeout", "5s",
+		"--watch_replication_stream",
+		"--enable_replication_reporter",
+		"--serving_state_grace_period", "1s",
+	}
+}
+
+func setDefaultCommonArgs() { commonTabletArg = getDefaultCommonArgs() }
+
+// getBackupEngineOfLastBackup returns the backup engine used by the last backup taken by the end-to-end tests.
+func getBackupEngineOfLastBackup(t *testing.T) string {
+	lastBackup := getLastBackup(t)
+
+	manifest := readManifestFile(t, path.Join(localCluster.CurrentVTDATAROOT, "backups", keyspaceName, shardName, lastBackup))
+
+	return manifest.BackupMethod
+}
+
+func getLastBackup(t *testing.T) string {
+	backups, err := localCluster.ListBackups(shardKsName)
+	require.NoError(t, err)
+
+	return backups[len(backups)-1]
+}
+
+func TestBackupEngineSelector(t *testing.T) {
+	defer setDefaultCommonArgs()
+	defer cluster.PanicHandler(t)
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("backup with backup-engine=builtin", func(t *testing.T) {
+		// first back up with an alternative engine (builtin)
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "builtin", engineUsed)
+	})
+
+	t.Run("backup with backup-engine=xtrabackup", func(t *testing.T) {
+		// then back up specifying the xtrabackup engine explicitly
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+
+	t.Run("backup without specifying backup-engine", func(t *testing.T) {
+		// check that we still default to the xtrabackup engine when none is specified
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+}
+
+func TestRestoreAllowedBackupEngines(t *testing.T) {
+	defer setDefaultCommonArgs()
+	defer cluster.PanicHandler(t)
+
+	backupMsg := "right after xtrabackup backup"
+
+	cDetails := &CompressionDetails{CompressorEngineName: "pgzip"}
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, cDetails)
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("generate backups", func(t *testing.T) {
+		// take two backups, each using a different backup engine
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+	})
+
+	// insert more data on the primary
+	_, err = primary.VttabletProcess.QueryTablet(fmt.Sprintf("insert into vt_insert_test (msg) values ('%s')", backupMsg), keyspaceName, true)
+	require.NoError(t, err)
+
+	t.Run("restore replica and verify data", func(t *testing.T) {
+		// now bring up another replica, letting it restore from backup
+		restoreWaitForBackup(t, "replica", cDetails, true)
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+
+		// check that the new replica has the data
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+
+	t.Run("test broken restore", func(t *testing.T) {
+		// now let's break the last backup in the shard
+		err = os.Remove(path.Join(localCluster.CurrentVTDATAROOT,
+			"backups", keyspaceName, shardName,
+			getLastBackup(t), "backup.xbstream.gz"))
+		require.NoError(t, err)
+
+		// and try to restore from it
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", replica2.Alias)
+		require.Error(t, err) // this should fail
+	})
+
+	t.Run("test older working backup", func(t *testing.T) {
+		// retry, this time restricted to the first (builtin) backup
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--allowed-backup-engines=builtin", replica2.Alias)
+		require.NoError(t, err) // this should succeed
+
+		// make sure we are replicating after the restore is done
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+}
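
getBackupEngineOfLastBackup works because every backup directory carries a MANIFEST file that records, among other things, which engine produced the backup. A minimal standalone sketch of that read, assuming a JSON MANIFEST with a BackupMethod field; the trimmed struct and the backupEngine helper here are illustrations, not the test package's real readManifestFile:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path"
)

// manifest mirrors only the single MANIFEST field this sketch needs.
type manifest struct {
	BackupMethod string `json:"BackupMethod"`
}

// backupEngine reads <backupDir>/MANIFEST and returns the engine that
// produced the backup, e.g. "builtin" or "xtrabackup".
func backupEngine(backupDir string) (string, error) {
	data, err := os.ReadFile(path.Join(backupDir, "MANIFEST"))
	if err != nil {
		return "", err
	}
	var m manifest
	if err := json.Unmarshal(data, &m); err != nil {
		return "", err
	}
	return m.BackupMethod, nil
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: backupengine <backup-dir>")
		os.Exit(2)
	}
	engine, err := backupEngine(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(engine)
}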
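Both tests begin with defer setDefaultCommonArgs() so that commonTabletArg is restored for whatever test runs next, the usual Go pattern for shared package-level state. A self-contained illustration with hypothetical names (args, resetArgs stand in for commonTabletArg, setDefaultCommonArgs):

package main

import "fmt"

// args stands in for package-level state such as commonTabletArg.
var args = defaultArgs()

func defaultArgs() []string {
	return []string{"--serving_state_grace_period", "1s"}
}

// resetArgs restores the defaults; callers defer it before mutating args.
func resetArgs() { args = defaultArgs() }

func main() {
	func() {
		defer resetArgs()
		args = append(args, "--extra-flag") // test-specific mutation
		fmt.Println("during test:", args)
	}()
	fmt.Println("after test: ", args) // back to the defaults
}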