diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
index 25055fd5e8e6..0dd0aeb8de65 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -42,7 +42,7 @@ public interface BackupAdmin extends Closeable {
    * @return the backup Id
    */
-  String backupTables(final BackupRequest userRequest) throws IOException;
+  BackupInfo backupTables(final BackupRequest userRequest) throws IOException;
 
   /**
    * Restore backup
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index c36b398e5e86..4cffce870f01 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -510,7 +510,7 @@ public void restore(RestoreRequest request) throws IOException {
   }
 
   @Override
-  public String backupTables(BackupRequest request) throws IOException {
+  public BackupInfo backupTables(BackupRequest request) throws IOException {
     BackupType type = request.getBackupType();
     String targetRootDir = request.getTargetRootDir();
     List<TableName> tableList = request.getTableList();
@@ -593,7 +593,7 @@ public String backupTables(BackupRequest request) throws IOException {
 
     client.execute();
 
-    return backupId;
+    return client.backupInfo;
   }
 
   private List<TableName> excludeNonExistingTables(List<TableName> tableList,
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 66694f4384f4..055dd60fb32d 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -347,7 +347,7 @@ public void execute() throws IOException {
           .withTargetRootDir(targetBackupDir).withTotalTasks(workers)
           .withBandwidthPerTasks(bandwidth).withNoChecksumVerify(ignoreChecksum)
           .withBackupSetName(setName).build();
-      String backupId = admin.backupTables(request);
+      String backupId = admin.backupTables(request).getBackupId();
       System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
     } catch (IOException e) {
       System.out.println("Backup session finished. Status: FAILURE");
Status: FAILURE"); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index c92c0747e83c..d35b3d903e1b 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -20,8 +20,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -33,7 +36,9 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -93,13 +98,36 @@ public Map getIncrBackupLogFileMap() throws IOException { } newTimestamps = readRegionServerLastLogRollResult(); - logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); + logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode, + getParticipatingServerNames(backupInfo.getTables())); logList = excludeProcV2WALs(logList); backupInfo.setIncrBackupFileList(logList); return newTimestamps; } + private Set getParticipatingServerNames(Set tables) throws IOException { + Set
+  private Set<String> getParticipatingServerNames(Set<TableName> tables) throws IOException {
+    Set<Address> participatingServers = new HashSet<>();
+    boolean flag = false;
+    for (TableName table : tables) {
+      RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+      if (rsGroupInfo != null && !rsGroupInfo.getServers().isEmpty()) {
+        LOG.info("Participating servers for table {}, rsgroup name: {} are: {}", table,
+          rsGroupInfo.getName(), rsGroupInfo.getServers());
+        participatingServers.addAll(rsGroupInfo.getServers());
+      } else {
+        LOG.warn(
+          "RSGroup is not available for table {}; all servers in the cluster will participate",
+          table);
+        flag = true;
+      }
+    }
+
+    return flag
+      ? new HashSet<>()
+      : participatingServers.stream().map(a -> a.toString()).collect(Collectors.toSet());
+  }
+
   private List<String> excludeProcV2WALs(List<String> logList) {
     List<String> list = new ArrayList<>();
     for (int i = 0; i < logList.size(); i++) {
@@ -126,8 +154,8 @@ private List<String> excludeProcV2WALs(List<String> logList) {
    * @throws IOException exception
    */
   private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
-    Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
-    throws IOException {
+    Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode,
+    Set<String> servers) throws IOException {
     LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps
       + "\n newestTimestamps: " + newestTimestamps);
 
@@ -160,7 +188,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
     for (FileStatus rs : rss) {
       p = rs.getPath();
       host = BackupUtils.parseHostNameFromLogFile(p);
-      if (host == null) {
+      if (host == null || (!servers.isEmpty() && !servers.contains(host))) {
         continue;
       }
       FileStatus[] logs;
@@ -215,7 +243,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
         continue;
       }
       host = BackupUtils.parseHostFromOldLog(p);
-      if (host == null) {
+      if (host == null || (!servers.isEmpty() && !servers.contains(host))) {
         continue;
       }
       currentLogTS = BackupUtils.getCreationTime(p);
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index 1c6bc4077d7f..33c041b22e9c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -21,8 +21,10 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -41,6 +43,7 @@
 import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -98,6 +101,7 @@ private Map<Address, Long> serverToPreservationBoundaryTs(List<BackupInfo> backu
 
     // This map tracks, for every backup root, the most recent created backup (= highest timestamp)
     Map<String, BackupInfo> newestBackupPerRootDir = new HashMap<>();
+    Set<Address> servers = new HashSet<>();
     for (BackupInfo backup : backups) {
       BackupInfo existingEntry = newestBackupPerRootDir.get(backup.getBackupRootDir());
       if (existingEntry == null || existingEntry.getStartTs() < backup.getStartTs()) {
@@ -105,6 +109,21 @@ private Map<Address, Long> serverToPreservationBoundaryTs(List<BackupInfo> backu
       }
     }
 
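+    // Collect the servers hosting the backed-up tables: the rsgroup servers when a table belongs
+    // to an rsgroup, otherwise every region server in the cluster.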
+    for (BackupInfo backup : backups) {
+      for (TableName table : backup.getTables()) {
+        RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+        if (
+          rsGroupInfo != null && rsGroupInfo.getServers() != null
+            && !rsGroupInfo.getServers().isEmpty()
+        ) {
+          servers.addAll(rsGroupInfo.getServers());
+        } else {
+          servers.addAll(conn.getAdmin().getRegionServers().stream().map(s -> s.getAddress())
+            .collect(Collectors.toList()));
+        }
+      }
+    }
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("WAL cleanup time-boundary using info from: {}. ",
         newestBackupPerRootDir.entrySet().stream()
@@ -124,7 +143,7 @@ private Map<Address, Long> serverToPreservationBoundaryTs(List<BackupInfo> backu
       .entrySet()) {
       Address address = Address.fromString(entry.getKey());
       Long storedTs = boundaries.get(address);
-      if (storedTs == null || entry.getValue() < storedTs) {
+      if ((storedTs == null || entry.getValue() < storedTs) && servers.contains(address)) {
         boundaries.put(address, entry.getValue());
       }
     }
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index b9a76347440e..3ac364d338f1 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.hbase.backup;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -58,7 +62,10 @@
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.regionserver.LogRoller;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
 import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
@@ -87,6 +94,15 @@ public class TestBackupBase {
   protected static Configuration conf1;
   protected static Configuration conf2;
 
+  protected static final int RSGROUP_RS_NUM = 5;
+  protected static final int NUM_REGIONSERVERS = 3;
+  protected static final String RSGROUP_NAME = "rsgroup1";
+  protected static final String RSGROUP_NAMESPACE = "rsgroup_ns";
+  protected static final TableName RSGROUP_TABLE_1 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table1");
+  protected static final TableName RSGROUP_TABLE_2 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table2");
+
   protected static TableName table1 = TableName.valueOf("table1");
   protected static TableDescriptor table1Desc;
   protected static TableName table2 = TableName.valueOf("table2");
@@ -108,6 +124,7 @@ public class TestBackupBase {
   protected static boolean autoRestoreOnFailure;
   protected static boolean useSecondCluster;
+  protected static boolean enableRSgroup;
 
   static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
     public IncrementalTableBackupClientForTest() {
@@ -292,6 +309,22 @@ public void execute() throws IOException {
     }
   }
 
+  private static RSGroupInfo addGroup(String groupName, int serverCount) throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
+    RSGroupInfo defaultInfo = admin.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
+    admin.addRSGroup(groupName);
+    Set<Address> set = new HashSet<>();
+    for (Address server : defaultInfo.getServers()) {
+      if (set.size() == serverCount) {
+        break;
+      }
+      set.add(server);
+    }
+    admin.moveServersToRSGroup(set, groupName);
+    RSGroupInfo result = admin.getRSGroup(groupName);
+    return result;
+  }
+
   public static void setUpHelper() throws Exception {
     BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
     BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
@@ -314,7 +347,13 @@ public static void setUpHelper() throws Exception {
     // Set MultiWAL (with 2 default WAL files per RS)
     conf1.set(WALFactory.WAL_PROVIDER, provider);
 
-    TEST_UTIL.startMiniCluster();
+    if (enableRSgroup) {
+      conf1.setBoolean(RSGroupUtil.RS_GROUP_ENABLED, true);
+      TEST_UTIL.startMiniCluster(RSGROUP_RS_NUM + NUM_REGIONSERVERS);
+      addGroup(RSGROUP_NAME, RSGROUP_RS_NUM);
+    } else {
+      TEST_UTIL.startMiniCluster();
+    }
 
     if (useSecondCluster) {
       conf2 = HBaseConfiguration.create(conf1);
@@ -352,6 +391,7 @@ public static void setUpHelper() throws Exception {
   public static void setUp() throws Exception {
     TEST_UTIL = new HBaseTestingUtil();
     conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
     setUpHelper();
@@ -377,6 +417,7 @@ public static void tearDown() throws Exception {
     }
     TEST_UTIL.shutdownMiniCluster();
     TEST_UTIL.shutdownMiniMapReduceCluster();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
   }
@@ -406,16 +447,16 @@ protected BackupRequest createBackupRequest(BackupType type, List<TableName> tab
     return request;
   }
 
-  protected String backupTables(BackupType type, List<TableName> tables, String path)
+  protected BackupInfo backupTables(BackupType type, List<TableName> tables, String path)
     throws IOException {
     Connection conn = null;
     BackupAdmin badmin = null;
-    String backupId;
+    BackupInfo backupInfo;
    try {
       conn = ConnectionFactory.createConnection(conf1);
       badmin = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(type, new ArrayList<>(tables), path);
-      backupId = badmin.backupTables(request);
+      backupInfo = badmin.backupTables(request);
     } finally {
       if (badmin != null) {
         badmin.close();
@@ -424,14 +465,14 @@ protected String backupTables(BackupType type, List<TableName> tables, String pa
         conn.close();
       }
     }
-    return backupId;
+    return backupInfo;
   }
 
-  protected String fullTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo fullTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
   }
 
-  protected String incrementalTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo incrementalTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
   }
 
@@ -479,6 +520,23 @@ protected static void createTables() throws Exception {
     table.close();
     ha.close();
     conn.close();
+
+    if (enableRSgroup) {
+      ha.createNamespace(NamespaceDescriptor.create(RSGROUP_NAMESPACE)
+        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, RSGROUP_NAME).build());
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_1);
+      loadTable(table);
+      table.close();
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_2)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_2);
+      loadTable(table);
+      table.close();
+    }
   }
 
   protected boolean checkSucceeded(String backupId) throws IOException {
@@ -501,7 +559,7 @@ protected boolean checkFailed(String backupId) throws IOException {
     return status.getState() == BackupState.FAILED;
   }
 
-  private BackupInfo getBackupInfo(String backupId) throws IOException {
+  protected BackupInfo getBackupInfo(String backupId) throws IOException {
     try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
       BackupInfo status = table.readBackupInfo(backupId);
       return status;
@@ -538,6 +596,26 @@ protected List<FileStatus> getListOfWALFiles(Configuration c) throws IOException
     return logFiles;
   }
 
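+  /** Returns the server addresses of the given rsgroup, or an empty set when it has none. */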
+  protected Set<Address> getRsgroupServers(String rsgroupName) throws IOException {
+    RSGroupInfo rsGroupInfo = TEST_UTIL.getAdmin().getRSGroup(rsgroupName);
+    if (
+      rsGroupInfo != null && rsGroupInfo.getServers() != null && !rsGroupInfo.getServers().isEmpty()
+    ) {
+      return new HashSet<>(rsGroupInfo.getServers());
+    }
+    return new HashSet<>();
+  }
+
+  protected void checkIfWALFilesBelongToRsgroup(List<String> walFiles, String rsgroupName)
+    throws IOException {
+    for (String file : walFiles) {
+      Address walServerAddress =
+        Address.fromString(BackupUtils.parseHostNameFromLogFile(new Path(file)));
+      assertTrue("Backed-up WAL files should be from RSGroup " + rsgroupName,
+        getRsgroupServers(rsgroupName).contains(walServerAddress));
+    }
+  }
+
   protected void dumpBackupDir() throws IOException {
     // Dump Backup Dir
     FileSystem fs = FileSystem.get(conf1);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index 785859c52805..c7282457c6e9 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -60,7 +60,7 @@ public class TestBackupDelete extends TestBackupBase {
   public void testBackupDelete() throws Exception {
     LOG.info("test backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
@@ -87,7 +87,7 @@ public void testBackupDelete() throws Exception {
   public void testBackupDeleteCommand() throws Exception {
     LOG.info("test backup delete on a single table with data: command-line");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -119,7 +119,7 @@ public long currentTime() {
         return System.currentTimeMillis() - 2 * 24 * 3600 * 1000;
       }
     });
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     EnvironmentEdgeManager.reset();
@@ -169,12 +169,12 @@ public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception {
     LOG.info("Test backup delete updates the incremental backup set");
     BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
 
-    String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2));
+    String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2)).getBackupId();
     assertTrue(checkSucceeded(backupId1));
     assertEquals(Sets.newHashSet(table1, table2),
       backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
 
-    String backupId2 = fullTableBackup(Lists.newArrayList(table3));
+    String backupId2 = fullTableBackup(Lists.newArrayList(table3)).getBackupId();
     assertTrue(checkSucceeded(backupId2));
     assertEquals(Sets.newHashSet(table1, table2, table3),
       backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 2798e1a16f0d..9499957c5688 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -56,7 +56,7 @@ public void testBackupDeleteRestore() throws Exception {
 
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     int numRows = TEST_UTIL.countRows(table1);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
index 12c8d5c4065c..254783346cd1 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
@@ -142,7 +142,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure... failures
     throws Exception {
     LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 7ce039fd6668..489f17e289d4 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -82,7 +82,7 @@ public void testBackupDescribeCommand() throws Exception {
 
     LOG.info("test backup describe on a single table with data: command-line");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
new file mode 100644
index 000000000000..3a42b7a6b8fd
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestBackupLogCleanerWithRsgroup extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupLogCleanerWithRsgroup.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestBackupLogCleanerWithRsgroup.class);
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = true;
+    autoRestoreOnFailure = true;
+    useSecondCluster = false;
+    setUpHelper();
+  }
+
+  @Test
+  public void testBackupLogCleanerRsgroup() throws Exception {
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tableSetFullList = Lists.newArrayList(RSGROUP_TABLE_1);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      Map<String, Object> params = new HashMap<>();
+      params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
+      cleaner.init(params);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete all files because we have not yet recorded any backup sessions
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+      String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      Table t1 = conn.getTable(RSGROUP_TABLE_1);
+      Put p1;
+      Random rnd = new Random();
+      for (int i = 0; i < 5000; i++) {
+        p1 = new Put(Bytes.toBytes(1000000 + rnd.nextInt(9000000)));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+      t1.close();
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      // The new list of WAL files is longer than the previous one,
+      // because new WALs have been opened per RS after the full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(newWalFiles.size() > Iterables.size(deletable));
+
+      // #3 - incremental backup
+      List<TableName> tableSetIncList = Lists.newArrayList(RSGROUP_TABLE_1);
+      String backupIdIncMultiple =
+        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index 38204f68e31a..f72fc5dc3f71 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -64,7 +64,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -85,7 +85,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -97,7 +97,7 @@ public void TestIncBackupMergeRestore() throws Exception {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
@@ -139,15 +139,15 @@ public void testIncBackupMergeRestoreSeparateFs() throws Exception {
     List<TableName> tables = Lists.newArrayList(table1, table2);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, true);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
 
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index 36cecd3faf58..5149880820dd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -63,7 +63,7 @@ public void testBackupMultipleDeletes() throws Exception {
     Admin admin = conn.getAdmin();
     BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
     Table t1 = conn.getTable(table1);
@@ -78,7 +78,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc1 = client.backupTables(request);
+    String backupIdInc1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
     Table t2 = conn.getTable(table2);
@@ -91,7 +91,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #5 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc2 = client.backupTables(request);
+    String backupIdInc2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
     t1 = conn.getTable(table1);
@@ -103,7 +103,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #7 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc3 = client.backupTables(request);
+    String backupIdInc3 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
     t2 = conn.getTable(table2);
@@ -115,17 +115,17 @@ public void testBackupMultipleDeletes() throws Exception {
     // #9 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc4 = client.backupTables(request);
+    String backupIdInc4 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc4));
     // #10 full backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull2 = client.backupTables(request);
+    String backupIdFull2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull2));
     // #11 - incremental backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc5 = client.backupTables(request);
+    String backupIdInc5 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc5));
     LOG.error("Delete backupIdInc2");
     client.deleteBackups(new String[] { backupIdInc2 });
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
index 300ca360a4ee..1fb37b51f878 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
@@ -220,7 +220,7 @@ private String backup(BackupType backupType, List<TableName> tables) throws IOEx
       BackupRequest backupRequest =
         new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString())
           .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build();
-      return backupAdmin.backupTables(backupRequest);
+      return backupAdmin.backupTables(backupRequest).getBackupId();
     }
   }
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
index 62ba5006ac7b..0723aa43b1c3 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
@@ -217,7 +217,7 @@ private String backup(BackupType backupType, List<TableName> tables) throws IOEx
       BackupRequest backupRequest =
         new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString())
           .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build();
-      return backupAdmin.backupTables(backupRequest);
+      return backupAdmin.backupTables(backupRequest).getBackupId();
     }
   }
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index fa624250929d..4d39ec175f2b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -68,7 +68,7 @@ public void testBackupHistory() throws Exception {
 
     LOG.info("test backup history on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -92,7 +92,7 @@ public void testBackupHistory() throws Exception {
     assertTrue(output.indexOf(backupId) > 0);
 
     tableList = Lists.newArrayList(table2);
-    String backupId2 = fullTableBackup(tableList);
+    String backupId2 = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId2));
     LOG.info("backup complete: " + table2);
     BackupInfo.Filter tableNameFilter = image -> {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
index 1a1e5dbf1cc1..93f99c72f0a9 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -53,7 +53,7 @@ public void testBackupStatusProgress() throws Exception {
 
     LOG.info("test backup status/progress on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
 
@@ -70,7 +70,7 @@ public void testBackupStatusProgressCommand() throws Exception {
 
     LOG.info("test backup status/progress on a single table with data: command-line");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index d16d7af75014..12cd012460dd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -56,7 +56,7 @@ public void testFullRestoreSingle() throws Exception {
 
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -92,7 +92,7 @@ public void testFullRestoreSingleWithRegion() throws Exception {
     TEST_UTIL.compact(tableName, true);
 
     List<TableName> tables = Lists.newArrayList(tableName);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -114,7 +114,7 @@ public void testFullRestoreSingleCommand() throws Exception {
 
     LOG.info("test full restore on a single table empty table: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -135,7 +135,7 @@ public void testFullRestoreCheckCommand() throws Exception {
 
     LOG.info("test full restore on a single table: command-line, check only");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -157,7 +157,7 @@ public void testFullRestoreCheckCommand() throws Exception {
   public void testFullRestoreMultiple() throws Exception {
     LOG.info("create full backup image on multiple tables");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -181,7 +181,7 @@ public void testFullRestoreMultiple() throws Exception {
   public void testFullRestoreMultipleCommand() throws Exception {
     LOG.info("create full backup image on multiple tables: command-line");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -210,7 +210,7 @@ public void testFullRestoreMultipleCommand() throws Exception {
   public void testFullRestoreSingleOverwrite() throws Exception {
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -229,7 +229,7 @@ public void testFullRestoreSingleOverwrite() throws Exception {
   public void testFullRestoreSingleOverwriteCommand() throws Exception {
     LOG.info("test full restore on a single table empty table: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
@@ -254,7 +254,7 @@ public void testFullRestoreMultipleOverwrite() throws Exception {
     LOG.info("create full backup image on multiple tables");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -272,7 +272,7 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception {
     LOG.info("create full backup image on multiple tables: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -297,7 +297,7 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception {
   public void testFullRestoreSingleDNE() throws Exception {
     LOG.info("test restore fails on a single table that does not exist");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -317,7 +317,7 @@ public void testFullRestoreSingleDNE() throws Exception {
   public void testFullRestoreSingleDNECommand() throws Exception {
     LOG.info("test restore fails on a single table that does not exist: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -340,7 +340,7 @@ public void testFullRestoreMultipleDNE() throws Exception {
     LOG.info("test restore fails on multiple tables that do not exist");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
@@ -360,7 +360,7 @@ public void testFullRestoreMultipleDNECommand() throws Exception {
     LOG.info("test restore fails on multiple tables that do not exist: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 7f6182c6e543..a93538080f49 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LogRoller;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -202,8 +203,12 @@ public void TestIncBackupRestore() throws Exception {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    BackupInfo backupInfoIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
+    checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(),
+      RSGroupInfo.DEFAULT_GROUP);
+
     BackupManifest manifest =
       HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
     assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
@@ -233,8 +238,11 @@ public void TestIncBackupRestore() throws Exception {
 
     // #4 - additional incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
+    checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(),
+      RSGroupInfo.DEFAULT_GROUP);
     validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);
 
     // #5 - restore full backup for all tables
@@ -301,7 +309,7 @@ public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
     Connection conn = TEST_UTIL.getConnection();
     BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String fullBackupId = backupAdmin.backupTables(request);
+    String fullBackupId = backupAdmin.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(fullBackupId));
 
     TableName[] fromTables = new TableName[] { table1 };
@@ -334,7 +342,7 @@ public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
     assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));
 
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String incrementalBackupId = backupAdmin.backupTables(request);
+    String incrementalBackupId = backupAdmin.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(incrementalBackupId));
     preRestoreBackupFiles = getBackupFiles();
     backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
@@ -366,7 +374,7 @@ public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
     }
 
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    incrementalBackupId = backupAdmin.backupTables(request);
+    incrementalBackupId = backupAdmin.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(incrementalBackupId));
 
     preRestoreBackupFiles = getBackupFiles();
@@ -406,7 +414,7 @@ public void TestIncBackupRestoreWithOriginalSplitsSeperateFs() throws Exception
     BackupRequest request =
       createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
-    String incrementalBackupId = admin.backupTables(request);
+    String incrementalBackupId = admin.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(incrementalBackupId));
 
     TableName[] fromTable = new TableName[] { table1 };
@@ -485,7 +493,7 @@ public void TestIncBackupRestoreHandlesArchivedFiles() throws Exception {
     BackupRequest request =
       createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
-    String incrementalBackupId = admin.backupTables(request);
+    String incrementalBackupId = admin.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(incrementalBackupId));
 
     TableName[] fromTable = new TableName[] { table1 };
@@ -516,7 +524,7 @@ private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmi
     boolean noChecksumVerify) throws IOException {
     BackupRequest req =
       createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
-    String backupId = backupAdmin.backupTables(req);
+    String backupId = backupAdmin.backupTables(req).getBackupId();
     checkSucceeded(backupId);
     return backupId;
   }
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index a5eec87fb06b..0d7d5528558d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -65,7 +65,7 @@ public void testIncBackupDeleteTable() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -88,7 +88,7 @@ public void testIncBackupDeleteTable() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java
index 058413fa1d15..897b67865ffc 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithBulkLoad.java
@@ -216,7 +216,7 @@ private String backup(BackupType backupType, List<TableName> tables) throws IOEx
       BackupRequest backupRequest =
         new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString())
           .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build();
-      return backupAdmin.backupTables(backupRequest);
+      return backupAdmin.backupTables(backupRequest).getBackupId();
     }
   }
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 1ece1770489b..0e4b3f32cbf7 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -240,7 +240,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -261,7 +261,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -273,7 +273,7 @@ public void TestIncBackupMergeRestore() throws Exception {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     // #4 Merge backup images with failures
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index d24ec160d0cb..8ab1e8b91ff6 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -69,7 +69,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -96,7 +96,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
     // #4 bulk load again
     LOG.debug("bulk loading into " + testName);
@@ -109,7 +109,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     // #5 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple1 = client.backupTables(request);
+    String backupIdIncMultiple1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple1));
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
@@ -124,7 +124,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
 
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    backupIdFull = client.backupTables(request);
+    backupIdFull = client.backupTables(request).getBackupId();
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<BulkLoad> bulkLoads = table.readBulkloadRows(tables);
       assertTrue("bulkloads still has " + bulkLoads.size() + " entries", bulkLoads.isEmpty());
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java
index cf442f5f0dd7..26a585fde779 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithDataLoss.java
@@ -54,11 +54,12 @@ public void testFullBackupBreaksDependencyOnOlderBackups() throws Exception {
     List<TableName> tables = Lists.newArrayList(table1);
 
     insertIntoTable(conn, table1, famName, 1, 1).close();
-    String backup1 =
-      client.backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR));
+    String backup1 = client
+      .backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR)).getBackupId();
     insertIntoTable(conn, table1, famName, 2, 1).close();
     String backup2 =
-      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
+      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR))
+        .getBackupId();
 
     assertTrue(checkSucceeded(backup1));
     assertTrue(checkSucceeded(backup2));
@@ -67,14 +68,16 @@ public void testFullBackupBreaksDependencyOnOlderBackups() throws Exception {
     TEST_UTIL.getTestFileSystem().delete(new Path(BACKUP_ROOT_DIR, backup2), true);
 
     insertIntoTable(conn, table1, famName, 4, 1).close();
-    String backup4 =
-      client.backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR));
+    String backup4 = client
+      .backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR)).getBackupId();
     insertIntoTable(conn, table1, famName, 5, 1).close();
     String backup5 =
-      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
+      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR))
+        .getBackupId();
     insertIntoTable(conn, table1, famName, 6, 1).close();
     String backup6 =
-      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
+      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR))
+        .getBackupId();
 
     assertTrue(checkSucceeded(backup4));
     assertTrue(checkSucceeded(backup5));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
index c8d536564188..f2c122632a64 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -95,7 +95,7 @@ public void testIncBackupRestore() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java
new file mode 100644
index 000000000000..f59d8dff5f9b
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category(LargeTests.class) +@RunWith(Parameterized.class) +public class TestIncrementalBackupWithRsgroup extends TestBackupBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestIncrementalBackupWithRsgroup.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupWithRsgroup.class); + + public TestIncrementalBackupWithRsgroup(Boolean b) { + } + + @Parameterized.Parameters + public static Collection data() { + List params = new ArrayList<>(); + params.add(new Object[] { Boolean.TRUE }); + return params; + } + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtil(); + conf1 = TEST_UTIL.getConfiguration(); + enableRSgroup = true; + autoRestoreOnFailure = true; + useSecondCluster = false; + setUpHelper(); + } + + // Implement all test cases in one test, since the incremental + // backup/restore stages depend on each other + @Test + public void testIncBackupRestore() throws Exception { + int ADD_ROWS = 99; + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + List tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2); + final byte[] fam3Name = Bytes.toBytes("f3"); + final
byte[] mobName = Bytes.toBytes("mob"); + + TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true) + .setMobThreshold(5L).build()) + .build(); + TEST_UTIL.getAdmin().modifyTable(newTable1Desc); + + try (Connection conn = ConnectionFactory.createConnection(conf1)) { + int NB_ROWS_FAM3 = 6; + insertIntoTable(conn, RSGROUP_TABLE_1, fam3Name, 3, NB_ROWS_FAM3).close(); + insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_FAM3).close(); + Admin admin = conn.getAdmin(); + BackupAdminImpl client = new BackupAdminImpl(conn); + BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + String backupIdFull = client.backupTables(request).getBackupId(); + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table + Table t1 = insertIntoTable(conn, RSGROUP_TABLE_1, famName, 1, ADD_ROWS); + LOG.debug("writing " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1); + Assert.assertEquals(HBaseTestingUtil.countRows(t1), + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); + LOG.debug("written " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1); + // additionally, insert rows to MOB cf + int NB_ROWS_MOB = 111; + insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_MOB); + LOG.debug("written " + NB_ROWS_MOB + " rows to " + RSGROUP_TABLE_1 + " to Mob enabled CF"); + Assert.assertEquals(HBaseTestingUtil.countRows(t1), + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); + t1.close(); + Table t2 = conn.getTable(RSGROUP_TABLE_2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2)); + t2.close(); + LOG.debug("written 5 rows to " + RSGROUP_TABLE_2); + // split RSGROUP_TABLE_1 + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + List regions = cluster.getRegions(RSGROUP_TABLE_1); + byte[] name = regions.get(0).getRegionInfo().getRegionName(); + long startSplitTime = EnvironmentEdgeManager.currentTime(); + try { + admin.splitRegionAsync(name).get(); + } catch (Exception e) { + // Even if the split fails and an exception is thrown here, it does not + // affect the checks that follow.
+ LOG.debug("region is not splittable, because " + e); + } + while (!admin.isTableAvailable(RSGROUP_TABLE_1)) { + Thread.sleep(100); + } + long endSplitTime = EnvironmentEdgeManager.currentTime(); + // split finished + LOG.debug("split finished in =" + (endSplitTime - startSplitTime)); + + // #3 - incremental backup for multiple tables + tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + BackupInfo backupInfoIncMultiple = client.backupTables(request); + String backupIdIncMultiple = backupInfoIncMultiple.getBackupId(); + assertTrue(checkSucceeded(backupIdIncMultiple)); + checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(), RSGROUP_NAME); + + // add column family f2 to RSGROUP_TABLE_1 + // drop column family f3 + final byte[] fam2Name = Bytes.toBytes("f2"); + newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name) + .build(); + TEST_UTIL.getAdmin().modifyTable(newTable1Desc); + + int NB_ROWS_FAM2 = 7; + Table t3 = insertIntoTable(conn, RSGROUP_TABLE_1, fam2Name, 2, NB_ROWS_FAM2); + t3.close(); + + // Wait for 5 sec to make sure that old WALs were deleted + Thread.sleep(5000); + + // #4 - additional incremental backup for multiple tables + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + BackupInfo backupInfoIncMultiple2 = client.backupTables(request); + String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId(); + assertTrue(checkSucceeded(backupIdIncMultiple2)); + checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(), RSGROUP_NAME); + + // #5 - restore full backup for all tables + TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 }; + TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore }; + + LOG.debug("Restoring full " + backupIdFull); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + tablesRestoreFull, tablesMapFull, true)); + + // #6.1 - check tables for full restore + Admin hAdmin = TEST_UTIL.getAdmin(); + assertTrue(hAdmin.tableExists(table1_restore)); + assertTrue(hAdmin.tableExists(table2_restore)); + hAdmin.close(); + + // #6.2 - checking row count of tables for full restore + Table hTable = conn.getTable(table1_restore); + Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3); + hTable.close(); + + hTable = conn.getTable(table2_restore); + Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable)); + hTable.close(); + + // #7 - restore incremental backup for multiple tables, with overwrite + TableName[] tablesRestoreIncMultiple = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 }; + TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + hTable = conn.getTable(table1_restore); + + LOG.debug("After incremental restore: " + hTable.getDescriptor()); + int countFamName = TEST_UTIL.countRows(hTable, famName); + LOG.debug("f1 has " + countFamName + " rows"); + Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS); + + int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name); + LOG.debug("f2 has " + countFam2Name + " rows"); + Assert.assertEquals(countFam2Name, 
NB_ROWS_FAM2); + + int countMobName = TEST_UTIL.countRows(hTable, mobName); + LOG.debug("mob has " + countMobName + " rows"); + Assert.assertEquals(countMobName, NB_ROWS_MOB); + hTable.close(); + + hTable = conn.getTable(table2_restore); + Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable)); + hTable.close(); + admin.close(); + } + } +} diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index a148ab232dc2..3743c1916a43 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -116,7 +116,8 @@ public void testFullBackupRemote() throws Exception { latch.countDown(); String backupId = - backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); + backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR) + .getBackupId(); assertTrue(checkSucceeded(backupId)); LOG.info("backup complete " + backupId); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index b3a2872c7091..0041dc11da71 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -69,7 +69,8 @@ public static void setUp() throws Exception { public void testFullRestoreRemote() throws Exception { LOG.info("test remote full backup on a single table"); String backupId = - backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR); + backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR) + .getBackupId(); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; @@ -90,7 +91,8 @@ public void testFullRestoreRemote() throws Exception { public void testFullRestoreRemoteWithAlternateRestoreOutputDir() throws Exception { LOG.info("test remote full backup on a single table with alternate restore output dir"); String backupId = - backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR); + backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR) + .getBackupId(); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java index 93345fd17059..d6110eefb845 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java @@ -50,7 +50,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase { public void testRepairBackupDelete() throws Exception { LOG.info("test repair backup delete on a single table with data"); List tableList = Lists.newArrayList(table1); - String backupId = fullTableBackup(tableList); + String backupId = fullTableBackup(tableList).getBackupId(); assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); String[] backupIds = new String[] { 
backupId }; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index 7b49558031e8..9411e433d182 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -47,7 +47,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase { @Test public void testFullRestoreSingleEmpty() throws Exception { LOG.info("test full restore on a single table empty table"); - String backupId = fullTableBackup(toList(table1.getNameAsString())); + String backupId = fullTableBackup(toList(table1.getNameAsString())).getBackupId(); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; @@ -67,7 +67,7 @@ public void testFullRestoreMultipleEmpty() throws Exception { LOG.info("create full backup image on multiple tables"); List tables = toList(table2.getNameAsString(), table3.getNameAsString()); - String backupId = fullTableBackup(tables); + String backupId = fullTableBackup(tables).getBackupId(); TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java index 56bb25837810..83eda7ae5d9b 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java @@ -40,12 +40,15 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.Lists; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; + @Category(LargeTests.class) public class TestBackupLogCleaner extends TestBackupBase { @@ -81,7 +84,8 @@ public void testBackupLogCleaner() throws Exception { assertEquals(walFilesBeforeBackup, deletable); // Create a FULL backup B1 in backupRoot R1, containing all tables - String backupIdB1 = backupTables(BackupType.FULL, tableSetFull, backupRoot1.toString()); + String backupIdB1 = + backupTables(BackupType.FULL, tableSetFull, backupRoot1.toString()).getBackupId(); assertTrue(checkSucceeded(backupIdB1)); // As part of a backup, WALs are rolled, so we expect a new WAL file @@ -89,6 +93,13 @@ mergeAsSet(walFilesBeforeBackup, getListOfWALFiles(TEST_UTIL.getConfiguration())); assertTrue(walFilesBeforeBackup.size() < walFilesAfterB1.size()); + String backupIdFull = fullTableBackup(tableSetFull).getBackupId(); + assertTrue(checkSucceeded(backupIdFull)); + // Check one more time after the additional full backup + deletable = cleaner.getDeletableFiles(walFilesAfterB1); + // We can delete these WAL files because they were saved into the backup system table + assertEquals(walFilesAfterB1.size(), Iterables.size(deletable)); + // Currently, we only have backup B1, so we
can delete any WAL preceding B1 deletable = cleaner.getDeletableFiles(walFilesAfterB1); assertEquals(toSet(walFilesBeforeBackup), toSet(deletable)); @@ -117,7 +128,8 @@ public void testBackupLogCleaner() throws Exception { // Note that incremental tables always include all tables already included in the backup root, // i.e. the backup will contain all tables (1, 2, 3, 4), ignoring what we specify here. LOG.debug("Creating B2"); - String backupIdB2 = backupTables(BackupType.INCREMENTAL, tableSet14, backupRoot1.toString()); + String backupIdB2 = + backupTables(BackupType.INCREMENTAL, tableSet14, backupRoot1.toString()).getBackupId(); assertTrue(checkSucceeded(backupIdB2)); // As part of a backup, WALs are rolled, so we expect a new WAL file @@ -128,12 +140,17 @@ public void testBackupLogCleaner() throws Exception { // At this point, we have backups in root R1: B1 and B2. // We only consider the most recent backup (B2) to determine which WALs can be deleted: // all WALs preceding B2 + List tableSetIncList = Lists.newArrayList(table1, table2, table3); + String backupIdIncMultiple = + backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId(); + assertTrue(checkSucceeded(backupIdIncMultiple)); deletable = cleaner.getDeletableFiles(walFilesAfterB2); assertEquals(toSet(walFilesAfterB1), toSet(deletable)); // Create a FULL backup B3 in backupRoot R2, containing tables 1 & 4 LOG.debug("Creating B3"); - String backupIdB3 = backupTables(BackupType.FULL, tableSetFull, backupRoot2.toString()); + String backupIdB3 = + backupTables(BackupType.FULL, tableSetFull, backupRoot2.toString()).getBackupId(); assertTrue(checkSucceeded(backupIdB3)); // As part of a backup, WALs are rolled, so we expect a new WAL file @@ -153,7 +170,8 @@ public void testBackupLogCleaner() throws Exception { // Create a FULL backup B4 in backupRoot R1, with a subset of tables LOG.debug("Creating B4"); - String backupIdB4 = backupTables(BackupType.FULL, tableSet14, backupRoot1.toString()); + String backupIdB4 = + backupTables(BackupType.FULL, tableSet14, backupRoot1.toString()).getBackupId(); assertTrue(checkSucceeded(backupIdB4)); // As part of a backup, WALs are rolled, so we expect a new WAL file @@ -174,7 +192,8 @@ public void testBackupLogCleaner() throws Exception { assertEquals(toSet(walFilesAfterB1), toSet(deletable)); // Create a FULL backup B5 in backupRoot R1, for tables 2 & 3 - String backupIdB5 = backupTables(BackupType.FULL, tableSet23, backupRoot1.toString()); + String backupIdB5 = + backupTables(BackupType.FULL, tableSet23, backupRoot1.toString()).getBackupId(); assertTrue(checkSucceeded(backupIdB5)); // As part of a backup, WALs are rolled, so we expect a new WAL file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java index 94ea6f5845c8..4bac3f8fa9a5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -21,6 +21,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.NavigableSet; import java.util.Objects; import java.util.Set; import java.util.SortedSet; @@ -40,7 +41,7 @@ public class RSGroupInfo { private final String name; // Keep servers in a sorted set so has an expected ordering when displayed. - private final SortedSet
<Address> servers; + private final NavigableSet<Address>
servers; // Keep tables sorted too. /** @@ -100,8 +101,10 @@ public boolean containsServer(Address hostPort) { return servers.contains(hostPort); } - /** Get list of servers. */ - public Set<Address>
getServers() { + /** + * Get list of servers. + */ + public NavigableSet<Address>
getServers() { return servers; } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java index 85ad99bdcf8a..af62e1dded73 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java @@ -229,7 +229,7 @@ private void loadData(TableName table, int numRows) throws IOException { } private String backup(BackupRequest request, BackupAdmin client) throws IOException { - String backupId = client.backupTables(request); + String backupId = client.backupTables(request).getBackupId(); return backupId; }
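
Reviewer note (not part of the patch): every call site above migrates from the old String return value of backupTables to BackupInfo, fetching the id via getBackupId(). A minimal caller sketch of the new API, assuming a prepared BackupRequest named request (an illustrative name, not from this patch):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupInfoUsageSketch {
  // Demonstrates consuming the new BackupInfo return value.
  public static void runBackup(BackupRequest request) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupInfo info = admin.backupTables(request);
      // The id that backupTables used to return directly:
      String backupId = info.getBackupId();
      // Now also reachable by callers without a backup-system-table lookup:
      List<String> wals = info.getIncrBackupFileList();
      System.out.println("Backup " + backupId + " covered " + wals.size() + " WAL files");
    }
  }
}

This is what lets the new rsgroup test assert on getIncrBackupFileList() right after the backup returns, as done in TestIncrementalBackupWithRsgroup above.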
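The new test also relies on a helper, checkIfWALFilesBelongToRsgroup, which is added to TestBackupBase elsewhere in this patch and not shown in this hunk. A rough sketch of one way such a check could work, using only public APIs (Admin.getRSGroup and AbstractFSWALProvider.getServerNameFromWALDirectoryName); this is an assumption about the helper's behavior, not its actual implementation:

import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public final class RsgroupWalCheckSketch {
  // Sketch: fail if any backed-up WAL was written by a server outside the given rsgroup.
  static void assertWalsBelongToGroup(Connection conn, List<String> walFiles, String groupName)
    throws IOException {
    try (Admin admin = conn.getAdmin()) {
      RSGroupInfo group = admin.getRSGroup(groupName);
      Set<String> groupHosts =
        group.getServers().stream().map(Address::getHostname).collect(Collectors.toSet());
      for (String wal : walFiles) {
        // WAL directory names encode the ServerName of the region server that wrote them
        ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(new Path(wal));
        if (sn == null || !groupHosts.contains(sn.getHostname())) {
          throw new AssertionError("WAL " + wal + " was not written by rsgroup " + groupName);
        }
      }
    }
  }
}

Filtering by participating servers is the point of the IncrementalBackupManager change earlier in this patch: only WALs from servers hosting the backed-up tables' rsgroup need to be collected for an incremental image.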