diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index bdb96ef3c14a..94877005a60e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -291,7 +291,29 @@ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOExce * @throws IOException if a remote or network exception occurs */ default void deleteTable(TableName tableName) throws IOException { - get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + get(deleteTableAsync(tableName, true), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + } + + /** + * Deletes a table. Synchronous operation. + * @param tableName name of table to delete + * @param archive whether to archive the table data before it is removed + * @throws IOException if a remote or network exception occurs + */ + default void deleteTable(TableName tableName, boolean archive) throws IOException { + get(deleteTableAsync(tableName, archive), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + } + + /** + * Deletes a table asynchronously. Kept for backward compatibility; the table data is archived. + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async delete. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. + */ + default Future<Void> deleteTableAsync(TableName tableName) throws IOException { + return deleteTableAsync(tableName, true); } /** @@ -300,11 +322,12 @@ default void deleteTable(TableName tableName) throws IOException { * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete + * @param archive whether to archive the table data before it is removed * @throws IOException if a remote or network exception occurs * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future<Void> deleteTableAsync(TableName tableName) throws IOException; + Future<Void> deleteTableAsync(TableName tableName, boolean archive) throws IOException; /** * Truncate a table. Synchronous operation. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 18a449e40083..00455e08030c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -195,7 +195,16 @@ CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[ * Deletes a table. * @param tableName name of table to delete */ - CompletableFuture<Void> deleteTable(TableName tableName); + default CompletableFuture<Void> deleteTable(TableName tableName) { + return deleteTable(tableName, true); + } + + /** + * Deletes a table. + * @param tableName name of table to delete + * @param archive whether to archive the table data before it is removed + */ + CompletableFuture<Void> deleteTable(TableName tableName, boolean archive); /** * Truncate a table.
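As a quick illustration of the new client-facing overloads above, here is a minimal, hypothetical sketch; the table names and connection setup are illustrative only, and it assumes the two-argument deleteTable/deleteTableAsync variants land exactly as declared in this patch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableArchiveFlagExample {
  public static void main(String[] args) throws Exception {
    TableName syncTable = TableName.valueOf("example_sync_table");   // hypothetical table
    TableName asyncTable = TableName.valueOf("example_async_table"); // hypothetical table

    // Blocking Admin API: disable, then delete without moving HFiles to the archive.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      admin.disableTable(syncTable);
      admin.deleteTable(syncTable, false); // new overload; admin.deleteTable(syncTable) still archives
    }

    // AsyncAdmin flavour, mirroring the default/abstract split added to AsyncAdmin above.
    try (AsyncConnection asyncConn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin asyncAdmin = asyncConn.getAdmin();
      asyncAdmin.disableTable(asyncTable).join();
      asyncAdmin.deleteTable(asyncTable, false).join();
    }
  }
}

The single-argument calls keep today's behaviour because both interfaces delegate to the new overload with archive set to true.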
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index ba016111cd3d..4f4aac96d5a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -147,14 +147,14 @@ public CompletableFuture<Void> modifyTable(TableDescriptor desc) { } + @Override + public CompletableFuture<Void> deleteTable(TableName tableName, boolean archive) { + return wrap(rawAdmin.deleteTable(tableName, archive)); + } + @Override public CompletableFuture<Void> modifyTableStoreFileTracker(TableName tableName, String dstSFT) { return wrap(rawAdmin.modifyTableStoreFileTracker(tableName, dstSFT)); } - @Override - public CompletableFuture<Void> deleteTable(TableName tableName) { - return wrap(rawAdmin.deleteTable(tableName)); - } @Override public CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 4d614907326e..35b5f0ead5a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -672,9 +672,9 @@ ModifyTableStoreFileTrackerResponse> procedureCall(tableName, } @Override - public CompletableFuture<Void> deleteTable(TableName tableName) { + public CompletableFuture<Void> deleteTable(TableName tableName, boolean archive) { return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(tableName, - RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), + RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce(), archive), (s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(), new DeleteTableProcedureBiConsumer(tableName)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 13e85f91c958..1641e60d306d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1017,12 +1017,16 @@ public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] region /** * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, - final long nonceGroup, final long nonce) { + public static DeleteTableRequest buildDeleteTableRequest( + final TableName tableName, + final long nonceGroup, + final long nonce, + final boolean archive) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); + builder.setArchive(archive); return builder.build(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 257abe8f11ca..c38b967516f4 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -152,6 +152,7 @@ message DeleteTableRequest { required TableName
table_name = 1; optional uint64 nonce_group = 2 [default = 0]; optional uint64 nonce = 3 [default = 0]; + optional bool archive = 4 [default = true]; } message DeleteTableResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 816e4497f1f2..62d8f559a553 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2431,27 +2431,57 @@ private static boolean isCatalogTable(final TableName tableName) { return tableName.equals(TableName.META_TABLE_NAME); } + private void checkSnapshot(TableName tableName, boolean archive) throws IOException { + /* + * If the table is to be deleted without archiving, make sure it has no snapshots, because the + * snapshot data would become unrecoverable once the HFiles are removed. The check lists and + * opens the completed snapshots, so it may be slow when there are many snapshots; the cost of + * this scan versus simply archiving should be evaluated. + * TODO: find a cheaper way to determine whether the table has ever been snapshotted. + */ + if (!archive) { + LOG.debug("Scanning snapshots to check whether a snapshot exists for: " + + tableName.getNameAsString()); + + // List all completed snapshots and keep those taken on this table + List<SnapshotDescription> snapshotList = snapshotManager.getCompletedSnapshots(); + List<String> tableList = snapshotList.stream().map(n -> n.getTable()) + .filter(c -> c.equals(tableName.getNameAsString())) + .collect(Collectors.toList()); + if (!tableList.isEmpty() || snapshotManager.isTakingSnapshot(tableName)) { + throw new DoNotRetryIOException("There is snapshot for the table and archive is needed"); + } + } + } + @Override - public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + public long deleteTable( + final TableName tableName, + final long nonceGroup, + final long nonce, + final boolean archive) throws IOException { checkInitialized(); - return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); + // If archiving was switched off, make sure the table has no snapshots before deleting it + checkSnapshot(tableName, archive); + + return MasterProcedureUtil.submitProcedure( + new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); LOG.info(getClientIdAuditPrefix() + " delete " + tableName); - // TODO: We can handle/merge duplicate request - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure( - new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); - latch.await(); + // TODO: We can handle/merge duplicate request + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953.
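+ // The archive flag is threaded through to DeleteTableProcedure below so that the DELETE_TABLE_CLEAR_FS_LAYOUT step can remove HFiles outright instead of moving them to the archive directory (see the DeleteTableProcedure changes further down).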
+ ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), + tableName, latch, archive)); + latch.await(); getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e74505fa7fb9..f97ce0be6557 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -828,8 +828,8 @@ public DeleteSnapshotResponse deleteSnapshot(RpcController controller, public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) throws ServiceException { try { - long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()), - request.getNonceGroup(), request.getNonce()); + long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()), + request.getNonceGroup(), request.getNonce(), request.getArchive()); return DeleteTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index fbeb155a88ba..4f43910bfcef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -137,12 +137,33 @@ long createTable(final TableDescriptor desc, final byte[][] splitKeys, final lon */ long createSystemTable(final TableDescriptor tableDescriptor) throws IOException; + /** + * Delete a table, archiving its data. Kept for backward compatibility with existing unit tests. + * @param tableName The table name + * @param nonceGroup the nonce group + * @param nonce the nonce + * @throws IOException if a remote or network exception occurs + */ + default long deleteTable( + final TableName tableName, + final long nonceGroup, + final long nonce) throws IOException { + return deleteTable(tableName, nonceGroup, nonce, true); + } + /** * Delete a table - * @param tableName The table name nnn - */ - long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException; + * @param tableName The table name + * @param nonceGroup the nonce group + * @param nonce the nonce + * @param archive whether to archive the table data before it is removed + * @throws IOException if a remote or network exception occurs + */ + long deleteTable( + final TableName tableName, + final long nonceGroup, + final long nonce, + final boolean archive) throws IOException; /** * Truncate a table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 80fb5d0534d4..5eb664f5b4d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -61,6 +61,7 @@ public class DeleteTableProcedure extends AbstractStateMachineTableProcedure<DeleteTableState> { private List<RegionInfo> regions; private TableName tableName; + private boolean archive; public DeleteTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -72,9 +73,15 @@ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableN } public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName, - final ProcedurePrepareLatch syncLatch) { + final ProcedurePrepareLatch syncLatch) { + this(env, tableName, syncLatch, true); + } + + public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName, + final ProcedurePrepareLatch syncLatch, final boolean archive) { super(env, syncLatch); this.tableName = tableName; + this.archive = archive; } @Override @@ -108,7 +115,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s break; case DELETE_TABLE_CLEAR_FS_LAYOUT: LOG.debug("Deleting regions from filesystem for {}", this); - DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); + DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, archive); setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META); break; case DELETE_TABLE_REMOVE_FROM_META: @@ -298,7 +305,7 @@ protected static void deleteFromFs(final MasterProcedureEnv env, final TableName Path mobTableDir = CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName); Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName()); - if (fs.exists(regionDir)) { + if (fs.exists(regionDir) && archive) { HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index bc3969ffd518..4f774e8ab5a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -248,8 +248,20 @@ public long deleteTable(final TableName tableName, final long nonceGroup, final } @Override - public long truncateTable(final TableName tableName, final boolean preserveSplits, - final long
nonceGroup, final long nonce) throws IOException { + public long deleteTable( + final TableName tableName, + final long nonceGroup, + final long nonce, + final boolean archive) throws IOException { + return -1; + } + + @Override + public long truncateTable( + final TableName tableName, + final boolean preserveSplits, + final long nonceGroup, + final long nonce) throws IOException { return -1; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java new file mode 100644 index 000000000000..9bba9cfbf5ce --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java @@ -0,0 +1,353 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.Random; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler; +import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +@Category(MediumTests.class) +public class TestDeleteTable{ + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDeleteTable.class); + + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private final static byte[] FAMILY1 = Bytes.toBytes("normal"); + private final static byte[] FAMILY2 = Bytes.toBytes("mob"); + + private final static byte[] QF = Bytes.toBytes("qualifier"); + private static Random random = new Random(); + + @Rule + public TestName name = new TestName(); + + @BeforeClass + public static void setUpBeforeClass() throws 
Exception { + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + + private static byte[] generateValue(int size) { + byte[] val = new byte[size]; + random.nextBytes(val); + return val; + } + + private HTableDescriptor createTableDescriptor(TableName tableName) { + HTableDescriptor htd = new HTableDescriptor(tableName); + HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILY1); + HColumnDescriptor hcd2 = new HColumnDescriptor(FAMILY2); + + hcd1.setMobEnabled(true); + hcd1.setMobThreshold(0); + + + htd.addFamily(hcd1); + htd.addFamily(hcd2); + return htd; + } + + private Table createTableWithFiles(HTableDescriptor htd) throws IOException { + Table table = TEST_UTIL.createTable(htd, null); + try { + // insert data + byte[] value = generateValue(10); + byte[] row = Bytes.toBytes("row"); + Put put1 = new Put(row); + put1.addColumn(FAMILY1, QF, EnvironmentEdgeManager.currentTime(), value); + table.put(put1); + + Put put2 = new Put(row); + put2.addColumn(FAMILY2, QF, EnvironmentEdgeManager.currentTime(), value); + table.put(put2); + + // create an hfile + TEST_UTIL.getAdmin().flush(htd.getTableName()); + } catch (IOException e) { + table.close(); + throw e; + } + return table; + } + + + @Test + public void testDeleteTableWithoutArchive() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + HTableDescriptor htd = createTableDescriptor(tableName); + HColumnDescriptor hcd_mob = htd.getFamily(FAMILY1); + HColumnDescriptor hcd = htd.getFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + HRegionInfo regionInfo = TEST_UTIL.getHBaseAdmin().getTableRegions(tableName).get(0); + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName, false); + + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + @Test + public void testDeleteTableWithArchive() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + HTableDescriptor htd = createTableDescriptor(tableName); + HColumnDescriptor hcd_mob = htd.getFamily(FAMILY1); + HColumnDescriptor hcd = htd.getFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + HRegionInfo regionInfo = TEST_UTIL.getHBaseAdmin().getTableRegions(tableName).get(0); + + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, 
countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName, true); + + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(1, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + @Test + public void testDeleteTableDefaultBehavior() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + HTableDescriptor htd = createTableDescriptor(tableName); + HColumnDescriptor hcd_mob = htd.getFamily(FAMILY1); + HColumnDescriptor hcd = htd.getFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + HRegionInfo regionInfo = TEST_UTIL.getHBaseAdmin().getTableRegions(tableName).get(0); + + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName); + + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(1, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + + @Test + public void testDeleteTableWithSnapshot() throws Exception{ + + //no snapshot + final TableName tn_no_snapshot = TableName.valueOf(name.getMethodName()); + //snapshot in progress + final TableName tn_snapshot_inprogress = TableName.valueOf(name.getMethodName() + +"_snapshot_inprogress"); + //complete snapshot + final TableName tn_snapshot = TableName.valueOf(name.getMethodName()+"_snapshot"); + + + + HTableDescriptor htd_no_snapshot = createTableDescriptor(tn_no_snapshot); + HTableDescriptor htd_snapshot_ingress = createTableDescriptor(tn_snapshot_inprogress); + HTableDescriptor htd_snapshot = createTableDescriptor(tn_snapshot); + + + Table tb_no_snapshot = createTableWithFiles(htd_no_snapshot); + Table tb_snapshot_ingress = createTableWithFiles(htd_snapshot_ingress); + Table tb_snapshot = createTableWithFiles(htd_snapshot); + + + String snapshotName = name.getMethodName()+"-snapshot"; + TEST_UTIL.getAdmin().snapshot(snapshotName, tn_snapshot); + + + + //if delete the table with snapshot in progress without archive, there should be exception + Exception exception_withsnap_inprogress = Assert.assertThrows(DoNotRetryIOException.class, + ()-> { + TEST_UTIL.deleteTable(tn_snapshot, false); + }); + Assert.assertTrue(exception_withsnap_inprogress.getMessage(). 
+ contains("There is snapshot for the table and archive is needed")); + + + // set a mock handler and make the table in the handler to mock the in process + DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class); + TEST_UTIL.getMiniHBaseCluster().getMaster().getSnapshotManager() + .setSnapshotHandlerForTesting(tn_snapshot_inprogress, mockHandler); + + //if delete the table with snapshot without archive, there should be exception + Exception exception_withsnap = Assert.assertThrows(DoNotRetryIOException.class, ()->{ + TEST_UTIL.deleteTable(tn_snapshot_inprogress, false); + }); + Assert.assertTrue(exception_withsnap.getMessage(). + contains("There is snapshot for the table and archive is needed")); + + + //test with correct step + TEST_UTIL.deleteTable(tn_no_snapshot, false); + TEST_UTIL.deleteTable(tn_snapshot_inprogress, true); + TEST_UTIL.deleteTable(tn_snapshot, true); + + tb_no_snapshot.close(); + tb_snapshot_ingress.close(); + tb_snapshot.close(); + + + } + + private int countMobFiles(TableName tn, String familyName) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path mobFileDir = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName); + if (fs.exists(mobFileDir)) { + return fs.listStatus(mobFileDir).length; + } + return 0; + } + + private int countArchiveMobFiles(TableName tn, String familyName) + throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, + MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); + if (fs.exists(storePath)) { + return fs.listStatus(storePath).length; + } + return 0; + } + + private boolean mobTableDirExist(TableName tn) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path tableDir = + CommonFSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn); + return fs.exists(tableDir); + } + + + private int countRegionFiles(TableName tn, String familyName) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + + if(TEST_UTIL.getHBaseAdmin().getTableRegions(tn).isEmpty()) { + return 0; + } + + HRegionInfo regionInfo = TEST_UTIL.getHBaseAdmin().getTableRegions(tn).get(0); + + Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, regionInfo); + Path nfDir = new Path(regionDir,familyName); + + if (fs.exists(nfDir)) { + return fs.listStatus(nfDir).length; + } + return 0; + } + + private int countArchiveRegionFiles(TableName tn, String familyName, HRegionInfo regionInfo) + throws IOException { + + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, + regionInfo.getEncodedName(), familyName); + + if (fs.exists(storePath)) { + return fs.listStatus(storePath).length; + } + return 0; + } + + private boolean tableDirExist(TableName tn) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, tn); + return fs.exists(tableDir); + } +} diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index e90e37c4bc63..d0d95f0fa327 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -443,13 +443,13 @@ def disabled?(table_name) 
#---------------------------------------------------------------------------------------------- # Drops a table - def drop(table_name) + def drop(table_name, archive = true) tableExists(table_name) raise ArgumentError, "Table #{table_name} is enabled. Disable it first." if enabled?( table_name ) - @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name)) + @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name), archive) end #---------------------------------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/shell/commands/drop.rb b/hbase-shell/src/main/ruby/shell/commands/drop.rb index 063eb32773e8..9fed0ef2556a 100644 --- a/hbase-shell/src/main/ruby/shell/commands/drop.rb +++ b/hbase-shell/src/main/ruby/shell/commands/drop.rb @@ -22,14 +22,17 @@ module Commands class Drop < Command def help <<-EOF -Drop the named table. Table must first be disabled: +Drop the named table. Table must first be disabled: hbase> drop 't1' hbase> drop 'ns1:t1' +By default the table data is archived; pass false to switch archiving off: + hbase> drop 't1', false + hbase> drop 'ns1:t1', false EOF end - def command(table) - admin.drop(table) + def command(table, archive = true) + admin.drop(table, archive) end end end diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index d3f5a51dc4ae..be0e435b7a72 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1810,6 +1810,20 @@ public void deleteTable(TableName tableName) throws IOException { getAdmin().deleteTable(tableName); } + /** + * Disable and drop an existing table + * @param tableName existing table + * @param archive whether to archive the table data before it is removed + */ + public void deleteTable(TableName tableName, boolean archive) throws IOException { + try { + getAdmin().disableTable(tableName); + } catch (TableNotEnabledException e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + getAdmin().deleteTable(tableName, archive); + } + /** * Drop an existing table * @param tableName existing table diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 13a1b9920ecf..0a382d128e24 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1099,6 +1099,11 @@ public Future<Void> deleteTableAsync(TableName tableName) { throw new NotImplementedException("deleteTableAsync not supported in ThriftAdmin"); } + @Override + public Future<Void> deleteTableAsync(TableName tableName, boolean archive) { + throw new NotImplementedException("deleteTableAsync not supported in ThriftAdmin"); + } + @Override public Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits) { throw new NotImplementedException("truncateTableAsync not supported in ThriftAdmin");
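Because archive = 4 is declared optional with [default = true] in Master.proto, a request that never sets the field, which is exactly what a pre-upgrade client sends, is read back as archive == true on the master, so old clients keep the archiving behaviour. A small, hypothetical sketch of that round trip, using the patch's new RequestConverter overload and the generated (HBase-internal) shaded protobuf classes:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;

public class DeleteTableRequestDefaultCheck {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("example_table"); // hypothetical table

    // New-style client: the archive flag is set explicitly through the converter overload.
    DeleteTableRequest noArchive = RequestConverter.buildDeleteTableRequest(tn, 0L, 0L, false);
    System.out.println(noArchive.hasArchive() + " / " + noArchive.getArchive()); // true / false

    // Legacy-style request that never sets the field: the proto default kicks in.
    DeleteTableRequest legacy = DeleteTableRequest.newBuilder()
        .setTableName(ProtobufUtil.toProtoTableName(tn))
        .build();
    System.out.println(legacy.hasArchive() + " / " + legacy.getArchive()); // false / true
  }
}

This is only meant to illustrate the wire-level default; application code would go through Admin or the shell rather than building the request by hand.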