diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 0120ca892fa2..5cdccae48e6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; @@ -53,6 +54,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.codec.OmDBDiffReportEntryCodec; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService; @@ -372,15 +374,19 @@ public static DBCheckpoint createOmSnapshotCheckpoint( // Acquire deletedTable write lock omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_TABLE) .writeLock().lock(); + // TODO: [SNAPSHOT] HDDS-8067. Acquire deletedDirectoryTable write lock try { // Create DB checkpoint for snapshot dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName()); // Clean up active DB's deletedTable right after checkpoint is taken, // with table write lock held - deleteKeysInSnapshotScopeFromDTableInternal(omMetadataManager, + deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager, + snapshotInfo.getVolumeName(), snapshotInfo.getBucketName()); + // Clean up deletedDirectoryTable as well + deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager, snapshotInfo.getVolumeName(), snapshotInfo.getBucketName()); - // TODO: [SNAPSHOT] HDDS-8064. Clean up deletedDirTable as well } finally { + // TODO: [SNAPSHOT] HDDS-8067. Release deletedDirectoryTable write lock // Release deletedTable write lock omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_TABLE) .writeLock().unlock(); @@ -413,73 +419,153 @@ public static DBCheckpoint createOmSnapshotCheckpoint( } /** - * Helper method to delete keys in the snapshot scope from active DB's - * deletedTable. - * + * Helper method to delete DB keys in the snapshot scope (bucket) + * from active DB's deletedDirectoryTable. * @param omMetadataManager OMMetadataManager instance * @param volumeName volume name * @param bucketName bucket name */ - private static void deleteKeysInSnapshotScopeFromDTableInternal( + private static void deleteKeysFromDelDirTableInSnapshotScope( OMMetadataManager omMetadataManager, String volumeName, String bucketName) throws IOException { // Range delete start key (inclusive) - String beginKey = - omMetadataManager.getOzoneKey(volumeName, bucketName, ""); - - // Range delete end key (exclusive) to be found + final String beginKey = getOzonePathKeyWithVolumeBucketNames( + omMetadataManager, volumeName, bucketName); + // Range delete end key (exclusive). 
To be calculated String endKey; - // Start performance tracking timer - long startTime = System.nanoTime(); + try (TableIterator> + iter = omMetadataManager.getDeletedDirTable().iterator()) { + endKey = findEndKeyGivenPrefix(iter, beginKey); + } - try (TableIterator> - keyIter = omMetadataManager.getDeletedTable().iterator()) { - - keyIter.seek(beginKey); - // Continue only when there are entries of snapshot (bucket) scope - // in deletedTable in the first place - if (!keyIter.hasNext()) { - // Use null as a marker. No need to do deleteRange() at all. - endKey = null; - } else { - // Remember the last key with a matching prefix - endKey = keyIter.next().getKey(); - - // Loop until prefix mismatches. - // TODO: [SNAPSHOT] Try to seek next predicted bucket name (speed up?) - while (keyIter.hasNext()) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); - if (dbKey.startsWith(beginKey)) { - endKey = dbKey; - } + // Clean up deletedDirectoryTable + deleteRangeInclusive(omMetadataManager.getDeletedDirTable(), + beginKey, endKey); + } + + /** + * Helper method to generate /volumeId/bucketId/ DB key prefix from given + * volume name and bucket name as a prefix in FSO deletedDirectoryTable. + * Follows: + * {@link OmMetadataManagerImpl#getOzonePathKey(long, long, long, String)}. + *
<p>
+ * Note: Currently, this is only intended to be a special use case in + * {@link OmSnapshotManager}. If this is used elsewhere, consider moving this + * to {@link OMMetadataManager}. + * + * @param volumeName volume name + * @param bucketName bucket name + * @return /volumeId/bucketId/ + * e.g. /-9223372036854772480/-9223372036854771968/ + */ + @VisibleForTesting + public static String getOzonePathKeyWithVolumeBucketNames( + OMMetadataManager omMetadataManager, + String volumeName, + String bucketName) throws IOException { + + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; + } + + /** + * Helper method to locate the end key with the given prefix and iterator. + * @param keyIter TableIterator + * @param keyPrefix DB key prefix String + * @return endKey String, or null if no keys with such prefix is found + */ + private static String findEndKeyGivenPrefix( + TableIterator> keyIter, + String keyPrefix) throws IOException { + + String endKey; + keyIter.seek(keyPrefix); + // Continue only when there are entries of snapshot (bucket) scope + // in deletedTable in the first place + if (!keyIter.hasNext()) { + // No key matching keyPrefix. No need to do delete or deleteRange at all. + endKey = null; + } else { + // Remember the last key with a matching prefix + endKey = keyIter.next().getKey(); + + // Loop until prefix mismatches. + // TODO: [SNAPSHOT] Try to seek to next predicted bucket name instead of + // the while-loop for a potential speed up? + // Start performance tracking timer + long startTime = System.nanoTime(); + while (keyIter.hasNext()) { + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + if (dbKey.startsWith(keyPrefix)) { + endKey = dbKey; } } + // Time took for the iterator to finish (in ns) + long timeElapsed = System.nanoTime() - startTime; + if (timeElapsed >= DB_TABLE_ITER_LOOP_THRESHOLD_NS) { + // Print time elapsed + LOG.warn("Took {} ns to find endKey. Caller is {}", timeElapsed, + new Throwable().fillInStackTrace().getStackTrace()[1] + .getMethodName()); + } } + return endKey; + } - // Time took for the iterator to finish (in ns) - long timeElapsed = System.nanoTime() - startTime; - if (timeElapsed >= DB_TABLE_ITER_LOOP_THRESHOLD_NS) { - // Print time elapsed - LOG.warn("Took {} ns to clean up deletedTable", timeElapsed); - } + /** + * Helper method to do deleteRange on a table, including endKey. + * TODO: Move this into {@link Table} ? + * @param table Table + * @param beginKey begin key + * @param endKey end key + */ + private static void deleteRangeInclusive( + Table table, String beginKey, String endKey) + throws IOException { if (endKey != null) { - // Clean up deletedTable - omMetadataManager.getDeletedTable().deleteRange(beginKey, endKey); - + table.deleteRange(beginKey, endKey); // Remove range end key itself - omMetadataManager.getDeletedTable().delete(endKey); + table.delete(endKey); } + } - // Note: We do not need to invalidate deletedTable cache since entries - // are not added to its table cache in the first place. - // See OMKeyDeleteRequest and OMKeyPurgeRequest#validateAndUpdateCache. + /** + * Helper method to delete DB keys in the snapshot scope (bucket) + * from active DB's deletedTable. 
+ * @param omMetadataManager OMMetadataManager instance + * @param volumeName volume name + * @param bucketName bucket name + */ + private static void deleteKeysFromDelKeyTableInSnapshotScope( + OMMetadataManager omMetadataManager, + String volumeName, + String bucketName) throws IOException { + + // Range delete start key (inclusive) + final String beginKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, ""); + // Range delete end key (exclusive). To be found + String endKey; + try (TableIterator> + iter = omMetadataManager.getDeletedTable().iterator()) { + endKey = findEndKeyGivenPrefix(iter, beginKey); + } + + // Clean up deletedTable + deleteRangeInclusive(omMetadataManager.getDeletedTable(), beginKey, endKey); + + // No need to invalidate deletedTable (or deletedDirectoryTable) table + // cache since entries are not added to its table cache in the first place. + // See OMKeyDeleteRequest and OMKeyPurgeRequest#validateAndUpdateCache. + // // This makes the table clean up efficient as we only need one // deleteRange() operation. No need to invalidate cache entries // one by one. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index ceed9ebe9aa7..a59b6f9041e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -108,7 +108,7 @@ public int getPriority() { } @Override - public BackgroundTaskResult call() throws Exception { + public BackgroundTaskResult call() { if (shouldRun()) { if (LOG.isDebugEnabled()) { LOG.debug("Running DirectoryDeletingService"); @@ -122,6 +122,8 @@ public BackgroundTaskResult call() throws Exception { List> allSubDirList = new ArrayList<>((int) remainNum); + // TODO: [SNAPSHOT] HDDS-8067. Acquire deletedDirectoryTable write lock + Table.KeyValue pendingDeletedDirInfo; try (TableIterator> deleteTableIterator = getOzoneManager().getMetadataManager(). @@ -159,6 +161,8 @@ public BackgroundTaskResult call() throws Exception { LOG.error("Error while running delete directories and files " + "background task. Will retry at next run.", e); } + // TODO: [SNAPSHOT] HDDS-8067. Release deletedDirectoryTable write lock + // in finally block } // place holder by returning empty results of this call back. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 284eb9aa1162..5cf186428e41 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -113,7 +113,7 @@ public int getPriority() { } @Override - public BackgroundTaskResult call() throws Exception { + public BackgroundTaskResult call() { // Check if this is the Leader OM. If not leader, no need to execute this // task. if (shouldRun()) { @@ -125,6 +125,8 @@ public BackgroundTaskResult call() throws Exception { // OM would have to keep track of which snapshot the key is coming // from. And PurgeKeysRequest would have to be adjusted to be able // to operate on snapshot checkpoints. + + // TODO: [SNAPSHOT] HDDS-8067. 
Acquire deletedTable write lock List keyBlocksList = manager .getPendingDeletionKeys(keyLimitPerTask); if (keyBlocksList != null && !keyBlocksList.isEmpty()) { @@ -136,6 +138,8 @@ public BackgroundTaskResult call() throws Exception { LOG.error("Error while running delete keys background task. Will " + "retry at next run.", e); } + // TODO: [SNAPSHOT] HDDS-8067. Release deletedTable write lock + // in finally block } // By design, no one cares about the results of this call back. return EmptyTaskResult.newResult(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 5491c5461110..096218585d86 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -63,7 +63,7 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, .getSnapshotInfoTable() .get(snapshotKey); } catch (IOException e) { - LOG.error("Snapshot {}: not found.", snapshotKey, e); + LOG.error("Snapshot '{}' is not found.", snapshotKey, e); throw e; } if (snapshotInfo == null) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6287cc00cbdb..cd1589ee63ff 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.ozone.test.GenericTestUtils; @@ -47,6 +49,9 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; @@ -91,14 +96,39 @@ public void cleanup() throws Exception { @Test public void testCloseOnEviction() throws IOException { - // set up db table - SnapshotInfo first = createSnapshotInfo(); - SnapshotInfo second = createSnapshotInfo(); + // set up db tables + Table volumeTable = mock(Table.class); + Table bucketTable = mock(Table.class); Table snapshotInfoTable = mock(Table.class); + HddsWhiteboxTestUtils.setInternalState( + om.getMetadataManager(), VOLUME_TABLE, volumeTable); + HddsWhiteboxTestUtils.setInternalState( + om.getMetadataManager(), BUCKET_TABLE, bucketTable); + HddsWhiteboxTestUtils.setInternalState( + om.getMetadataManager(), SNAPSHOT_INFO_TABLE, snapshotInfoTable); + + 
final String volumeName = UUID.randomUUID().toString(); + final String dbVolumeKey = om.getMetadataManager().getVolumeKey(volumeName); + final OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setVolume(volumeName) + .setAdminName("bilbo") + .setOwnerName("bilbo") + .build(); + when(volumeTable.get(dbVolumeKey)).thenReturn(omVolumeArgs); + + String bucketName = UUID.randomUUID().toString(); + final String dbBucketKey = om.getMetadataManager().getBucketKey( + volumeName, bucketName); + final OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .build(); + when(bucketTable.get(dbBucketKey)).thenReturn(omBucketInfo); + + SnapshotInfo first = createSnapshotInfo(volumeName, bucketName); + SnapshotInfo second = createSnapshotInfo(volumeName, bucketName); when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); when(snapshotInfoTable.get(second.getTableKey())).thenReturn(second); - HddsWhiteboxTestUtils.setInternalState( - om.getMetadataManager(), "snapshotInfoTable", snapshotInfoTable); // create the first snapshot checkpoint OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), @@ -188,15 +218,12 @@ public void testHardLinkCreation() throws IOException { } } - private SnapshotInfo createSnapshotInfo() { + private SnapshotInfo createSnapshotInfo( + String volumeName, String bucketName) { String snapshotName = UUID.randomUUID().toString(); - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); String snapshotId = UUID.randomUUID().toString(); - return SnapshotInfo.newInstance(volumeName, - bucketName, - snapshotName, - snapshotId); + return SnapshotInfo.newInstance( + volumeName, bucketName, snapshotName, snapshotId); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotResponseTestUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotResponseTestUtil.java new file mode 100644 index 000000000000..577d6ac9ddfb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotResponseTestUtil.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; + +import java.io.IOException; + +/** + * Common test utility method(s) shared between + * {@link TestOMSnapshotCreateResponse} and + * {@link TestOMSnapshotDeleteResponse}. + */ +public final class OMSnapshotResponseTestUtil { + + private OMSnapshotResponseTestUtil() { + throw new IllegalStateException("Util class should not be initialized."); + } + + static void addVolumeBucketInfoToTable(OMMetadataManager omMetadataManager, + String volumeName, + String bucketName) + throws IOException { + + // Add volume entry to volumeTable for volumeId lookup + final OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setVolume(volumeName) + .setAdminName("bilbo") + .setOwnerName("bilbo") + .build(); + final String dbVolumeKey = omMetadataManager.getVolumeKey(volumeName); + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + CacheValue.get(1L, omVolumeArgs)); + omMetadataManager.getVolumeTable().put(dbVolumeKey, omVolumeArgs); + + // Add bucket entry to bucketTable for bucketId lookup + final OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .build(); + final String dbBucketKey = omMetadataManager.getBucketKey( + volumeName, bucketName); + omMetadataManager.getBucketTable().addCacheEntry( + new CacheKey<>(dbBucketKey), + CacheValue.get(2L, omBucketInfo)); + omMetadataManager.getBucketTable().put(dbBucketKey, omBucketInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index f8a67fbe96f1..8ec9e2713c4e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -26,8 +26,11 @@ import java.util.Set; import java.util.UUID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.junit.After; @@ -47,6 +50,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; /** @@ -60,6 +65,7 @@ public class TestOMSnapshotCreateResponse { private OMMetadataManager omMetadataManager; private BatchOperation batchOperation; private OzoneConfiguration ozoneConfiguration; + @Before public void setup() throws Exception { ozoneConfiguration = new OzoneConfiguration(); @@ -93,9 +99,11 @@ public void testAddToDBBatch() throws Exception { omMetadataManager 
.countRowsInTable(omMetadataManager.getSnapshotInfoTable())); - // Prepare for deletedTable clean up logic verification - Set dtSentinelKeys = addTestKeysToDeletedTable( - volumeName, bucketName); + // Populate deletedTable and deletedDirectoryTable + Set dtSentinelKeys = + addTestKeysToDeletedTable(volumeName, bucketName); + Set ddtSentinelKeys = + addTestKeysToDeletedDirTable(volumeName, bucketName); // commit to table OMSnapshotCreateResponse omSnapshotCreateResponse = @@ -124,8 +132,9 @@ public void testAddToDBBatch() throws Exception { Assert.assertEquals(snapshotInfo.getTableKey(), keyValue.getKey()); Assert.assertEquals(snapshotInfo, storedInfo); - // Check deletedTable clean up works as expected + // Check deletedTable and deletedDirectoryTable clean up work as expected verifyEntriesLeftInDeletedTable(dtSentinelKeys); + verifyEntriesLeftInDeletedDirTable(ddtSentinelKeys); } private Set addTestKeysToDeletedTable( @@ -134,60 +143,131 @@ private Set addTestKeysToDeletedTable( RepeatedOmKeyInfo dummyRepeatedKeyInfo = new RepeatedOmKeyInfo.Builder() .setOmKeyInfos(new ArrayList<>()).build(); - // Add deletedTable key entries that surround the snapshot scope - Set dtSentinelKeys = new HashSet<>(); + // Add deletedTable key entries that "surround" the snapshot scope + Set sentinelKeys = new HashSet<>(); // Get a bucket name right before and after the bucketName // e.g. When bucketName is buck2, bucketNameBefore is buck1, // bucketNameAfter is buck3 // This will not guarantee the bucket name is valid for Ozone but // this would be good enough for this unit test. char bucketNameLastChar = bucketName.charAt(bucketName.length() - 1); + String bucketNameBefore = bucketName.substring(0, bucketName.length() - 1) + - Character.toString((char)(bucketNameLastChar - 1)); + (char) (bucketNameLastChar - 1); for (int i = 0; i < 3; i++) { String dtKey = omMetadataManager.getOzoneKey(volumeName, bucketNameBefore, "dtkey" + i); omMetadataManager.getDeletedTable().put(dtKey, dummyRepeatedKeyInfo); - dtSentinelKeys.add(dtKey); + sentinelKeys.add(dtKey); } + String bucketNameAfter = bucketName.substring(0, bucketName.length() - 1) + - Character.toString((char)(bucketNameLastChar + 1)); + (char) (bucketNameLastChar + 1); for (int i = 0; i < 3; i++) { String dtKey = omMetadataManager.getOzoneKey(volumeName, bucketNameAfter, "dtkey" + i); omMetadataManager.getDeletedTable().put(dtKey, dummyRepeatedKeyInfo); - dtSentinelKeys.add(dtKey); + sentinelKeys.add(dtKey); } + // Add deletedTable key entries in the snapshot (bucket) scope for (int i = 0; i < 10; i++) { String dtKey = omMetadataManager.getOzoneKey(volumeName, bucketName, "dtkey" + i); omMetadataManager.getDeletedTable().put(dtKey, dummyRepeatedKeyInfo); + // These are the keys that should be deleted. + // Thus not added to sentinelKeys list. + } + + return sentinelKeys; + } + + /** + * Populates deletedDirectoryTable for the test. 
+ * @param volumeName volume name + * @param bucketName bucket name + * @return A set of DB keys + */ + private Set addTestKeysToDeletedDirTable( + String volumeName, String bucketName) throws IOException { + + OMSnapshotResponseTestUtil.addVolumeBucketInfoToTable( + omMetadataManager, volumeName, bucketName); + + final OmKeyInfo dummyOmKeyInfo = new OmKeyInfo.Builder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setKeyName("dummyKey") + .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .build(); + // Add deletedDirectoryTable key entries that "surround" the snapshot scope + Set sentinelKeys = new HashSet<>(); + + final String dbKeyPfx = + OmSnapshotManager.getOzonePathKeyWithVolumeBucketNames( + omMetadataManager, volumeName, bucketName); + + // Calculate offset to bucketId's last character in dbKeyPfx. + // First -1 for offset, second -1 for second to last char (before '/') + final int offset = dbKeyPfx.length() - 1 - 1; + + char bucketIdLastChar = dbKeyPfx.charAt(offset); + + String dbKeyPfxBefore = dbKeyPfx.substring(0, offset) + + (char) (bucketIdLastChar - 1) + dbKeyPfx.substring(offset); + for (int i = 0; i < 3; i++) { + String dtKey = dbKeyPfxBefore + "dir" + i; + omMetadataManager.getDeletedDirTable().put(dtKey, dummyOmKeyInfo); + sentinelKeys.add(dtKey); + } + + String dbKeyPfxAfter = dbKeyPfx.substring(0, offset) + + (char) (bucketIdLastChar + 1) + dbKeyPfx.substring(offset); + for (int i = 0; i < 3; i++) { + String dtKey = dbKeyPfxAfter + "dir" + i; + omMetadataManager.getDeletedDirTable().put(dtKey, dummyOmKeyInfo); + sentinelKeys.add(dtKey); + } + + // Add key entries in the snapshot (bucket) scope + for (int i = 0; i < 10; i++) { + String dtKey = dbKeyPfx + "dir" + i; + omMetadataManager.getDeletedDirTable().put(dtKey, dummyOmKeyInfo); + // These are the keys that should be deleted. + // Thus not added to sentinelKeys list. } - return dtSentinelKeys; + return sentinelKeys; } - private void verifyEntriesLeftInDeletedTable(Set dtSentinelKeys) + private void verifyEntriesLeftInDeletedTable(Set expectedKeys) throws IOException { + // Only keys inside the snapshot scope would be deleted from deletedTable. + verifyEntriesLeftInTable(omMetadataManager.getDeletedTable(), expectedKeys); + } + + private void verifyEntriesLeftInDeletedDirTable(Set expectedKeys) + throws IOException { + verifyEntriesLeftInTable(omMetadataManager.getDeletedDirTable(), + expectedKeys); + } + + private void verifyEntriesLeftInTable( + Table table, Set expectedKeys) throws IOException { - // Verify that only keys inside the snapshot scope are gone from - // deletedTable. 
- try (TableIterator> - keyIter = omMetadataManager.getDeletedTable().iterator()) { + try (TableIterator> + keyIter = table.iterator()) { keyIter.seekToFirst(); while (keyIter.hasNext()) { - Table.KeyValue entry = keyIter.next(); - String dtKey = entry.getKey(); - // deletedTable should not have bucketName keys - Assert.assertTrue("deletedTable should contain key", - dtSentinelKeys.contains(dtKey)); - dtSentinelKeys.remove(dtKey); + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + Assert.assertTrue(table.getName() + " should contain key", + expectedKeys.contains(dbKey)); + expectedKeys.remove(dbKey); } } - Assert.assertTrue("deletedTable is missing keys that should be there", - dtSentinelKeys.isEmpty()); + Assert.assertTrue(table.getName() + " is missing keys that should be there", + expectedKeys.isEmpty()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index bfbeeafb2bbb..296636ed5161 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -47,7 +47,6 @@ import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; - /** * This class tests OMSnapshotDeleteResponse. */ @@ -94,6 +93,8 @@ public void testAddToDBBatch() throws Exception { .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); // Prepare the table, write an entry with SnapshotCreate + OMSnapshotResponseTestUtil.addVolumeBucketInfoToTable( + omMetadataManager, volumeName, bucketName); OMSnapshotCreateResponse omSnapshotCreateResponse = new OMSnapshotCreateResponse(OMResponse.newBuilder() .setCmdType(Type.CreateSnapshot)
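
For reference, the deletedTable / deletedDirectoryTable cleanup above reduces to three steps: build the bucket's key prefix, scan forward from that prefix to find the last key that still matches, then issue deleteRange(begin, end) plus one final delete(end) because deleteRange is begin-inclusive / end-exclusive. Below is a minimal stand-alone sketch of that pattern, modeled on a java.util.TreeMap instead of the RocksDB-backed Table so it can run on its own; the class and helper names are illustrative only, not part of the patch.

import java.util.TreeMap;

public final class RangeDeleteSketch {

  /** Removes every key that starts with keyPrefix from the sorted map. */
  static void deletePrefixRangeInclusive(TreeMap<String, String> table,
      String keyPrefix) {
    String endKey = null;
    // Walk keys >= keyPrefix and remember the last one that still matches,
    // analogous to findEndKeyGivenPrefix(). Breaking early on the first
    // mismatch is safe here because the keys are sorted.
    for (String key : table.tailMap(keyPrefix, true).keySet()) {
      if (!key.startsWith(keyPrefix)) {
        break;
      }
      endKey = key;
    }
    if (endKey == null) {
      return; // nothing under this prefix, no delete needed
    }
    // Mirrors deleteRangeInclusive(): the range delete is begin-inclusive
    // and end-exclusive, so the end key itself needs one extra delete.
    table.subMap(keyPrefix, true, endKey, false).clear();
    table.remove(endKey);
  }

  public static void main(String[] args) {
    TreeMap<String, String> deletedTable = new TreeMap<>();
    deletedTable.put("/vol1/buck1/key1", "v");
    deletedTable.put("/vol1/buck2/key1", "v");
    deletedTable.put("/vol1/buck2/key2", "v");
    deletedTable.put("/vol1/buck3/key1", "v");

    deletePrefixRangeInclusive(deletedTable, "/vol1/buck2/");

    // Only the buck2 entries are gone; the neighboring buckets survive.
    System.out.println(deletedTable.keySet());
    // [/vol1/buck1/key1, /vol1/buck3/key1]
  }
}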
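
The /volumeId/bucketId/ prefix returned by getOzonePathKeyWithVolumeBucketNames() is what makes that scan bucket-scoped: every FSO deletedDirectoryTable key of the bucket starts with it. A small sketch of the key shape, reusing the hypothetical IDs from the javadoc example above (class name is illustrative):

public final class PathKeyPrefixSketch {

  private static final String OM_KEY_PREFIX = "/"; // same separator the OM uses

  // In the patch the ids come from getVolumeId()/getBucketId(); the values
  // passed in main() are the example values from the javadoc, not real ids.
  static String pathKeyPrefix(long volumeId, long bucketId) {
    return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX;
  }

  public static void main(String[] args) {
    String prefix = pathKeyPrefix(-9223372036854772480L, -9223372036854771968L);
    System.out.println(prefix); // /-9223372036854772480/-9223372036854771968/

    // Every deletedDirectoryTable key of that bucket (/volumeId/bucketId/...)
    // starts with this prefix, which is what lets the checkpoint code bound
    // the range delete to a single bucket.
    System.out.println(
        (prefix + "-9223372036854770000/dir1").startsWith(prefix)); // true
  }
}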
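
The unit test guards the range boundaries with "sentinel" entries written under neighboring prefixes, built by shifting the last character of the bucket id. A quick stand-alone check of that ordering argument, with the prefix value reused from the javadoc example (class name is illustrative):

public final class SentinelKeySketch {

  public static void main(String[] args) {
    String prefix = "/-9223372036854772480/-9223372036854771968/";

    // Same arithmetic as addTestKeysToDeletedDirTable(): offset of the last
    // bucketId character, i.e. the one just before the trailing '/'.
    int offset = prefix.length() - 1 - 1;
    char last = prefix.charAt(offset);

    String before = prefix.substring(0, offset) + (char) (last - 1)
        + prefix.substring(offset);
    String after = prefix.substring(0, offset) + (char) (last + 1)
        + prefix.substring(offset);

    // The sentinel prefixes sort strictly outside the bucket's key range,
    // so entries written under them must survive the range delete.
    System.out.println(before.compareTo(prefix) < 0);         // true
    System.out.println(after.compareTo(prefix + "dir0") > 0); // true
  }
}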