diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java
new file mode 100644
index 00000000000..da499fde6f6
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.debug;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientVersion;
+import org.apache.hadoop.ozone.debug.container.ContainerCommands;
+import org.apache.hadoop.ozone.debug.container.ContainerKeyInfo;
+import org.apache.hadoop.ozone.debug.container.ContainerKeyInfoResponse;
+import org.apache.hadoop.ozone.debug.container.FindContainerKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import picocli.CommandLine;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Test class for {@link FindContainerKeys}.
+ */
+public class TestFindContainerKeys {
+ private DBStore dbStore;
+ @TempDir
+ private File tempDir;
+ private StringWriter stdout, stderr;
+ private PrintWriter pstdout, pstderr;
+ private CommandLine cmd;
+ private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create();
+ private String[] cmdArgs;
+
+ @BeforeEach
+ public void setup() throws IOException {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ stdout = new StringWriter();
+ pstdout = new PrintWriter(stdout);
+ stderr = new StringWriter();
+ pstderr = new PrintWriter(stderr);
+
+ cmd = new CommandLine(new OzoneDebug())
+ .addSubcommand(new ContainerCommands())
+ .addSubcommand(new FindContainerKeys())
+ .setOut(pstdout)
+ .setErr(pstderr);
+
+ dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db")
+ .setPath(tempDir.toPath()).addTable(KEY_TABLE).addTable(FILE_TABLE)
+ .addTable(DIRECTORY_TABLE)
+ .build();
+
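+ // Mirrors a CLI invocation along the lines of:
+ //   ozone debug find-keys --om-db <om.db path> --container-ids "1 2 3"
+ // (--container-ids takes the space-separated IDs as a single argument).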
+ cmdArgs =
+ new String[]{"find-keys", "--om-db", dbStore.getDbLocation().getAbsolutePath(), "--container-ids", "1 2 3"};
+ }
+
+ @AfterEach
+ public void shutdown() throws IOException {
+ closeDbStore();
+ pstderr.close();
+ stderr.close();
+ pstdout.close();
+ stdout.close();
+ }
+
+ @Test
+ void testFSO() throws Exception {
+ /*
+ Structure:
+ keyName (container id)
+
+ /vol1/bucket1
+ - key1 (1)
+ - dir1
+ - key2 (2)
+ - dir2
+ - key3 (3)
+ - key4 (3)
+ - key5 (4)
+ */
+ long volumeId = -123L;
+ long bucketId = -456L;
+ long dirObjectId1 = -789L;
+ long dirObjectId2 = -788L;
+ createDirectory(volumeId, bucketId, bucketId, dirObjectId1, "dir1");
+ createDirectory(volumeId, bucketId, dirObjectId1, dirObjectId2, "dir2");
+ createFile(volumeId, bucketId, "key1", -987L, bucketId, 1L);
+ createFile(volumeId, bucketId, "key2", -986L, dirObjectId1, 2L);
+ createFile(volumeId, bucketId, "key3", -985L, dirObjectId2, 3L);
+ createFile(volumeId, bucketId, "key4", -984L, dirObjectId2, 3L);
+ createFile(volumeId, bucketId, "key5", -983L, dirObjectId2, 4L);
+
+ closeDbStore();
+
+ int exitCode = cmd.execute(cmdArgs);
+ assertThat(exitCode).isEqualTo(0);
+
+ // Create expected response
+ List<ContainerKeyInfo> expectedKeysForContainer1 = new ArrayList<>();
+ expectedKeysForContainer1.add(new ContainerKeyInfo(1L, "vol1", volumeId, "bucket1", bucketId, "key1", bucketId));
+ List<ContainerKeyInfo> expectedKeysForContainer2 = new ArrayList<>();
+ expectedKeysForContainer2.add(
+ new ContainerKeyInfo(2L, "vol1", volumeId, "bucket1", bucketId, "dir1/key2", dirObjectId1));
+ List<ContainerKeyInfo> expectedKeysForContainer3 = new ArrayList<>();
+ expectedKeysForContainer3.add(
+ new ContainerKeyInfo(3L, "vol1", volumeId, "bucket1", bucketId, "dir1/dir2/key3", dirObjectId2));
+ expectedKeysForContainer3.add(
+ new ContainerKeyInfo(3L, "vol1", volumeId, "bucket1", bucketId, "dir1/dir2/key4", dirObjectId2));
+ Map<Long, List<ContainerKeyInfo>> expectedContainerIdToKeyInfos = new HashMap<>();
+ expectedContainerIdToKeyInfos.put(1L, expectedKeysForContainer1);
+ expectedContainerIdToKeyInfos.put(2L, expectedKeysForContainer2);
+ expectedContainerIdToKeyInfos.put(3L, expectedKeysForContainer3);
+ ContainerKeyInfoResponse expectedResponse = new ContainerKeyInfoResponse(5, expectedContainerIdToKeyInfos);
+ assertThat(GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class)).isEqualTo(expectedResponse);
+
+ assertThat(stderr.toString()).isEmpty();
+ }
+
+ @Test
+ void testNonFSO() throws Exception {
+ /*
+ Structure:
+ keyName (container id)
+
+ /vol1/bucket1
+ - key1 (1)
+ - dir1/key2 (2)
+ - dir1/dir2/key3 (3)
+ - dir1/dir2/key4 (3)
+ - key5 (4)
+ */
+ createKey("key1", 1L);
+ createKey("dir1/key2", 2L);
+ createKey("dir1/dir2/key3", 3L);
+ createKey("dir1/dir2/key4", 3L);
+ createKey("key5", 4L);
+
+ closeDbStore();
+
+ int exitCode = cmd.execute(cmdArgs);
+ assertThat(exitCode).isEqualTo(0);
+
+ // Create expected response
+ List<ContainerKeyInfo> expectedKeysForContainer1 = new ArrayList<>();
+ expectedKeysForContainer1.add(new ContainerKeyInfo(1L, "vol1", 0, "bucket1", 0, "key1", 0));
+ List<ContainerKeyInfo> expectedKeysForContainer2 = new ArrayList<>();
+ expectedKeysForContainer2.add(
+ new ContainerKeyInfo(2L, "vol1", 0, "bucket1", 0, "dir1/key2", 0));
+ List<ContainerKeyInfo> expectedKeysForContainer3 = new ArrayList<>();
+ expectedKeysForContainer3.add(
+ new ContainerKeyInfo(3L, "vol1", 0, "bucket1", 0, "dir1/dir2/key3", 0));
+ expectedKeysForContainer3.add(
+ new ContainerKeyInfo(3L, "vol1", 0, "bucket1", 0, "dir1/dir2/key4", 0));
+ Map<Long, List<ContainerKeyInfo>> expectedContainerIdToKeyInfos = new HashMap<>();
+ expectedContainerIdToKeyInfos.put(1L, expectedKeysForContainer1);
+ expectedContainerIdToKeyInfos.put(2L, expectedKeysForContainer2);
+ expectedContainerIdToKeyInfos.put(3L, expectedKeysForContainer3);
+ ContainerKeyInfoResponse expectedResponse = new ContainerKeyInfoResponse(5, expectedContainerIdToKeyInfos);
+ assertThat(GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class)).isEqualTo(expectedResponse);
+
+ assertThat(stderr.toString()).isEmpty();
+ }
+
+ /**
+ * Close the DB store before running the command, since the command
+ * itself has to reopen the DB and RocksDB allows only one lock holder.
+ */
+ private void closeDbStore() throws IOException {
+ if (dbStore != null && !dbStore.isClosed()) {
+ dbStore.close();
+ }
+ }
+
+ @Test
+ void testWhenThereAreNoKeysForContainerIds() throws Exception {
+
+ // create keys for tables
+ long volumeId = -123L;
+ long bucketId = -456L;
+ createFile(volumeId, bucketId, "key1", -987L, bucketId, 4L);
+ createKey("key2", 5L);
+ createKey("key3", 6L);
+
+ closeDbStore();
+
+ int exitCode = cmd.execute(cmdArgs);
+ assertThat(exitCode).isEqualTo(0);
+
+ assertThat(stderr.toString()).contains("No keys were found for container IDs: [1, 2, 3]\n" + "Keys processed: 3\n");
+
+ assertThat(stdout.toString()).isEmpty();
+ }
+
+ private void createFile(long volumeId, long bucketId, String keyName, long objectId, long parentId, long containerId)
+ throws Exception {
+ try (Table<byte[], byte[]> table = dbStore.getTable(FILE_TABLE)) {
+ // format: /volumeId/bucketId/parentId(bucketId)/keyName
+ String key =
+ OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX + parentId + OM_KEY_PREFIX + keyName;
+
+ OmKeyInfo value = getOmKeyInfo("vol1", "bucket1", keyName, containerId, objectId, parentId);
+
+ table.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray());
+ }
+ }
+
+ private void createKey(String keyName, long containerId) throws Exception {
+ try (Table<byte[], byte[]> table = dbStore.getTable(KEY_TABLE)) {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ // format: /volumeName/bucketName/keyName
+ String key = OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX + keyName;
+
+ // generate table value
+ OmKeyInfo value = getOmKeyInfo(volumeName, bucketName, keyName, containerId, 0, 0);
+
+ table.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray());
+ }
+ }
+
+ private void createDirectory(long volumeId, long bucketId, long parentId, long objectId, String keyName)
+ throws Exception {
+ try (Table<byte[], byte[]> table = dbStore.getTable(DIRECTORY_TABLE)) {
+
+ // format: /volumeId/bucketId/parentId(bucketId)/keyName
+ String key =
+ OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX + parentId + OM_KEY_PREFIX + keyName;
+
+ OmDirectoryInfo value = OMRequestTestUtils.createOmDirectoryInfo(keyName, objectId, parentId);
+
+ table.put(key.getBytes(UTF_8), value.getProtobuf().toByteArray());
+ }
+ }
+
+ private static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+ String keyName, long containerId,
+ long objectId,
+ long parentId) {
+ return OMRequestTestUtils
+ .createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE), objectId, parentId,
+ new OmKeyLocationInfoGroup(0L, Collections.singletonList(new OmKeyLocationInfo.Builder()
+ .setBlockID(new BlockID(containerId, 1))
+ .build())))
+ .build();
+ }
+
+}
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 9651c16175a..b3f3c400b34 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -510,7 +510,7 @@ default String getOzonePathKey(long volumeId, long bucketId,
}
/**
- * Given ozone path key, component id, return the corresponding
+ * Given ozone path key, component id, return the corresponding
* DB path key for delete table.
*
* @param objectId - object Id
@@ -611,4 +611,26 @@ String getMultipartKey(long volumeId, long bucketId,
*/
boolean containsIncompleteMPUs(String volume, String bucket)
throws IOException;
+
+ /**
+ * Helper method to generate /volumeId/bucketId/ DB key prefix from given
+ * volume name and bucket name as a prefix for FSO buckets.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @return /volumeId/bucketId/
+ * e.g. /-9223372036854772480/-9223372036854771968/
+ */
+ String getOzonePathKeyForFso(String volumeName, String bucketName)
+ throws IOException;
+
+ /**
+ * Helper method to generate /volumeId/bucketId/ DB key prefix from given
+ * volume id and bucket id as a prefix for FSO buckets.
+ *
+ * @param volumeId volume id
+ * @param bucketId bucket id
+ * @return /volumeId/bucketId/
+ * e.g. /-9223372036854772480/-9223372036854771968/
+ */
+ String getOzonePathKeyForFso(long volumeId, long bucketId);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 982e04df04d..b06b4c256fb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -2185,4 +2185,17 @@ public void deleteWithBatch(AutoCloseable batchOperator, String id)
}
}
}
+
+ @Override
+ public String getOzonePathKeyForFso(String volumeName, String bucketName)
+ throws IOException {
+ final long volumeId = getVolumeId(volumeName);
+ final long bucketId = getBucketId(volumeName, bucketName);
+ return getOzonePathKeyForFso(volumeId, bucketId);
+ }
+
+ @Override
+ public String getOzonePathKeyForFso(long volumeId, long bucketId) {
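+ // e.g. volumeId -9223372036854772480 and bucketId -9223372036854771968
+ // yield "/-9223372036854772480/-9223372036854771968/"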
+ return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index eb37e399dfe..5d16e1829ed 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -98,7 +98,6 @@
import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath;
import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive;
import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle;
-import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE;
/**
@@ -507,8 +506,8 @@ private static void deleteKeysFromDelDirTableInSnapshotScope(
String bucketName) throws IOException {
// Range delete start key (inclusive)
- final String keyPrefix = getOzonePathKeyForFso(omMetadataManager,
- volumeName, bucketName);
+ final String keyPrefix = omMetadataManager
+ .getOzonePathKeyForFso(volumeName, bucketName);
try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iter = omMetadataManager.getDeletedDirTable().iterator(keyPrefix)) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java
index fe0f6e111ed..e928c5be63f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java
@@ -63,7 +63,6 @@
import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
-import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
/**
* Snapshot BG Service for deleted directory deep clean and exclusive size
@@ -211,8 +210,9 @@ public BackgroundTaskResult call() {
.getKeyTable(bucketInfo.getBucketLayout());
}
- String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
- currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+ String dbBucketKeyForDir = metadataManager
+ .getOzonePathKeyForFso(currSnapInfo.getVolumeName(),
+ currSnapInfo.getBucketName());
try (ReferenceCounted<OmSnapshot>
rcCurrOmSnapshot = omSnapshotManager.getActiveSnapshot(
currSnapInfo.getVolumeName(),
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
index 89823995d0c..77b3aebef67 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
@@ -186,8 +186,8 @@ public static Map<String, String> getColumnFamilyToKeyPrefixMap(
String bucketName
) throws IOException {
String keyPrefix = getOzonePathKey(volumeName, bucketName);
- String keyPrefixFso = getOzonePathKeyForFso(omMetadataManager, volumeName,
- bucketName);
+ String keyPrefixFso = omMetadataManager
+ .getOzonePathKeyForFso(volumeName, bucketName);
Map<String, String> columnFamilyToPrefixMap = new HashMap<>();
columnFamilyToPrefixMap.put(KEY_TABLE, keyPrefix);
@@ -216,27 +216,4 @@ public static String getOzonePathKey(String volumeName,
OM_KEY_PREFIX;
}
- /**
- * Helper method to generate /volumeId/bucketId/ DB key prefix from given
- * volume name and bucket name as a prefix for FSO buckets.
- * Follows:
- * {@link OmMetadataManagerImpl#getOzonePathKey(long, long, long, String)}.
- *
- * Note: Currently, this is only intended to be a special use case in
- * Snapshot. If this is used elsewhere, consider moving this to
- * {@link OMMetadataManager}.
- *
- * @param volumeName volume name
- * @param bucketName bucket name
- * @return /volumeId/bucketId/
- * e.g. /-9223372036854772480/-9223372036854771968/
- */
- public static String getOzonePathKeyForFso(OMMetadataManager metadataManager,
- String volumeName,
- String bucketName)
- throws IOException {
- final long volumeId = metadataManager.getVolumeId(volumeName);
- final long bucketId = metadataManager.getBucketId(volumeName, bucketName);
- return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX;
- }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 21b94ce5f05..2f5daf1ce28 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -559,6 +560,23 @@ public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucket
new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false));
}
+ public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, String keyName,
+ RatisReplicationConfig replicationConfig, long objectId,
+ long parentId, OmKeyLocationInfoGroup omKeyLocationInfoGroup) {
+ return new OmKeyInfo.Builder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setFileName(OzoneFSUtils.getFileName(keyName))
+ .setReplicationConfig(replicationConfig)
+ .setParentObjectID(parentId)
+ .setObjectID(objectId)
+ .setUpdateID(0L)
+ .setCreationTime(Time.now())
+ .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup)
+ .setDataSize(1000L);
+ }
+
/**
* Create OmDirectoryInfo.
*/
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
index 7f74f3d17ec..27c289a0a46 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
-import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils;
import org.apache.hadoop.util.Time;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -215,8 +214,8 @@ private Set<String> addTestKeysToDeletedDirTable(String volumeName,
// Add deletedDirectoryTable key entries that "surround" the snapshot scope
Set<String> sentinelKeys = new HashSet<>();
- final String dbKeyPfx = SnapshotUtils.getOzonePathKeyForFso(
- omMetadataManager, volumeName, bucketName);
+ final String dbKeyPfx = omMetadataManager
+ .getOzonePathKeyForFso(volumeName, bucketName);
// Calculate offset to bucketId's last character in dbKeyPfx.
// First -1 for offset, second -1 for second to last char (before '/')
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
index 5592926bf88..79f9e9f6dab 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
@@ -79,7 +79,8 @@
ListSubcommand.class,
InfoSubcommand.class,
ExportSubcommand.class,
- InspectSubcommand.class
+ InspectSubcommand.class,
+ FindContainerKeys.class
})
@MetaInfServices(SubcommandWithParent.class)
public class ContainerCommands implements Callable<Void>, SubcommandWithParent {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java
new file mode 100644
index 00000000000..6d068108f0a
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.debug.container;
+
+import java.util.Objects;
+
+/**
+ * Class that holds basic key data in relation to the container it is in.
+ */
+public final class ContainerKeyInfo {
+
+ private final long containerID;
+ private final String volumeName;
+ private final long volumeId;
+ private final String bucketName;
+ private final long bucketId;
+ private final String keyName;
+ private final long parentId;
+
+ public ContainerKeyInfo(long containerID, String volumeName, long volumeId,
+ String bucketName, long bucketId, String keyName,
+ long parentId) {
+ this.containerID = containerID;
+ this.volumeName = volumeName;
+ this.volumeId = volumeId;
+ this.bucketName = bucketName;
+ this.bucketId = bucketId;
+ this.keyName = keyName;
+ this.parentId = parentId;
+ }
+
+ public long getContainerID() {
+ return containerID;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+
+ public String getBucketName() {
+ return bucketName;
+ }
+
+ public String getKeyName() {
+ return keyName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (!(o instanceof ContainerKeyInfo)) {
+ return false;
+ }
+ ContainerKeyInfo that = (ContainerKeyInfo) o;
+ return containerID == that.containerID && volumeId == that.volumeId && bucketId == that.bucketId &&
+ parentId == that.parentId && Objects.equals(volumeName, that.volumeName) &&
+ Objects.equals(bucketName, that.bucketName) && Objects.equals(keyName, that.keyName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(containerID, volumeName, volumeId, bucketName, bucketId, keyName, parentId);
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java
new file mode 100644
index 00000000000..ccf00a65b74
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.debug.container;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Response object for the container key scanner.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public final class ContainerKeyInfoResponse {
+
+ private final long keysProcessed;
+ private final Map<Long, List<ContainerKeyInfo>> containerIdToKeyInfos;
+
+ public ContainerKeyInfoResponse(long keysProcessed, Map<Long, List<ContainerKeyInfo>> containerIdToKeyInfos) {
+ this.keysProcessed = keysProcessed;
+ this.containerIdToKeyInfos = containerIdToKeyInfos;
+ }
+
+ public long getKeysProcessed() {
+ return keysProcessed;
+ }
+
+ public Map<Long, List<ContainerKeyInfo>> getContainerIdToKeyInfos() {
+ return containerIdToKeyInfos;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (!(o instanceof ContainerKeyInfoResponse)) {
+ return false;
+ }
+ ContainerKeyInfoResponse that = (ContainerKeyInfoResponse) o;
+ return keysProcessed == that.keysProcessed && Objects.equals(containerIdToKeyInfos, that.containerIdToKeyInfos);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(keysProcessed, containerIdToKeyInfos);
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java
new file mode 100644
index 00000000000..ffed74759e5
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.container;
+
+import com.google.common.collect.Sets;
+import com.google.gson.GsonBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.debug.OzoneDebug;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.kohsuke.MetaInfServices;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH;
+
+/**
+ * Finds keys that reference one or more containers.
+ */
+@CommandLine.Command(
+ name = "find-keys",
+ description = "Find keys that reference a container"
+)
+@MetaInfServices(SubcommandWithParent.class)
+public class FindContainerKeys
+ implements Callable<Void>, SubcommandWithParent {
+
+ public static final Logger LOG =
+ LoggerFactory.getLogger(FindContainerKeys.class);
+ @CommandLine.Spec
+ private static CommandLine.Model.CommandSpec spec;
+ @CommandLine.Option(names = {"--om-db"},
+ paramLabel = "",
+ required = true,
+ description = "Path to OM DB.")
+ private String dbPath;
+ @CommandLine.Option(names = {"--container-ids"},
+ split = " ",
+ paramLabel = "",
+ required = true,
+ description = "One or more container IDs separated by space.")
+ private Set<Long> containerIds;
+ private static Map<String, OmDirectoryInfo> directoryTable;
+ private static boolean isDirTableLoaded = false;
+
+ @Override
+ public Void call() throws Exception {
+ OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
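+ // OmMetadataManagerImpl opens "om.db" under ozone.om.db.dirs, so point
+ // that directory at the parent of the supplied DB path.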
+ ozoneConfiguration.set("ozone.om.db.dirs",
+ dbPath.substring(0, dbPath.lastIndexOf("/")));
+ OmMetadataManagerImpl omMetadataManager =
+ new OmMetadataManagerImpl(ozoneConfiguration, null);
+
+ ContainerKeyInfoResponse containerKeyInfoResponse =
+ scanDBForContainerKeys(omMetadataManager);
+
+ try {
+ printOutput(containerKeyInfoResponse);
+ } finally {
+ closeStdChannels();
+ omMetadataManager.stop();
+ }
+
+ return null;
+ }
+
+ private void closeStdChannels() {
+ out().close();
+ err().close();
+ }
+
+ private Map<String, OmDirectoryInfo> getDirectoryTableData(
+ OmMetadataManagerImpl metadataManager)
+ throws IOException {
+ Map<String, OmDirectoryInfo> directoryTableData = new HashMap<>();
+
+ try (
+ TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+ iterator = metadataManager.getDirectoryTable().iterator()) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmDirectoryInfo> next = iterator.next();
+ directoryTableData.put(next.getKey(), next.getValue());
+ }
+ }
+
+ return directoryTableData;
+ }
+
+ @Override
+ public Class<?> getParentType() {
+ return OzoneDebug.class;
+ }
+
+ private static PrintWriter err() {
+ return spec.commandLine().getErr();
+ }
+
+ private static PrintWriter out() {
+ return spec.commandLine().getOut();
+ }
+
+ public Map<Long, Path> getAbsolutePathForObjectIDs(
+ long bucketId, String prefix, Optional<Set<Long>> dirObjIds) {
+ // Root of a bucket would always have the
+ // key as /volumeId/bucketId/bucketId/
+ if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) {
+ return Collections.emptyMap();
+ }
+ Set<Long> objIds = Sets.newHashSet(dirObjIds.get());
+ Map<Long, Path> objectIdPathMap = new HashMap<>();
+ Queue<Pair<Long, Path>> objectIdPathVals = new LinkedList<>();
+ Pair<Long, Path> root = Pair.of(bucketId, ROOT_PATH);
+ objectIdPathVals.add(root);
+ addToPathMap(root, objIds, objectIdPathMap);
+
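+ // Breadth-first walk of the directory table from the bucket root,
+ // resolving each requested directory objectId to its absolute path.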
+ while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) {
+ Pair<Long, Path> parentPair = objectIdPathVals.poll();
+ String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX;
+
+ Iterator<String> subDirIterator =
+ directoryTable.keySet().stream()
+ .filter(k -> k.startsWith(subDir))
+ .collect(Collectors.toList()).iterator();
+ while (!objIds.isEmpty() && subDirIterator.hasNext()) {
+ OmDirectoryInfo childDir =
+ directoryTable.get(subDirIterator.next());
+ Pair<Long, Path> pathVal = Pair.of(childDir.getObjectID(),
+ parentPair.getValue().resolve(childDir.getName()));
+ addToPathMap(pathVal, objIds, objectIdPathMap);
+ objectIdPathVals.add(pathVal);
+ }
+ }
+ // Invalid directory objectId which does not exist in the given bucket.
+ if (!objIds.isEmpty()) {
+ throw new IllegalArgumentException(
+ "Dir object Ids required but not found in bucket: " + objIds);
+ }
+ return objectIdPathMap;
+ }
+
+ private void addToPathMap(Pair<Long, Path> objectIDPath,
+ Set<Long> dirObjIds, Map<Long, Path> pathMap) {
+ if (dirObjIds.contains(objectIDPath.getKey())) {
+ pathMap.put(objectIDPath.getKey(), objectIDPath.getValue());
+ dirObjIds.remove(objectIDPath.getKey());
+ }
+ }
+
+ private ContainerKeyInfoResponse scanDBForContainerKeys(
+ OmMetadataManagerImpl omMetadataManager)
+ throws IOException {
+ Map<Long, List<ContainerKeyInfo>> containerKeyInfos = new HashMap<>();
+
+ long keysProcessed = 0;
+
+ keysProcessed += processFileTable(containerKeyInfos, omMetadataManager);
+ keysProcessed += processKeyTable(containerKeyInfos, omMetadataManager);
+
+ return new ContainerKeyInfoResponse(keysProcessed, containerKeyInfos);
+ }
+
+ private long processKeyTable(
+ Map<Long, List<ContainerKeyInfo>> containerKeyInfos,
+ OmMetadataManagerImpl omMetadataManager) throws IOException {
+ long keysProcessed = 0L;
+
+ // Any bucket layout except FSO
+ Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable(
+ BucketLayout.DEFAULT);
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = keyTable.iterator()) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+ keysProcessed++;
+
+ if (Objects.isNull(next.getValue().getKeyLocationVersions())) {
+ continue;
+ }
+
+ processKeyData(containerKeyInfos, next.getValue());
+ }
+ }
+
+ return keysProcessed;
+ }
+
+
+ private long processFileTable(
+ Map<Long, List<ContainerKeyInfo>> containerKeyInfos,
+ OmMetadataManagerImpl omMetadataManager)
+ throws IOException {
+ long keysProcessed = 0L;
+
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = omMetadataManager.getFileTable().iterator()) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+ keysProcessed++;
+
+ if (Objects.isNull(next.getValue().getKeyLocationVersions())) {
+ continue;
+ }
+
+ processFileData(containerKeyInfos, next.getKey(), next.getValue(),
+ omMetadataManager);
+ }
+ }
+
+ return keysProcessed;
+ }
+
+ /**
+ * @param key file table key.
+ * @return Pair of volume id and bucket id.
+ */
+ private Pair<Long, Long> parseKey(String key) {
+ String[] keyParts = key.split(OM_KEY_PREFIX);
+ return Pair.of(Long.parseLong(keyParts[1]), Long.parseLong(keyParts[2]));
+ }
+
+ private void processKeyData(
+ Map<Long, List<ContainerKeyInfo>> containerKeyInfos,
+ OmKeyInfo keyInfo) {
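+ // Key table entries are keyed by volume/bucket/key names rather than
+ // object ids, so volume and bucket ids are not resolved here and stay 0.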
+ long volumeId = 0L;
+ long bucketId = 0L;
+
+ for (OmKeyLocationInfoGroup locationInfoGroup :
+ keyInfo.getKeyLocationVersions()) {
+ for (List<OmKeyLocationInfo> locationInfos :
+ locationInfoGroup.getLocationVersionMap().values()) {
+ for (OmKeyLocationInfo locationInfo : locationInfos) {
+ if (!containerIds.contains(locationInfo.getContainerID())) {
+ continue;
+ }
+
+ List<ContainerKeyInfo> containerKeyInfoList = new ArrayList<>();
+ containerKeyInfoList.add(
+ new ContainerKeyInfo(locationInfo.getContainerID(),
+ keyInfo.getVolumeName(), volumeId, keyInfo.getBucketName(),
+ bucketId, keyInfo.getKeyName(), keyInfo.getParentObjectID()));
+
+ containerKeyInfos.merge(locationInfo.getContainerID(),
+ containerKeyInfoList,
+ (existingList, newList) -> {
+ existingList.addAll(newList);
+ return existingList;
+ });
+ }
+ }
+ }
+ }
+
+ private void processFileData(
+ Map<Long, List<ContainerKeyInfo>> containerKeyInfos,
+ String key, OmKeyInfo keyInfo, OmMetadataManagerImpl omMetadataManager)
+ throws IOException {
+
+ Pair<Long, Long> volumeAndBucketId = parseKey(key);
+ Long volumeId = volumeAndBucketId.getLeft();
+ Long bucketId = volumeAndBucketId.getRight();
+
+ for (OmKeyLocationInfoGroup locationInfoGroup :
+ keyInfo.getKeyLocationVersions()) {
+ for (List<OmKeyLocationInfo> locationInfos :
+ locationInfoGroup.getLocationVersionMap().values()) {
+ for (OmKeyLocationInfo locationInfo : locationInfos) {
+ if (!containerIds.contains(locationInfo.getContainerID())) {
+ continue;
+ }
+
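+ // Load the entire directory table into memory once, on the first
+ // matching key; it is needed to resolve FSO parent ids to full paths.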
+ if (!isDirTableLoaded) {
+ long start = System.currentTimeMillis();
+ directoryTable = getDirectoryTableData(omMetadataManager);
+ long end = System.currentTimeMillis();
+ LOG.info("directoryTable loaded in " + (end - start) + " ms.");
+ isDirTableLoaded = true;
+ }
+
+ String keyName = getFsoKeyWithPrefix(volumeId, bucketId, keyInfo,
+ omMetadataManager);
+
+ containerKeyInfos.merge(locationInfo.getContainerID(),
+ new ArrayList<>(Collections.singletonList(
+ new ContainerKeyInfo(locationInfo.getContainerID(),
+ keyInfo.getVolumeName(), volumeId,
+ keyInfo.getBucketName(), bucketId, keyName,
+ keyInfo.getParentObjectID()))),
+ (existingList, newList) -> {
+ existingList.addAll(newList);
+ return existingList;
+ });
+ }
+ }
+ }
+ }
+
+ private static String removeBeginningSlash(String path) {
+ if (path.startsWith(OM_KEY_PREFIX)) {
+ return path.substring(1);
+ }
+
+ return path;
+ }
+
+ private String getFsoKeyWithPrefix(long volumeId, long bucketId,
+ OmKeyInfo value,
+ OMMetadataManager omMetadataManager) {
+ String prefix = omMetadataManager.getOzonePathKeyForFso(volumeId, bucketId);
+ Set<Long> dirObjIds = new HashSet<>();
+ dirObjIds.add(value.getParentObjectID());
+ Map<Long, Path> absolutePaths =
+ getAbsolutePathForObjectIDs(bucketId, prefix, Optional.of(dirObjIds));
+ Path path = absolutePaths.get(value.getParentObjectID());
+ String keyPath;
+ if (path.toString().equals(OM_KEY_PREFIX)) {
+ keyPath = path.toString();
+ } else {
+ keyPath = path + OM_KEY_PREFIX;
+ }
+
+ return removeBeginningSlash(keyPath + value.getKeyName());
+ }
+
+ private void printOutput(ContainerKeyInfoResponse containerKeyInfoResponse) {
+ if (containerKeyInfoResponse.getContainerIdToKeyInfos().isEmpty()) {
+ err().println("No keys were found for container IDs: " + containerIds);
+ err().println(
+ "Keys processed: " + containerKeyInfoResponse.getKeysProcessed());
+ return;
+ }
+
+ out().print(new GsonBuilder().setPrettyPrinting().create()
+ .toJson(containerKeyInfoResponse));
+ }
+
+}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java
new file mode 100644
index 00000000000..d3e47a86e86
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+/**
+ * Unit tests for Ozone Debug tools.
+ */
+package org.apache.hadoop.ozone.debug;