From cf21fe21e45e443ed81b66552ae23ab20c179489 Mon Sep 17 00:00:00 2001 From: Christos Bisias Date: Tue, 31 Oct 2023 19:19:52 +0200 Subject: [PATCH 01/33] Add container key scanner command --- .../ozone/debug/TestContainerKeyScanner.java | 240 +++++++++++ .../ozone/om/request/OMRequestTestUtils.java | 51 +-- hadoop-ozone/tools/pom.xml | 19 + .../apache/hadoop/ozone/admin/om/OMAdmin.java | 6 +- .../hadoop/ozone/debug/ContainerKeyInfo.java | 84 ++++ .../ozone/debug/ContainerKeyInfoResponse.java | 47 +++ .../ozone/debug/ContainerKeyInfoWrapper.java | 43 ++ .../ozone/debug/ContainerKeyScanner.java | 394 ++++++++++++++++++ .../ozone/debug/TestContainerKeyScanner.java | 299 +++++++++++++ .../hadoop/ozone/debug/package-info.java | 24 ++ 10 files changed, 1173 insertions(+), 34 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java new file mode 100644 index 00000000000..3a52aca92b7 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -0,0 +1,240 @@ +/** + * Licensed to the 
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.debug; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import picocli.CommandLine; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Collections; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * This class tests `ozone debug ldb` CLI that reads from a RocksDB directory. 
+ */ +public class TestContainerKeyScanner { + private static final String KEY_TABLE = "keyTable"; + private static final String FILE_TABLE = "fileTable"; + private static final String DIRECTORY_TABLE = "directoryTable"; + private DBStore dbStore; + @TempDir + private File tempDir; + private StringWriter stdout, stderr; + private PrintWriter pstdout, pstderr; + private CommandLine cmd; + + private static final String KEYS_FOUND_OUTPUT = "{\n" + + " \"keysProcessed\": 3,\n" + + " \"containerKeys\": {\n" + + " \"1\": [\n" + + " {\n" + + " \"containerID\": 1,\n" + + " \"volumeName\": \"vol1\",\n" + + " \"volumeId\": -123,\n" + + " \"bucketName\": \"bucket1\",\n" + + " \"bucketId\": -456,\n" + + " \"keyName\": \"dir1/key1\",\n" + + " \"parentId\": -789\n" + + " }\n" + + " ],\n" + + " \"2\": [\n" + + " {\n" + + " \"containerID\": 2,\n" + + " \"volumeName\": \"vol1\",\n" + + " \"volumeId\": 0,\n" + + " \"bucketName\": \"bucket1\",\n" + + " \"bucketId\": 0,\n" + + " \"keyName\": \"key2\",\n" + + " \"parentId\": 0\n" + + " }\n" + + " ],\n" + + " \"3\": [\n" + + " {\n" + + " \"containerID\": 3,\n" + + " \"volumeName\": \"vol1\",\n" + + " \"volumeId\": 0,\n" + + " \"bucketName\": \"bucket1\",\n" + + " \"bucketId\": 0,\n" + + " \"keyName\": \"key3\",\n" + + " \"parentId\": 0\n" + + " }\n" + + " ]\n" + + " }\n" + + "}\n"; + + private static final String KEYS_NOT_FOUND_OUTPUT = + "No keys were found for container IDs: [1, 2, 3]\n" + + "Keys processed: 3\n"; + + @BeforeEach + public void setup() throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(); + stdout = new StringWriter(); + pstdout = new PrintWriter(stdout); + stderr = new StringWriter(); + pstderr = new PrintWriter(stderr); + + cmd = new CommandLine(new RDBParser()) + .addSubcommand(new ContainerKeyScanner()) + .setOut(pstdout) + .setErr(pstderr); + + dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db") + .setPath(tempDir.toPath()).addTable(KEY_TABLE).addTable(FILE_TABLE) + 
.addTable(DIRECTORY_TABLE) + .build(); + } + + @AfterEach + public void shutdown() throws IOException { + if (dbStore != null) { + dbStore.close(); + } + pstderr.close(); + stderr.close(); + pstdout.close(); + stdout.close(); + } + + @Test + void testWhenThereAreKeysForConatainerIds() throws IOException { + + // create keys for tables + long volumeId = -123L; + long bucketId = -456L; + long dirObjectId = -789L; + createDirectory(volumeId, bucketId, bucketId, dirObjectId, "dir1"); + createFile(volumeId, bucketId, "key1", -987L, dirObjectId, 1L); + createKey("key2", 2L); + createKey("key3", 3L); + + String[] cmdArgs = + {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", + "-ids", "1,2,3"}; + + int exitCode = cmd.execute(cmdArgs); + Assertions.assertEquals(0, exitCode); + + Assertions.assertEquals(KEYS_FOUND_OUTPUT, stdout.toString()); + + Assertions.assertTrue(stderr.toString().isEmpty()); + } + + @Test + void testWhenThereAreNotKeysForConatainerIds() throws IOException { + + // create keys for tables + long volumeId = -123L; + long bucketId = -456L; + createFile(volumeId, bucketId, "key1", -987L, bucketId, 4L); + createKey("key2", 5L); + createKey("key3", 6L); + + String[] cmdArgs = + {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", + "-ids", "1,2,3"}; + + int exitCode = cmd.execute(cmdArgs); + Assertions.assertEquals(0, exitCode); + + Assertions.assertEquals(KEYS_NOT_FOUND_OUTPUT, stdout.toString()); + + Assertions.assertTrue(stderr.toString().isEmpty()); + } + + private void createFile(long volumeId, long bucketId, String keyName, + long objectId, long parentId, long containerId) + throws IOException { + Table table = dbStore.getTable(FILE_TABLE); + + // format: /volumeId/bucketId/parentId(bucketId)/keyName + String key = + "/" + volumeId + "/" + bucketId + "/" + parentId + "/" + keyName; + + OmKeyInfo value = + getOmKeyInfo("vol1", "bucket1", keyName, containerId, objectId, + parentId); + + table.put(key.getBytes(UTF_8), + 
value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + } + + private void createKey(String keyName, long containerId) throws IOException { + Table table = dbStore.getTable(KEY_TABLE); + + String volumeName = "vol1"; + String bucketName = "bucket1"; + // format: /volumeName/bucketName/keyName + String key = "/" + volumeName + "/" + bucketName + "/" + keyName; + + // generate table value + OmKeyInfo value = + getOmKeyInfo(volumeName, bucketName, keyName, containerId, 0, 0); + + table.put(key.getBytes(UTF_8), + value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + } + + private void createDirectory(long volumeId, long bucketId, long parentId, + long objectId, String keyName) + throws IOException { + Table table = dbStore.getTable(DIRECTORY_TABLE); + + // format: /volumeId/bucketId/parentId(bucketId)/keyName + String key = + "/" + volumeId + "/" + bucketId + "/" + parentId + "/" + keyName; + + OmDirectoryInfo value = + OMRequestTestUtils.createOmDirectoryInfo(keyName, objectId, parentId); + + table.put(key.getBytes(UTF_8), value.getProtobuf().toByteArray()); + } + + private static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, + String keyName, long containerId, + long objectId, + long parentId) { + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, + keyName, HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, objectId, parentId, 1, 1, 1, false, + new ArrayList<>( + Collections.singletonList( + new OmKeyLocationInfo.Builder().setBlockID( + new BlockID(containerId, 1)).build()))); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 21b94ce5f05..cef0acb41b5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,6 @@ package org.apache.hadoop.ozone.om.request; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -34,6 +28,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -42,7 +38,9 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -50,39 +48,27 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - 
.SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AddAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RemoveAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignAdminRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantGetUserInfoRequest; @@ -93,13 +79,16 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; - import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 839d01f0fa8..5b2ac535b58 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -108,6 +108,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test + + org.mockito + mockito-core + test + + + org.mockito + mockito-junit-jupiter + + + 
org.junit.jupiter + junit-jupiter-params + test + + + org.mockito + mockito-junit-jupiter + test + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index ce7d4ed7a7c..3ae60bb7e47 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -32,9 +32,6 @@ import org.apache.hadoop.ozone.om.protocolPB.OmTransport; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.ratis.protocol.ClientId; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -43,6 +40,9 @@ import java.util.Collection; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; + /** * Subcommand for admin operations related to OM. */ diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java new file mode 100644 index 00000000000..bc4d45fc5be --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import java.util.Objects; + +/** + * Class that holds basic key data in relation to container it is in. + */ +public class ContainerKeyInfo { + + private final long containerID; + private final String volumeName; + private final long volumeId; + private final String bucketName; + private final long bucketId; + private final String keyName; + private final long parentId; + + public ContainerKeyInfo(long containerID, String volumeName, long volumeId, + String bucketName, long bucketId, String keyName, + long parentId) { + this.containerID = containerID; + this.volumeName = volumeName; + this.volumeId = volumeId; + this.bucketName = bucketName; + this.bucketId = bucketId; + this.keyName = keyName; + this.parentId = parentId; + } + + public long getContainerID() { + return containerID; + } + + public String getVolumeName() { + return volumeName; + } + + public String getBucketName() { + return bucketName; + } + + public String getKeyName() { + return keyName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ContainerKeyInfo that = (ContainerKeyInfo) o; + return containerID == that.containerID && volumeId == that.volumeId && + bucketId == that.bucketId && parentId == that.parentId && + Objects.equals(volumeName, that.volumeName) && + Objects.equals(bucketName, that.bucketName) && + Objects.equals(keyName, that.keyName); + } + + @Override + public int hashCode() { + return Objects.hash(containerID, 
volumeName, volumeId, bucketName, bucketId, + keyName, parentId); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java new file mode 100644 index 00000000000..b29283ecdb3 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.util.List; +import java.util.Map; + +/** + * Class for response for container key scanner. 
+ */ +@JsonInclude(JsonInclude.Include.NON_NULL) +public class ContainerKeyInfoResponse { + + private final long keysProcessed; + private final Map> containerKeys; + + public ContainerKeyInfoResponse( + long keysProcessed, Map> containerKeys) { + this.keysProcessed = keysProcessed; + this.containerKeys = containerKeys; + } + + public long getKeysProcessed() { + return keysProcessed; + } + + public Map> getContainerKeys() { + return containerKeys; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java new file mode 100644 index 00000000000..41c3ee9cdd9 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import java.util.List; + +/** + * Class for aggregation of collected data. 
+ */ +public class ContainerKeyInfoWrapper { + + private final long keysProcessed; + private final List containerKeyInfos; + + public ContainerKeyInfoWrapper(long keysProcessed, + List containerKeyInfos) { + this.keysProcessed = keysProcessed; + this.containerKeyInfos = containerKeyInfos; + } + + public long getKeysProcessed() { + return keysProcessed; + } + + public List getContainerKeyInfos() { + return containerKeyInfos; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java new file mode 100644 index 00000000000..e31cd7bec3e --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -0,0 +1,394 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.debug; + +import com.google.common.collect.Sets; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.Callable; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; + +/** + * Parser for a list of container IDs, to scan for keys. 
+ */ +@CommandLine.Command( + name = "ckscanner", + description = "Parse a list of container IDs" +) +@MetaInfServices(SubcommandWithParent.class) +public class ContainerKeyScanner implements Callable, + SubcommandWithParent { + + public static final String FILE_TABLE = "fileTable"; + public static final String KEY_TABLE = "keyTable"; + public static final String DIRECTORY_TABLE = "directoryTable"; + + @CommandLine.Spec + private static CommandLine.Model.CommandSpec spec; + + @CommandLine.ParentCommand + private RDBParser parent; + + @CommandLine.Option(names = {"-ids", "--container-ids"}, + split = ",", + paramLabel = "containerIDs", + required = true, + description = "Set of container IDs to be used for getting all " + + "their keys. Example-usage: 1,11,2.(Separated by ',')") + private Set containerIds; + + @Override + public Void call() throws Exception { + ContainerKeyInfoWrapper containerKeyInfoWrapper = + scanDBForContainerKeys(parent.getDbPath()); + + printOutput(containerKeyInfoWrapper); + + return null; + } + + @Override + public Class getParentType() { + return RDBParser.class; + } + + private static PrintWriter err() { + return spec.commandLine().getErr(); + } + + private static PrintWriter out() { + return spec.commandLine().getOut(); + } + + // TODO optimize this method to use single objectId instead of a set + // and to return pair of objectId and path instead of a map. 
+ // Further optimization could be done to reuse db + // and not connect to it for every method call + public Map getAbsolutePathForObjectIDs( + long bucketId, String prefix, Optional> dirObjIds, + String dbPath) + throws IOException, RocksDBException { + // Root of a bucket would always have the + // key as /volumeId/bucketId/bucketId/ + if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) { + return Collections.emptyMap(); + } + Set objIds = Sets.newHashSet(dirObjIds.get()); + Map objectIdPathMap = new HashMap<>(); + Queue> objectIdPathVals = new LinkedList<>(); + Pair root = Pair.of(bucketId, Paths.get(OZONE_URI_DELIMITER)); + objectIdPathVals.add(root); + addToPathMap(root, objIds, objectIdPathMap); + + while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) { + Pair parentPair = objectIdPathVals.poll(); + // read directoryTable + List columnFamilyDescriptors = + RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List columnFamilyHandles = new ArrayList<>(); + + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, + columnFamilyDescriptors, columnFamilyHandles)) { + dbPath = removeTrailingSlashIfNeeded(dbPath); + DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( + Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new IllegalStateException("Incorrect DB Path"); + } + + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(DIRECTORY_TABLE); + if (columnFamilyDefinition == null) { + throw new IllegalStateException( + "Table with name" + DIRECTORY_TABLE + " not found"); + } + + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getName().getBytes(UTF_8), + columnFamilyHandles); + if (columnFamilyHandle == null) { + throw new IllegalStateException("columnFamilyHandle is null"); + } + + try (ManagedRocksIterator iterator = new ManagedRocksIterator( + db.get().newIterator(columnFamilyHandle))) { + iterator.get().seekToFirst(); + while 
(!objIds.isEmpty() && iterator.get().isValid()) { + String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; + String key = new String(iterator.get().key(), UTF_8); + if (!key.contains(subDir)) { + iterator.get().next(); + continue; + } + + OmDirectoryInfo childDir = + ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(iterator.get().value())); + Pair pathVal = Pair.of(childDir.getObjectID(), + parentPair.getValue().resolve(childDir.getName())); + addToPathMap(pathVal, objIds, objectIdPathMap); + objectIdPathVals.add(pathVal); + iterator.get().next(); + } + } + } + } + // Invalid directory objectId which does not exist in the given bucket. + if (!objIds.isEmpty()) { + throw new IllegalArgumentException( + "Dir object Ids required but not found in bucket: " + objIds); + } + return objectIdPathMap; + } + + private void addToPathMap(Pair objectIDPath, + Set dirObjIds, Map pathMap) { + if (dirObjIds.contains(objectIDPath.getKey())) { + pathMap.put(objectIDPath.getKey(), objectIDPath.getValue()); + dirObjIds.remove(objectIDPath.getKey()); + } + } + + private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) + throws RocksDBException, IOException { + List containerKeyInfos = new ArrayList<>(); + + List columnFamilyDescriptors = + RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List columnFamilyHandles = new ArrayList<>(); + long keysProcessed = 0; + + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, + columnFamilyDescriptors, columnFamilyHandles)) { + dbPath = removeTrailingSlashIfNeeded(dbPath); + DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( + Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new IllegalStateException("Incorrect DB Path"); + } + + keysProcessed += + processTable(dbDefinition, columnFamilyHandles, db, + containerKeyInfos, FILE_TABLE, dbPath); + keysProcessed += + processTable(dbDefinition, columnFamilyHandles, db, + containerKeyInfos, 
KEY_TABLE, dbPath); + } + return new ContainerKeyInfoWrapper(keysProcessed, containerKeyInfos); + } + + private long processTable(DBDefinition dbDefinition, + List columnFamilyHandles, + ManagedRocksDB db, + List containerKeyInfos, + String tableName, String dbPath) + throws IOException, RocksDBException { + long keysProcessed = 0; + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(tableName); + if (columnFamilyDefinition == null) { + throw new IllegalStateException( + "Table with name" + tableName + " not found"); + } + + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getName().getBytes(UTF_8), + columnFamilyHandles); + if (columnFamilyHandle == null) { + throw new IllegalStateException("columnFamilyHandle is null"); + } + + try (ManagedRocksIterator iterator = new ManagedRocksIterator( + db.get().newIterator(columnFamilyHandle))) { + iterator.get().seekToFirst(); + while (iterator.get().isValid()) { + OmKeyInfo value = ((OmKeyInfo) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(iterator.get().value())); + List keyLocationVersions = + value.getKeyLocationVersions(); + if (Objects.isNull(keyLocationVersions)) { + iterator.get().next(); + keysProcessed++; + continue; + } + + long volumeId = 0; + long bucketId = 0; + // volumeId and bucketId are only applicable to file table + if (tableName.equals(FILE_TABLE)) { + String key = new String(iterator.get().key(), UTF_8); + String[] keyParts = key.split(OM_KEY_PREFIX); + volumeId = Long.parseLong(keyParts[1]); + bucketId = Long.parseLong(keyParts[2]); + } + + for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { + for (List locationInfos : + locationInfoGroup.getLocationVersionMap().values()) { + for (OmKeyLocationInfo locationInfo : locationInfos) { + if (containerIds.contains(locationInfo.getContainerID())) { + // Generate asbolute key path for FSO keys + StringBuilder keyName = new StringBuilder(); + if 
(tableName.equals(FILE_TABLE)) { + handleFileTable(dbPath, volumeId, bucketId, value, keyName); + } + keyName.append(value.getKeyName()); + containerKeyInfos.add( + new ContainerKeyInfo(locationInfo.getContainerID(), + value.getVolumeName(), volumeId, value.getBucketName(), + bucketId, keyName.toString(), + value.getParentObjectID())); + } + } + } + } + iterator.get().next(); + keysProcessed++; + } + return keysProcessed; + } + } + + private static String removeBeginningSlash(String path) { + if (path.startsWith(OM_KEY_PREFIX)) { + return path.substring(1); + } + + return path; + } + + private void handleFileTable(String dbPath, long volumeId, long bucketId, + OmKeyInfo value, StringBuilder keyName) + throws IOException, RocksDBException { + String prefix = + OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + + OM_KEY_PREFIX; + Set dirObjIds = new HashSet<>(); + dirObjIds.add(value.getParentObjectID()); + Map absolutePaths = + getAbsolutePathForObjectIDs(bucketId, prefix, + Optional.of(dirObjIds), dbPath); + Path path = absolutePaths.get(value.getParentObjectID()); + String keyPath; + if (path.toString().equals(OM_KEY_PREFIX)) { + keyPath = path.toString(); + } else { + keyPath = path + OM_KEY_PREFIX; + } + + keyName.append(removeBeginningSlash(keyPath)); + } + + + private ColumnFamilyHandle getColumnFamilyHandle( + byte[] name, List columnFamilyHandles) { + return columnFamilyHandles + .stream() + .filter( + handle -> { + try { + return Arrays.equals(handle.getName(), name); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + }) + .findAny() + .orElse(null); + } + + private String removeTrailingSlashIfNeeded(String dbPath) { + if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { + dbPath = dbPath.substring(0, dbPath.length() - 1); + } + return dbPath; + } + + private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { + List containerKeyInfos = + containerKeyInfoWrapper.getContainerKeyInfos(); + if (containerKeyInfos.isEmpty()) 
{ + try (PrintWriter out = out()) { + out.println("No keys were found for container IDs: " + + containerIds); + out.println( + "Keys processed: " + containerKeyInfoWrapper.getKeysProcessed()); + } + return; + } + + Map> infoMap = new HashMap<>(); + + for (long id : containerIds) { + List tmpList = new ArrayList<>(); + + for (ContainerKeyInfo info : containerKeyInfos) { + if (id == info.getContainerID()) { + tmpList.add(info); + } + } + infoMap.put(id, tmpList); + } + + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + String prettyJson = gson.toJson( + new ContainerKeyInfoResponse(containerKeyInfoWrapper.getKeysProcessed(), + infoMap)); + try (PrintWriter out = out()) { + out.println(prettyJson); + } + } + +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java new file mode 100644 index 00000000000..85e7361871b --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -0,0 +1,299 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.debug; + +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; + +/** + * Unit tests for {@link ContainerKeyScanner}. + */ +@ExtendWith(MockitoExtension.class) +public class TestContainerKeyScanner { + + /* + private ContainerKeyScanner containerKeyScanner; + @Mock + private ContainerKeyInfoWrapper containerKeyInfoWrapper; + + @BeforeEach + void setup() { + containerKeyScanner = new ContainerKeyScanner(); + containerKeyScanner.setContainerIds( + Stream.of(1L, 2L, 3L).collect(Collectors.toSet())); + } + + @Test + void testOutputWhenContainerKeyInfosEmpty() { + when(containerKeyInfoWrapper.getContainerKeyInfos()) + .thenReturn(new ArrayList<>()); + long processedKeys = new Random().nextLong(); + when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); + + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + System.setOut(new PrintStream(outContent)); + + containerKeyScanner.printOutput(containerKeyInfoWrapper); + + String expectedOutput = "No keys were found for container IDs: " + + containerKeyScanner.getContainerIds() + "\n" + + "Keys processed: " + processedKeys + "\n"; + assertEquals(expectedOutput, outContent.toString()); + } + + @Test + void testOutputWhenContainerKeyInfosNotEmptyAndKeyMatchesContainerId() { + List containerKeyInfos = Stream.of( + new ContainerKeyInfo(1L, "vol1", "bucket1", "key1"), + new ContainerKeyInfo(2L, "vol2", "bucket2", "key2"), + new ContainerKeyInfo(3L, "vol3", "bucket3", "key3") + ).collect(Collectors.toList()); + when(containerKeyInfoWrapper.getContainerKeyInfos()) + .thenReturn(containerKeyInfos); + long processedKeys = containerKeyInfos.size(); + when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); + + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + System.setOut(new PrintStream(outContent)); + + containerKeyScanner.printOutput(containerKeyInfoWrapper); + + String 
expectedOutput = "{\n" + + " \"keysProcessed\": 3,\n" + + " \"containerKeys\": {\n" + + " \"1\": [\n" + + " {\n" + + " \"containerID\": 1,\n" + + " \"volumeName\": \"vol1\",\n" + + " \"bucketName\": \"bucket1\",\n" + + " \"keyName\": \"key1\"\n" + + " }\n" + + " ],\n" + + " \"2\": [\n" + + " {\n" + + " \"containerID\": 2,\n" + + " \"volumeName\": \"vol2\",\n" + + " \"bucketName\": \"bucket2\",\n" + + " \"keyName\": \"key2\"\n" + + " }\n" + + " ],\n" + + " \"3\": [\n" + + " {\n" + + " \"containerID\": 3,\n" + + " \"volumeName\": \"vol3\",\n" + + " \"bucketName\": \"bucket3\",\n" + + " \"keyName\": \"key3\"\n" + + " }\n" + + " ]\n" + + " }\n" + + "}\n"; + assertEquals(expectedOutput, outContent.toString()); + } + + @Test + void testOutputWhenContainerKeyInfosNotEmptyAndKeysDoNotMatchContainersId() { + List containerKeyInfos = Stream.of( + new ContainerKeyInfo(4L, "vol1", "bucket1", "key1"), + new ContainerKeyInfo(5L, "vol2", "bucket2", "key2"), + new ContainerKeyInfo(6L, "vol3", "bucket3", "key3") + ).collect(Collectors.toList()); + when(containerKeyInfoWrapper.getContainerKeyInfos()) + .thenReturn(containerKeyInfos); + long processedKeys = containerKeyInfos.size(); + when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); + + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + System.setOut(new PrintStream(outContent)); + + containerKeyScanner.printOutput(containerKeyInfoWrapper); + + String expectedOutput = "{\n" + + " \"keysProcessed\": 3,\n" + + " \"containerKeys\": {\n" + + " \"1\": [],\n" + + " \"2\": [],\n" + + " \"3\": []\n" + + " }\n" + + "}\n"; + assertEquals(expectedOutput, outContent.toString()); + } + + @Test + public void testExceptionThrownWhenColumnFamilyDefinitionIsNull() { + DBDefinition dbDefinition = mock(DBDefinition.class); + String tableName = "tableName"; + when(dbDefinition.getColumnFamily(tableName)).thenReturn(null); + + Exception e = Assertions.assertThrows(IllegalStateException.class, + () -> 
containerKeyScanner.processTable(dbDefinition, null, null, null, + tableName)); + Assertions.assertEquals("Table with name" + tableName + " not found", + e.getMessage()); + } + + @Test + public void testExceptionThrownWhenColumnFamilyHandleIsNull() + throws IOException { + ContainerKeyScanner containerKeyScannerMock = + mock(ContainerKeyScanner.class); + DBDefinition dbDefinition = mock(DBDefinition.class); + DBColumnFamilyDefinition dbColumnFamilyDefinition = + mock(DBColumnFamilyDefinition.class); + when(dbDefinition.getColumnFamily(any())).thenReturn( + dbColumnFamilyDefinition); + when(dbColumnFamilyDefinition.getName()).thenReturn("name"); + when( + containerKeyScannerMock.processTable(any(), isNull(), isNull(), + isNull(), isNull())).thenCallRealMethod(); + + Exception e = Assertions.assertThrows(IllegalStateException.class, + () -> containerKeyScannerMock.processTable(dbDefinition, null, null, + null, null)); + Assertions.assertEquals("columnFamilyHandle is null", e.getMessage()); + } + + @Test + public void testNoKeysProcessedWhenTableProcessed() + throws IOException { + ContainerKeyScanner containerKeyScannerMock = + mock(ContainerKeyScanner.class); + DBDefinition dbDefinition = mock(DBDefinition.class); + DBColumnFamilyDefinition dbColumnFamilyDefinition = + mock(DBColumnFamilyDefinition.class); + when(dbDefinition.getColumnFamily(any())).thenReturn( + dbColumnFamilyDefinition); + ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); + when(dbColumnFamilyDefinition.getName()).thenReturn("name"); + when(containerKeyScannerMock.getColumnFamilyHandle(any(), + isNull())).thenReturn(columnFamilyHandle); + ManagedRocksDB db = mock(ManagedRocksDB.class); + RocksIterator iterator = mock(RocksIterator.class); + RocksDB rocksDB = mock(RocksDB.class); + when(db.get()).thenReturn(rocksDB); + when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); + doNothing().when(iterator).seekToFirst(); + when( + 
containerKeyScannerMock.processTable(any(), isNull(), any(), + isNull(), isNull())).thenCallRealMethod(); + + long keysProcessed = + containerKeyScannerMock.processTable(dbDefinition, null, db, null, + null); + assertEquals(0, keysProcessed); + } + + @Test + public void testKeysProcessedWhenTableProcessedButNoContainerIdMatch() + throws IOException { + ContainerKeyScanner containerKeyScannerMock = + mock(ContainerKeyScanner.class); + DBDefinition dbDefinition = mock(DBDefinition.class); + DBColumnFamilyDefinition dbColumnFamilyDefinition = + mock(DBColumnFamilyDefinition.class); + when(dbDefinition.getColumnFamily(any())).thenReturn( + dbColumnFamilyDefinition); + ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); + when(dbColumnFamilyDefinition.getName()).thenReturn("name"); + when(containerKeyScannerMock.getColumnFamilyHandle(any(), + isNull())).thenReturn(columnFamilyHandle); + ManagedRocksDB db = mock(ManagedRocksDB.class); + RocksIterator iterator = mock(RocksIterator.class); + RocksDB rocksDB = mock(RocksDB.class); + when(db.get()).thenReturn(rocksDB); + when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); + when(iterator.isValid()) + .thenReturn(true) + .thenReturn(false); + Codec codec = mock(Codec.class); + when(dbColumnFamilyDefinition.getValueCodec()).thenReturn(codec); + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder().build(); + omKeyInfo.setKeyLocationVersions(null); + when(iterator.value()).thenReturn(new byte[1]); + when(codec.fromPersistedFormat(any())).thenReturn(omKeyInfo); + when( + containerKeyScannerMock.processTable(any(), isNull(), any(), + isNull(), isNull())).thenCallRealMethod(); + + long keysProcessed = + containerKeyScannerMock.processTable(dbDefinition, null, db, null, + null); + assertEquals(1, keysProcessed); + } + + @Test + public void testKeysProcessedAndKeyInfoWhenTableProcessedButContainerIdMatch() + throws IOException { + ContainerKeyScanner containerKeyScannerMock = + 
mock(ContainerKeyScanner.class); + DBDefinition dbDefinition = mock(DBDefinition.class); + DBColumnFamilyDefinition dbColumnFamilyDefinition = + mock(DBColumnFamilyDefinition.class); + when(dbDefinition.getColumnFamily(any())).thenReturn( + dbColumnFamilyDefinition); + ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); + when(dbColumnFamilyDefinition.getName()).thenReturn("name"); + when(containerKeyScannerMock.getColumnFamilyHandle(any(), + isNull())).thenReturn(columnFamilyHandle); + ManagedRocksDB db = mock(ManagedRocksDB.class); + RocksIterator iterator = mock(RocksIterator.class); + RocksDB rocksDB = mock(RocksDB.class); + when(db.get()).thenReturn(rocksDB); + when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); + when(iterator.isValid()) + .thenReturn(true) + .thenReturn(false); + Codec codec = mock(Codec.class); + when(dbColumnFamilyDefinition.getValueCodec()).thenReturn(codec); + String volumeName = "vol1"; + String bucketName = "bucket1"; + String keyName = "key1"; + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + List keyLocationVersions = new ArrayList<>(); + List keyLocationInfos = new ArrayList<>(); + long containerID = 1L; + keyLocationInfos.add( + new OmKeyLocationInfo.Builder().setBlockID(new BlockID(containerID, 1L)) + .build()); + keyLocationVersions.add(new OmKeyLocationInfoGroup(1L, keyLocationInfos)); + omKeyInfo.setKeyLocationVersions(keyLocationVersions); + when(iterator.value()).thenReturn(new byte[1]); + when(codec.fromPersistedFormat(any())).thenReturn(omKeyInfo); + when( + containerKeyScannerMock.processTable(any(), isNull(), any(), + anyList(), isNull())).thenCallRealMethod(); + List containerKeyInfos = new ArrayList<>(); + ContainerKeyInfo expectedContainerKeyInfo = + new ContainerKeyInfo(containerID, volumeName, bucketName, keyName); + 
doCallRealMethod().when(containerKeyScannerMock).setContainerIds(anySet()); + containerKeyScannerMock.setContainerIds( + containerKeyScanner.getContainerIds()); + + long keysProcessed = + containerKeyScannerMock.processTable(dbDefinition, null, db, + containerKeyInfos, null); + assertEquals(1, keysProcessed); + assertEquals(expectedContainerKeyInfo, containerKeyInfos.get(0)); + } + */ + +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java new file mode 100644 index 00000000000..d3e47a86e86 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *

+ * + */ + +/** + * Unit tests for Ozone Debug tools. + */ +package org.apache.hadoop.ozone.debug; From a89647e146f10ebba295a34ad7d0e436df068238 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 17 Nov 2023 13:17:42 +0100 Subject: [PATCH 02/33] Revert non-necessary changes --- hadoop-ozone/tools/pom.xml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 5b2ac535b58..6eb9c036de4 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -113,10 +113,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> mockito-core test - - org.mockito - mockito-junit-jupiter - org.junit.jupiter junit-jupiter-params From fe3b75a35d5a747883e460d9dddf61388f91fbbe Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 17 Nov 2023 13:29:14 +0100 Subject: [PATCH 03/33] Cleanup --- .../ozone/debug/TestContainerKeyScanner.java | 3 +- .../ozone/debug/TestContainerKeyScanner.java | 299 ------------------ 2 files changed, 2 insertions(+), 300 deletions(-) delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 3a52aca92b7..ccc1a7d7b58 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -44,7 +44,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; /** - * This class tests `ozone debug ldb` CLI that reads from a RocksDB directory. + * This class tests `ozone debug ldb ckscanner` CLI that reads from RocksDB + * and gets keys for container ids. 
*/ public class TestContainerKeyScanner { private static final String KEY_TABLE = "keyTable"; diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java deleted file mode 100644 index 85e7361871b..00000000000 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.debug; - -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.junit.jupiter.MockitoExtension; - -/** - * Unit tests for {@link ContainerKeyScanner}. 
- */ -@ExtendWith(MockitoExtension.class) -public class TestContainerKeyScanner { - - /* - private ContainerKeyScanner containerKeyScanner; - @Mock - private ContainerKeyInfoWrapper containerKeyInfoWrapper; - - @BeforeEach - void setup() { - containerKeyScanner = new ContainerKeyScanner(); - containerKeyScanner.setContainerIds( - Stream.of(1L, 2L, 3L).collect(Collectors.toSet())); - } - - @Test - void testOutputWhenContainerKeyInfosEmpty() { - when(containerKeyInfoWrapper.getContainerKeyInfos()) - .thenReturn(new ArrayList<>()); - long processedKeys = new Random().nextLong(); - when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); - - ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - System.setOut(new PrintStream(outContent)); - - containerKeyScanner.printOutput(containerKeyInfoWrapper); - - String expectedOutput = "No keys were found for container IDs: " + - containerKeyScanner.getContainerIds() + "\n" + - "Keys processed: " + processedKeys + "\n"; - assertEquals(expectedOutput, outContent.toString()); - } - - @Test - void testOutputWhenContainerKeyInfosNotEmptyAndKeyMatchesContainerId() { - List containerKeyInfos = Stream.of( - new ContainerKeyInfo(1L, "vol1", "bucket1", "key1"), - new ContainerKeyInfo(2L, "vol2", "bucket2", "key2"), - new ContainerKeyInfo(3L, "vol3", "bucket3", "key3") - ).collect(Collectors.toList()); - when(containerKeyInfoWrapper.getContainerKeyInfos()) - .thenReturn(containerKeyInfos); - long processedKeys = containerKeyInfos.size(); - when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); - - ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - System.setOut(new PrintStream(outContent)); - - containerKeyScanner.printOutput(containerKeyInfoWrapper); - - String expectedOutput = "{\n" + - " \"keysProcessed\": 3,\n" + - " \"containerKeys\": {\n" + - " \"1\": [\n" + - " {\n" + - " \"containerID\": 1,\n" + - " \"volumeName\": \"vol1\",\n" + - " \"bucketName\": \"bucket1\",\n" 
+ - " \"keyName\": \"key1\"\n" + - " }\n" + - " ],\n" + - " \"2\": [\n" + - " {\n" + - " \"containerID\": 2,\n" + - " \"volumeName\": \"vol2\",\n" + - " \"bucketName\": \"bucket2\",\n" + - " \"keyName\": \"key2\"\n" + - " }\n" + - " ],\n" + - " \"3\": [\n" + - " {\n" + - " \"containerID\": 3,\n" + - " \"volumeName\": \"vol3\",\n" + - " \"bucketName\": \"bucket3\",\n" + - " \"keyName\": \"key3\"\n" + - " }\n" + - " ]\n" + - " }\n" + - "}\n"; - assertEquals(expectedOutput, outContent.toString()); - } - - @Test - void testOutputWhenContainerKeyInfosNotEmptyAndKeysDoNotMatchContainersId() { - List containerKeyInfos = Stream.of( - new ContainerKeyInfo(4L, "vol1", "bucket1", "key1"), - new ContainerKeyInfo(5L, "vol2", "bucket2", "key2"), - new ContainerKeyInfo(6L, "vol3", "bucket3", "key3") - ).collect(Collectors.toList()); - when(containerKeyInfoWrapper.getContainerKeyInfos()) - .thenReturn(containerKeyInfos); - long processedKeys = containerKeyInfos.size(); - when(containerKeyInfoWrapper.getKeysProcessed()).thenReturn(processedKeys); - - ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - System.setOut(new PrintStream(outContent)); - - containerKeyScanner.printOutput(containerKeyInfoWrapper); - - String expectedOutput = "{\n" + - " \"keysProcessed\": 3,\n" + - " \"containerKeys\": {\n" + - " \"1\": [],\n" + - " \"2\": [],\n" + - " \"3\": []\n" + - " }\n" + - "}\n"; - assertEquals(expectedOutput, outContent.toString()); - } - - @Test - public void testExceptionThrownWhenColumnFamilyDefinitionIsNull() { - DBDefinition dbDefinition = mock(DBDefinition.class); - String tableName = "tableName"; - when(dbDefinition.getColumnFamily(tableName)).thenReturn(null); - - Exception e = Assertions.assertThrows(IllegalStateException.class, - () -> containerKeyScanner.processTable(dbDefinition, null, null, null, - tableName)); - Assertions.assertEquals("Table with name" + tableName + " not found", - e.getMessage()); - } - - @Test - public void 
testExceptionThrownWhenColumnFamilyHandleIsNull() - throws IOException { - ContainerKeyScanner containerKeyScannerMock = - mock(ContainerKeyScanner.class); - DBDefinition dbDefinition = mock(DBDefinition.class); - DBColumnFamilyDefinition dbColumnFamilyDefinition = - mock(DBColumnFamilyDefinition.class); - when(dbDefinition.getColumnFamily(any())).thenReturn( - dbColumnFamilyDefinition); - when(dbColumnFamilyDefinition.getName()).thenReturn("name"); - when( - containerKeyScannerMock.processTable(any(), isNull(), isNull(), - isNull(), isNull())).thenCallRealMethod(); - - Exception e = Assertions.assertThrows(IllegalStateException.class, - () -> containerKeyScannerMock.processTable(dbDefinition, null, null, - null, null)); - Assertions.assertEquals("columnFamilyHandle is null", e.getMessage()); - } - - @Test - public void testNoKeysProcessedWhenTableProcessed() - throws IOException { - ContainerKeyScanner containerKeyScannerMock = - mock(ContainerKeyScanner.class); - DBDefinition dbDefinition = mock(DBDefinition.class); - DBColumnFamilyDefinition dbColumnFamilyDefinition = - mock(DBColumnFamilyDefinition.class); - when(dbDefinition.getColumnFamily(any())).thenReturn( - dbColumnFamilyDefinition); - ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); - when(dbColumnFamilyDefinition.getName()).thenReturn("name"); - when(containerKeyScannerMock.getColumnFamilyHandle(any(), - isNull())).thenReturn(columnFamilyHandle); - ManagedRocksDB db = mock(ManagedRocksDB.class); - RocksIterator iterator = mock(RocksIterator.class); - RocksDB rocksDB = mock(RocksDB.class); - when(db.get()).thenReturn(rocksDB); - when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); - doNothing().when(iterator).seekToFirst(); - when( - containerKeyScannerMock.processTable(any(), isNull(), any(), - isNull(), isNull())).thenCallRealMethod(); - - long keysProcessed = - containerKeyScannerMock.processTable(dbDefinition, null, db, null, - null); - assertEquals(0, 
keysProcessed); - } - - @Test - public void testKeysProcessedWhenTableProcessedButNoContainerIdMatch() - throws IOException { - ContainerKeyScanner containerKeyScannerMock = - mock(ContainerKeyScanner.class); - DBDefinition dbDefinition = mock(DBDefinition.class); - DBColumnFamilyDefinition dbColumnFamilyDefinition = - mock(DBColumnFamilyDefinition.class); - when(dbDefinition.getColumnFamily(any())).thenReturn( - dbColumnFamilyDefinition); - ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); - when(dbColumnFamilyDefinition.getName()).thenReturn("name"); - when(containerKeyScannerMock.getColumnFamilyHandle(any(), - isNull())).thenReturn(columnFamilyHandle); - ManagedRocksDB db = mock(ManagedRocksDB.class); - RocksIterator iterator = mock(RocksIterator.class); - RocksDB rocksDB = mock(RocksDB.class); - when(db.get()).thenReturn(rocksDB); - when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); - when(iterator.isValid()) - .thenReturn(true) - .thenReturn(false); - Codec codec = mock(Codec.class); - when(dbColumnFamilyDefinition.getValueCodec()).thenReturn(codec); - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder().build(); - omKeyInfo.setKeyLocationVersions(null); - when(iterator.value()).thenReturn(new byte[1]); - when(codec.fromPersistedFormat(any())).thenReturn(omKeyInfo); - when( - containerKeyScannerMock.processTable(any(), isNull(), any(), - isNull(), isNull())).thenCallRealMethod(); - - long keysProcessed = - containerKeyScannerMock.processTable(dbDefinition, null, db, null, - null); - assertEquals(1, keysProcessed); - } - - @Test - public void testKeysProcessedAndKeyInfoWhenTableProcessedButContainerIdMatch() - throws IOException { - ContainerKeyScanner containerKeyScannerMock = - mock(ContainerKeyScanner.class); - DBDefinition dbDefinition = mock(DBDefinition.class); - DBColumnFamilyDefinition dbColumnFamilyDefinition = - mock(DBColumnFamilyDefinition.class); - when(dbDefinition.getColumnFamily(any())).thenReturn( - 
dbColumnFamilyDefinition); - ColumnFamilyHandle columnFamilyHandle = mock(ColumnFamilyHandle.class); - when(dbColumnFamilyDefinition.getName()).thenReturn("name"); - when(containerKeyScannerMock.getColumnFamilyHandle(any(), - isNull())).thenReturn(columnFamilyHandle); - ManagedRocksDB db = mock(ManagedRocksDB.class); - RocksIterator iterator = mock(RocksIterator.class); - RocksDB rocksDB = mock(RocksDB.class); - when(db.get()).thenReturn(rocksDB); - when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(iterator); - when(iterator.isValid()) - .thenReturn(true) - .thenReturn(false); - Codec codec = mock(Codec.class); - when(dbColumnFamilyDefinition.getValueCodec()).thenReturn(codec); - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = "key1"; - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - List keyLocationVersions = new ArrayList<>(); - List keyLocationInfos = new ArrayList<>(); - long containerID = 1L; - keyLocationInfos.add( - new OmKeyLocationInfo.Builder().setBlockID(new BlockID(containerID, 1L)) - .build()); - keyLocationVersions.add(new OmKeyLocationInfoGroup(1L, keyLocationInfos)); - omKeyInfo.setKeyLocationVersions(keyLocationVersions); - when(iterator.value()).thenReturn(new byte[1]); - when(codec.fromPersistedFormat(any())).thenReturn(omKeyInfo); - when( - containerKeyScannerMock.processTable(any(), isNull(), any(), - anyList(), isNull())).thenCallRealMethod(); - List containerKeyInfos = new ArrayList<>(); - ContainerKeyInfo expectedContainerKeyInfo = - new ContainerKeyInfo(containerID, volumeName, bucketName, keyName); - doCallRealMethod().when(containerKeyScannerMock).setContainerIds(anySet()); - containerKeyScannerMock.setContainerIds( - containerKeyScanner.getContainerIds()); - - long keysProcessed = - containerKeyScannerMock.processTable(dbDefinition, null, db, - containerKeyInfos, null); - assertEquals(1, keysProcessed); - 
assertEquals(expectedContainerKeyInfo, containerKeyInfos.get(0)); - } - */ - -} From 18175c8e7b5b2e2e6d56949772ed50d27c352cc4 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Mon, 20 Nov 2023 09:40:21 +0100 Subject: [PATCH 04/33] Fix spotbugs --- .../java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index e31cd7bec3e..4387fb2ff27 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -21,6 +21,7 @@ import com.google.common.collect.Sets; import com.google.gson.Gson; import com.google.gson.GsonBuilder; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -117,6 +118,7 @@ private static PrintWriter out() { // and to return pair of objectId and path instead of a map. 
// Further optimization could be done to reuse db // and not connect to it for every method call + @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( long bucketId, String prefix, Optional> dirObjIds, String dbPath) From ac86dcbb96163a14153c2d1dce7a72da53d366b1 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 22 Nov 2023 18:31:50 +0100 Subject: [PATCH 05/33] Increase readability --- .../ozone/debug/ContainerKeyScanner.java | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 4387fb2ff27..0d29e118948 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -70,6 +70,7 @@ description = "Parse a list of container IDs" ) @MetaInfServices(SubcommandWithParent.class) +// TODO use dirinfotable field as FSOdirpathresolver public class ContainerKeyScanner implements Callable, SubcommandWithParent { @@ -137,11 +138,13 @@ public Map getAbsolutePathForObjectIDs( while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) { Pair parentPair = objectIdPathVals.poll(); - // read directoryTable + + // Get all tables from RocksDB List columnFamilyDescriptors = RocksDBUtils.getColumnFamilyDescriptors(dbPath); final List columnFamilyHandles = new ArrayList<>(); + // Get all table handles try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, columnFamilyDescriptors, columnFamilyHandles)) { dbPath = removeTrailingSlashIfNeeded(dbPath); @@ -151,6 +154,7 @@ public Map getAbsolutePathForObjectIDs( throw new IllegalStateException("Incorrect DB Path"); } + // Get directory table DBColumnFamilyDefinition columnFamilyDefinition = dbDefinition.getColumnFamily(DIRECTORY_TABLE); if 
(columnFamilyDefinition == null) { @@ -158,6 +162,7 @@ public Map getAbsolutePathForObjectIDs( "Table with name" + DIRECTORY_TABLE + " not found"); } + // Get directory table handle ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( columnFamilyDefinition.getName().getBytes(UTF_8), columnFamilyHandles); @@ -165,12 +170,15 @@ public Map getAbsolutePathForObjectIDs( throw new IllegalStateException("columnFamilyHandle is null"); } + // Get iterator for directory table try (ManagedRocksIterator iterator = new ManagedRocksIterator( db.get().newIterator(columnFamilyHandle))) { iterator.get().seekToFirst(); while (!objIds.isEmpty() && iterator.get().isValid()) { String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; String key = new String(iterator.get().key(), UTF_8); + + // Skip key if it does not contain subDir if (!key.contains(subDir)) { iterator.get().next(); continue; @@ -285,7 +293,8 @@ private long processTable(DBDefinition dbDefinition, // Generate asbolute key path for FSO keys StringBuilder keyName = new StringBuilder(); if (tableName.equals(FILE_TABLE)) { - handleFileTable(dbPath, volumeId, bucketId, value, keyName); + keyName.append( + getFsoKeyPrefix(dbPath, volumeId, bucketId, value)); } keyName.append(value.getKeyName()); containerKeyInfos.add( @@ -312,8 +321,8 @@ private static String removeBeginningSlash(String path) { return path; } - private void handleFileTable(String dbPath, long volumeId, long bucketId, - OmKeyInfo value, StringBuilder keyName) + private String getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, + OmKeyInfo value) throws IOException, RocksDBException { String prefix = OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + @@ -331,7 +340,7 @@ private void handleFileTable(String dbPath, long volumeId, long bucketId, keyPath = path + OM_KEY_PREFIX; } - keyName.append(removeBeginningSlash(keyPath)); + return removeBeginningSlash(keyPath); } From 2de4fc3888fcf413a22d6e7a462aa17e5d35d497 Mon Sep 17 00:00:00 
2001 From: Mladjan Gadzic Date: Thu, 23 Nov 2023 19:48:17 +0100 Subject: [PATCH 06/33] Make getAbsolutePathForObjectIDs as similar as possible to the one in FSODirectoryPathResolver --- .../ozone/debug/ContainerKeyScanner.java | 172 +++++++++++------- 1 file changed, 106 insertions(+), 66 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 0d29e118948..7c41beb0643 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -92,13 +92,19 @@ public class ContainerKeyScanner implements Callable, "their keys. Example-usage: 1,11,2.(Separated by ',')") private Set containerIds; + private DbAccessInfo dbAccessInfo; + @Override public Void call() throws Exception { + dbAccessInfo = DbAccessInfo.createDbAccessInfo(parent.getDbPath()); + ContainerKeyInfoWrapper containerKeyInfoWrapper = scanDBForContainerKeys(parent.getDbPath()); printOutput(containerKeyInfoWrapper); + dbAccessInfo.close(); + return null; } @@ -121,9 +127,8 @@ private static PrintWriter out() { // and not connect to it for every method call @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( - long bucketId, String prefix, Optional> dirObjIds, - String dbPath) - throws IOException, RocksDBException { + long bucketId, String prefix, Optional> dirObjIds) + throws IOException { // Root of a bucket would always have the // key as /volumeId/bucketId/bucketId/ if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) { @@ -139,60 +144,28 @@ public Map getAbsolutePathForObjectIDs( while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) { Pair parentPair = objectIdPathVals.poll(); - // Get all tables from RocksDB - List columnFamilyDescriptors = - 
RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List columnFamilyHandles = new ArrayList<>(); - - // Get all table handles - try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, - columnFamilyDescriptors, columnFamilyHandles)) { - dbPath = removeTrailingSlashIfNeeded(dbPath); - DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( - Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new IllegalStateException("Incorrect DB Path"); - } - - // Get directory table - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(DIRECTORY_TABLE); - if (columnFamilyDefinition == null) { - throw new IllegalStateException( - "Table with name" + DIRECTORY_TABLE + " not found"); - } - - // Get directory table handle - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - columnFamilyDefinition.getName().getBytes(UTF_8), - columnFamilyHandles); - if (columnFamilyHandle == null) { - throw new IllegalStateException("columnFamilyHandle is null"); - } - - // Get iterator for directory table - try (ManagedRocksIterator iterator = new ManagedRocksIterator( - db.get().newIterator(columnFamilyHandle))) { - iterator.get().seekToFirst(); - while (!objIds.isEmpty() && iterator.get().isValid()) { - String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; - String key = new String(iterator.get().key(), UTF_8); - - // Skip key if it does not contain subDir - if (!key.contains(subDir)) { - iterator.get().next(); - continue; - } + // Get iterator for directory table + try (ManagedRocksIterator iterator = new ManagedRocksIterator( + dbAccessInfo.getDb().get().newIterator(dbAccessInfo.getHandle()))) { + iterator.get().seekToFirst(); + while (!objIds.isEmpty() && iterator.get().isValid()) { + String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; + String key = new String(iterator.get().key(), UTF_8); - OmDirectoryInfo childDir = - ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() - 
.fromPersistedFormat(iterator.get().value())); - Pair pathVal = Pair.of(childDir.getObjectID(), - parentPair.getValue().resolve(childDir.getName())); - addToPathMap(pathVal, objIds, objectIdPathMap); - objectIdPathVals.add(pathVal); + // Skip key if it does not contain subDir + if (!key.contains(subDir)) { iterator.get().next(); + continue; } + + OmDirectoryInfo childDir = + ((OmDirectoryInfo) dbAccessInfo.getDefinition().getValueCodec() + .fromPersistedFormat(iterator.get().value())); + Pair pathVal = Pair.of(childDir.getObjectID(), + parentPair.getValue().resolve(childDir.getName())); + addToPathMap(pathVal, objIds, objectIdPathMap); + objectIdPathVals.add(pathVal); + iterator.get().next(); } } } @@ -232,10 +205,10 @@ private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, FILE_TABLE, dbPath); + containerKeyInfos, FILE_TABLE); keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, KEY_TABLE, dbPath); + containerKeyInfos, KEY_TABLE); } return new ContainerKeyInfoWrapper(keysProcessed, containerKeyInfos); } @@ -244,8 +217,8 @@ private long processTable(DBDefinition dbDefinition, List columnFamilyHandles, ManagedRocksDB db, List containerKeyInfos, - String tableName, String dbPath) - throws IOException, RocksDBException { + String tableName) + throws IOException { long keysProcessed = 0; DBColumnFamilyDefinition columnFamilyDefinition = dbDefinition.getColumnFamily(tableName); @@ -294,7 +267,7 @@ private long processTable(DBDefinition dbDefinition, StringBuilder keyName = new StringBuilder(); if (tableName.equals(FILE_TABLE)) { keyName.append( - getFsoKeyPrefix(dbPath, volumeId, bucketId, value)); + getFsoKeyPrefix(volumeId, bucketId, value)); } keyName.append(value.getKeyName()); containerKeyInfos.add( @@ -321,17 +294,15 @@ private static String removeBeginningSlash(String path) { return path; } - private String 
getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, - OmKeyInfo value) - throws IOException, RocksDBException { + private String getFsoKeyPrefix(long volumeId, long bucketId, OmKeyInfo value) + throws IOException { String prefix = OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; Set dirObjIds = new HashSet<>(); dirObjIds.add(value.getParentObjectID()); Map absolutePaths = - getAbsolutePathForObjectIDs(bucketId, prefix, - Optional.of(dirObjIds), dbPath); + getAbsolutePathForObjectIDs(bucketId, prefix, Optional.of(dirObjIds)); Path path = absolutePaths.get(value.getParentObjectID()); String keyPath; if (path.toString().equals(OM_KEY_PREFIX)) { @@ -344,7 +315,7 @@ private String getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, } - private ColumnFamilyHandle getColumnFamilyHandle( + private static ColumnFamilyHandle getColumnFamilyHandle( byte[] name, List columnFamilyHandles) { return columnFamilyHandles .stream() @@ -360,7 +331,7 @@ private ColumnFamilyHandle getColumnFamilyHandle( .orElse(null); } - private String removeTrailingSlashIfNeeded(String dbPath) { + private static String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); } @@ -402,4 +373,73 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { } } + static class DbAccessInfo { + private final ManagedRocksDB db; + private final ColumnFamilyHandle handle; + + private final DBColumnFamilyDefinition definition; + + DbAccessInfo(ManagedRocksDB db, ColumnFamilyHandle handle, + DBColumnFamilyDefinition definition) { + this.db = db; + this.handle = handle; + this.definition = definition; + } + + public ManagedRocksDB getDb() { + return db; + } + + public ColumnFamilyHandle getHandle() { + return handle; + } + + public DBColumnFamilyDefinition getDefinition() { + return definition; + } + + public static DbAccessInfo createDbAccessInfo(String dbPath) 
+ throws RocksDBException { + // Get all tables from RocksDB + List columnFamilyDescriptors = + RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List columnFamilyHandles = new ArrayList<>(); + + // Get all table handles + ManagedRocksDB managedRocksDB = ManagedRocksDB.openReadOnly(dbPath, + columnFamilyDescriptors, columnFamilyHandles); + dbPath = removeTrailingSlashIfNeeded(dbPath); + DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( + Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new IllegalStateException("Incorrect DB Path"); + } + + // Get directory table + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(DIRECTORY_TABLE); + if (columnFamilyDefinition == null) { + throw new IllegalStateException( + "Table with name" + DIRECTORY_TABLE + " not found"); + } + + // Get directory table handle + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getName().getBytes(UTF_8), + columnFamilyHandles); + if (columnFamilyHandle == null) { + throw new IllegalStateException("columnFamilyHandle is null"); + } + + return new DbAccessInfo(managedRocksDB, columnFamilyHandle, + columnFamilyDefinition); + } + + public void close() { + db.close(); + handle.close(); + } + + } + } From f557b59d6dc1cbfb87998e088add4cae40a14bfa Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Tue, 28 Nov 2023 11:34:17 +0100 Subject: [PATCH 07/33] Revert "Make getAbsolutePathForObjectIDs as similar as possible to the one in FSODirectoryPathResolver" This reverts commit d6ea1d455d4b7f5a4654c3eb3693313a0c44edbb. 
--- .../ozone/debug/ContainerKeyScanner.java | 172 +++++++----------- 1 file changed, 66 insertions(+), 106 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 7c41beb0643..0d29e118948 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -92,19 +92,13 @@ public class ContainerKeyScanner implements Callable, "their keys. Example-usage: 1,11,2.(Separated by ',')") private Set containerIds; - private DbAccessInfo dbAccessInfo; - @Override public Void call() throws Exception { - dbAccessInfo = DbAccessInfo.createDbAccessInfo(parent.getDbPath()); - ContainerKeyInfoWrapper containerKeyInfoWrapper = scanDBForContainerKeys(parent.getDbPath()); printOutput(containerKeyInfoWrapper); - dbAccessInfo.close(); - return null; } @@ -127,8 +121,9 @@ private static PrintWriter out() { // and not connect to it for every method call @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( - long bucketId, String prefix, Optional> dirObjIds) - throws IOException { + long bucketId, String prefix, Optional> dirObjIds, + String dbPath) + throws IOException, RocksDBException { // Root of a bucket would always have the // key as /volumeId/bucketId/bucketId/ if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) { @@ -144,28 +139,60 @@ public Map getAbsolutePathForObjectIDs( while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) { Pair parentPair = objectIdPathVals.poll(); - // Get iterator for directory table - try (ManagedRocksIterator iterator = new ManagedRocksIterator( - dbAccessInfo.getDb().get().newIterator(dbAccessInfo.getHandle()))) { - iterator.get().seekToFirst(); - while (!objIds.isEmpty() && iterator.get().isValid()) { - String subDir = 
prefix + parentPair.getKey() + OM_KEY_PREFIX; - String key = new String(iterator.get().key(), UTF_8); + // Get all tables from RocksDB + List columnFamilyDescriptors = + RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List columnFamilyHandles = new ArrayList<>(); + + // Get all table handles + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, + columnFamilyDescriptors, columnFamilyHandles)) { + dbPath = removeTrailingSlashIfNeeded(dbPath); + DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( + Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new IllegalStateException("Incorrect DB Path"); + } + + // Get directory table + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(DIRECTORY_TABLE); + if (columnFamilyDefinition == null) { + throw new IllegalStateException( + "Table with name" + DIRECTORY_TABLE + " not found"); + } - // Skip key if it does not contain subDir - if (!key.contains(subDir)) { + // Get directory table handle + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getName().getBytes(UTF_8), + columnFamilyHandles); + if (columnFamilyHandle == null) { + throw new IllegalStateException("columnFamilyHandle is null"); + } + + // Get iterator for directory table + try (ManagedRocksIterator iterator = new ManagedRocksIterator( + db.get().newIterator(columnFamilyHandle))) { + iterator.get().seekToFirst(); + while (!objIds.isEmpty() && iterator.get().isValid()) { + String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; + String key = new String(iterator.get().key(), UTF_8); + + // Skip key if it does not contain subDir + if (!key.contains(subDir)) { + iterator.get().next(); + continue; + } + + OmDirectoryInfo childDir = + ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(iterator.get().value())); + Pair pathVal = Pair.of(childDir.getObjectID(), + 
parentPair.getValue().resolve(childDir.getName())); + addToPathMap(pathVal, objIds, objectIdPathMap); + objectIdPathVals.add(pathVal); iterator.get().next(); - continue; } - - OmDirectoryInfo childDir = - ((OmDirectoryInfo) dbAccessInfo.getDefinition().getValueCodec() - .fromPersistedFormat(iterator.get().value())); - Pair pathVal = Pair.of(childDir.getObjectID(), - parentPair.getValue().resolve(childDir.getName())); - addToPathMap(pathVal, objIds, objectIdPathMap); - objectIdPathVals.add(pathVal); - iterator.get().next(); } } } @@ -205,10 +232,10 @@ private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, FILE_TABLE); + containerKeyInfos, FILE_TABLE, dbPath); keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, KEY_TABLE); + containerKeyInfos, KEY_TABLE, dbPath); } return new ContainerKeyInfoWrapper(keysProcessed, containerKeyInfos); } @@ -217,8 +244,8 @@ private long processTable(DBDefinition dbDefinition, List columnFamilyHandles, ManagedRocksDB db, List containerKeyInfos, - String tableName) - throws IOException { + String tableName, String dbPath) + throws IOException, RocksDBException { long keysProcessed = 0; DBColumnFamilyDefinition columnFamilyDefinition = dbDefinition.getColumnFamily(tableName); @@ -267,7 +294,7 @@ private long processTable(DBDefinition dbDefinition, StringBuilder keyName = new StringBuilder(); if (tableName.equals(FILE_TABLE)) { keyName.append( - getFsoKeyPrefix(volumeId, bucketId, value)); + getFsoKeyPrefix(dbPath, volumeId, bucketId, value)); } keyName.append(value.getKeyName()); containerKeyInfos.add( @@ -294,15 +321,17 @@ private static String removeBeginningSlash(String path) { return path; } - private String getFsoKeyPrefix(long volumeId, long bucketId, OmKeyInfo value) - throws IOException { + private String getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, + OmKeyInfo value) + 
throws IOException, RocksDBException { String prefix = OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; Set dirObjIds = new HashSet<>(); dirObjIds.add(value.getParentObjectID()); Map absolutePaths = - getAbsolutePathForObjectIDs(bucketId, prefix, Optional.of(dirObjIds)); + getAbsolutePathForObjectIDs(bucketId, prefix, + Optional.of(dirObjIds), dbPath); Path path = absolutePaths.get(value.getParentObjectID()); String keyPath; if (path.toString().equals(OM_KEY_PREFIX)) { @@ -315,7 +344,7 @@ private String getFsoKeyPrefix(long volumeId, long bucketId, OmKeyInfo value) } - private static ColumnFamilyHandle getColumnFamilyHandle( + private ColumnFamilyHandle getColumnFamilyHandle( byte[] name, List columnFamilyHandles) { return columnFamilyHandles .stream() @@ -331,7 +360,7 @@ private static ColumnFamilyHandle getColumnFamilyHandle( .orElse(null); } - private static String removeTrailingSlashIfNeeded(String dbPath) { + private String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); } @@ -373,73 +402,4 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { } } - static class DbAccessInfo { - private final ManagedRocksDB db; - private final ColumnFamilyHandle handle; - - private final DBColumnFamilyDefinition definition; - - DbAccessInfo(ManagedRocksDB db, ColumnFamilyHandle handle, - DBColumnFamilyDefinition definition) { - this.db = db; - this.handle = handle; - this.definition = definition; - } - - public ManagedRocksDB getDb() { - return db; - } - - public ColumnFamilyHandle getHandle() { - return handle; - } - - public DBColumnFamilyDefinition getDefinition() { - return definition; - } - - public static DbAccessInfo createDbAccessInfo(String dbPath) - throws RocksDBException { - // Get all tables from RocksDB - List columnFamilyDescriptors = - RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List 
columnFamilyHandles = new ArrayList<>(); - - // Get all table handles - ManagedRocksDB managedRocksDB = ManagedRocksDB.openReadOnly(dbPath, - columnFamilyDescriptors, columnFamilyHandles); - dbPath = removeTrailingSlashIfNeeded(dbPath); - DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( - Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new IllegalStateException("Incorrect DB Path"); - } - - // Get directory table - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(DIRECTORY_TABLE); - if (columnFamilyDefinition == null) { - throw new IllegalStateException( - "Table with name" + DIRECTORY_TABLE + " not found"); - } - - // Get directory table handle - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - columnFamilyDefinition.getName().getBytes(UTF_8), - columnFamilyHandles); - if (columnFamilyHandle == null) { - throw new IllegalStateException("columnFamilyHandle is null"); - } - - return new DbAccessInfo(managedRocksDB, columnFamilyHandle, - columnFamilyDefinition); - } - - public void close() { - db.close(); - handle.close(); - } - - } - } From 1f9e8b4749cfdef3b0646223caf9fb2b5e3c6063 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Tue, 28 Nov 2023 12:48:40 +0100 Subject: [PATCH 08/33] Put directoryTable in memory instead of re-reading it every time --- .../ozone/debug/ContainerKeyScanner.java | 134 ++++++++++-------- 1 file changed, 72 insertions(+), 62 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 0d29e118948..a55356532a2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -49,6 +49,7 @@ import java.util.Collections; import java.util.HashMap; import 
java.util.HashSet; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -57,6 +58,7 @@ import java.util.Queue; import java.util.Set; import java.util.concurrent.Callable; +import java.util.stream.Collectors; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -92,8 +94,12 @@ public class ContainerKeyScanner implements Callable, "their keys. Example-usage: 1,11,2.(Separated by ',')") private Set containerIds; + private static Map directoryTable; + @Override public Void call() throws Exception { + directoryTable = getDirectoryTableData(parent.getDbPath()); + ContainerKeyInfoWrapper containerKeyInfoWrapper = scanDBForContainerKeys(parent.getDbPath()); @@ -102,6 +108,57 @@ public Void call() throws Exception { return null; } + private Map getDirectoryTableData(String dbPath) + throws RocksDBException, IOException { + Map directoryTableData = new HashMap<>(); + + // Get all tables from RocksDB + List columnFamilyDescriptors = + RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List columnFamilyHandles = new ArrayList<>(); + + // Get all table handles + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, + columnFamilyDescriptors, columnFamilyHandles)) { + dbPath = removeTrailingSlashIfNeeded(dbPath); + DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( + Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new IllegalStateException("Incorrect DB Path"); + } + + // Get directory table + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(DIRECTORY_TABLE); + if (columnFamilyDefinition == null) { + throw new IllegalStateException( + "Table with name" + DIRECTORY_TABLE + " not found"); + } + + // Get directory table handle + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getName().getBytes(UTF_8), + columnFamilyHandles); + if 
(columnFamilyHandle == null) { + throw new IllegalStateException("columnFamilyHandle is null"); + } + + // Get iterator for directory table + try (ManagedRocksIterator iterator = new ManagedRocksIterator( + db.get().newIterator(columnFamilyHandle))) { + iterator.get().seekToFirst(); + while (iterator.get().isValid()) { + directoryTableData.put(new String(iterator.get().key(), UTF_8), + ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(iterator.get().value()))); + iterator.get().next(); + } + } + } + + return directoryTableData; + } + @Override public Class getParentType() { return RDBParser.class; @@ -117,12 +174,9 @@ private static PrintWriter out() { // TODO optimize this method to use single objectId instead of a set // and to return pair of objectId and path instead of a map. - // Further optimization could be done to reuse db - // and not connect to it for every method call @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( - long bucketId, String prefix, Optional> dirObjIds, - String dbPath) + long bucketId, String prefix, Optional> dirObjIds) throws IOException, RocksDBException { // Root of a bucket would always have the // key as /volumeId/bucketId/bucketId/ @@ -138,62 +192,19 @@ public Map getAbsolutePathForObjectIDs( while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) { Pair parentPair = objectIdPathVals.poll(); - - // Get all tables from RocksDB - List columnFamilyDescriptors = - RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List columnFamilyHandles = new ArrayList<>(); - - // Get all table handles - try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, - columnFamilyDescriptors, columnFamilyHandles)) { - dbPath = removeTrailingSlashIfNeeded(dbPath); - DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( - Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new IllegalStateException("Incorrect DB Path"); - } - - // 
Get directory table - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(DIRECTORY_TABLE); - if (columnFamilyDefinition == null) { - throw new IllegalStateException( - "Table with name" + DIRECTORY_TABLE + " not found"); - } - - // Get directory table handle - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - columnFamilyDefinition.getName().getBytes(UTF_8), - columnFamilyHandles); - if (columnFamilyHandle == null) { - throw new IllegalStateException("columnFamilyHandle is null"); - } - - // Get iterator for directory table - try (ManagedRocksIterator iterator = new ManagedRocksIterator( - db.get().newIterator(columnFamilyHandle))) { - iterator.get().seekToFirst(); - while (!objIds.isEmpty() && iterator.get().isValid()) { - String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; - String key = new String(iterator.get().key(), UTF_8); - - // Skip key if it does not contain subDir - if (!key.contains(subDir)) { - iterator.get().next(); - continue; - } - - OmDirectoryInfo childDir = - ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() - .fromPersistedFormat(iterator.get().value())); - Pair pathVal = Pair.of(childDir.getObjectID(), - parentPair.getValue().resolve(childDir.getName())); - addToPathMap(pathVal, objIds, objectIdPathMap); - objectIdPathVals.add(pathVal); - iterator.get().next(); - } - } + String subDir = prefix + parentPair.getKey() + OM_KEY_PREFIX; + + Iterator subDirIterator = + directoryTable.keySet().stream() + .filter(k -> k.startsWith(subDir)) + .collect(Collectors.toList()).iterator(); + while (!objIds.isEmpty() && subDirIterator.hasNext()) { + OmDirectoryInfo childDir = + directoryTable.get(subDirIterator.next()); + Pair pathVal = Pair.of(childDir.getObjectID(), + parentPair.getValue().resolve(childDir.getName())); + addToPathMap(pathVal, objIds, objectIdPathMap); + objectIdPathVals.add(pathVal); } } // Invalid directory objectId which does not exist in the given bucket. 
@@ -330,8 +341,7 @@ private String getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, Set dirObjIds = new HashSet<>(); dirObjIds.add(value.getParentObjectID()); Map absolutePaths = - getAbsolutePathForObjectIDs(bucketId, prefix, - Optional.of(dirObjIds), dbPath); + getAbsolutePathForObjectIDs(bucketId, prefix, Optional.of(dirObjIds)); Path path = absolutePaths.get(value.getParentObjectID()); String keyPath; if (path.toString().equals(OM_KEY_PREFIX)) { From a4187870d7cf7b5227fd9fcda4990f8118fd531d Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Tue, 28 Nov 2023 12:50:15 +0100 Subject: [PATCH 09/33] Increase encapsulation --- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index a55356532a2..cf50aa41c81 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -72,13 +72,12 @@ description = "Parse a list of container IDs" ) @MetaInfServices(SubcommandWithParent.class) -// TODO use dirinfotable field as FSOdirpathresolver public class ContainerKeyScanner implements Callable, SubcommandWithParent { - public static final String FILE_TABLE = "fileTable"; - public static final String KEY_TABLE = "keyTable"; - public static final String DIRECTORY_TABLE = "directoryTable"; + private static final String FILE_TABLE = "fileTable"; + private static final String KEY_TABLE = "keyTable"; + private static final String DIRECTORY_TABLE = "directoryTable"; @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; From f960d41487a4a1abb8e4da7d1467cea9c5ed8f04 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 1 Dec 2023 14:13:32 +0100 Subject: [PATCH 10/33] Add 
printout for the duration of load of directoryTable and increase readability --- .../ozone/debug/TestContainerKeyScanner.java | 4 +- .../ozone/debug/ContainerKeyScanner.java | 39 ++++++++++--------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index ccc1a7d7b58..8dd6366d2f0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -150,7 +150,7 @@ void testWhenThereAreKeysForConatainerIds() throws IOException { int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); - Assertions.assertEquals(KEYS_FOUND_OUTPUT, stdout.toString()); + Assertions.assertTrue(stdout.toString().contains(KEYS_FOUND_OUTPUT)); Assertions.assertTrue(stderr.toString().isEmpty()); } @@ -172,7 +172,7 @@ void testWhenThereAreNotKeysForConatainerIds() throws IOException { int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); - Assertions.assertEquals(KEYS_NOT_FOUND_OUTPUT, stdout.toString()); + Assertions.assertTrue(stdout.toString().contains(KEYS_NOT_FOUND_OUTPUT)); Assertions.assertTrue(stderr.toString().isEmpty()); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index cf50aa41c81..6299a9745f4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -97,16 +97,26 @@ public class ContainerKeyScanner implements Callable, @Override public Void call() throws Exception { + long start = 
System.currentTimeMillis(); directoryTable = getDirectoryTableData(parent.getDbPath()); + long end = System.currentTimeMillis(); + out().println("directoryTable loaded in " + (end - start) + " ms."); ContainerKeyInfoWrapper containerKeyInfoWrapper = scanDBForContainerKeys(parent.getDbPath()); printOutput(containerKeyInfoWrapper); + closeStdChannels(); + return null; } + private void closeStdChannels() { + out().close(); + err().close(); + } + private Map getDirectoryTableData(String dbPath) throws RocksDBException, IOException { Map directoryTableData = new HashMap<>(); @@ -171,12 +181,9 @@ private static PrintWriter out() { return spec.commandLine().getOut(); } - // TODO optimize this method to use single objectId instead of a set - // and to return pair of objectId and path instead of a map. @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( - long bucketId, String prefix, Optional> dirObjIds) - throws IOException, RocksDBException { + long bucketId, String prefix, Optional> dirObjIds) { // Root of a bucket would always have the // key as /volumeId/bucketId/bucketId/ if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) { @@ -255,7 +262,7 @@ private long processTable(DBDefinition dbDefinition, ManagedRocksDB db, List containerKeyInfos, String tableName, String dbPath) - throws IOException, RocksDBException { + throws IOException { long keysProcessed = 0; DBColumnFamilyDefinition columnFamilyDefinition = dbDefinition.getColumnFamily(tableName); @@ -303,8 +310,7 @@ private long processTable(DBDefinition dbDefinition, // Generate asbolute key path for FSO keys StringBuilder keyName = new StringBuilder(); if (tableName.equals(FILE_TABLE)) { - keyName.append( - getFsoKeyPrefix(dbPath, volumeId, bucketId, value)); + keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); } keyName.append(value.getKeyName()); containerKeyInfos.add( @@ -331,9 +337,8 @@ private static String removeBeginningSlash(String path) { return 
path; } - private String getFsoKeyPrefix(String dbPath, long volumeId, long bucketId, - OmKeyInfo value) - throws IOException, RocksDBException { + private String getFsoKeyPrefix(long volumeId, long bucketId, + OmKeyInfo value) { String prefix = OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; @@ -380,12 +385,9 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { List containerKeyInfos = containerKeyInfoWrapper.getContainerKeyInfos(); if (containerKeyInfos.isEmpty()) { - try (PrintWriter out = out()) { - out.println("No keys were found for container IDs: " + - containerIds); - out.println( - "Keys processed: " + containerKeyInfoWrapper.getKeysProcessed()); - } + out().println("No keys were found for container IDs: " + containerIds); + out().println( + "Keys processed: " + containerKeyInfoWrapper.getKeysProcessed()); return; } @@ -406,9 +408,8 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { String prettyJson = gson.toJson( new ContainerKeyInfoResponse(containerKeyInfoWrapper.getKeysProcessed(), infoMap)); - try (PrintWriter out = out()) { - out.println(prettyJson); - } + + out().println(prettyJson); } } From 63f0073c3f7888bf204fe28d30f4c2ec5be9db97 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 1 Dec 2023 14:31:53 +0100 Subject: [PATCH 11/33] Load directoryTable only after the first fso key is found to reduce unnecessary load --- .../ozone/debug/ContainerKeyScanner.java | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 6299a9745f4..28bd19b7a90 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -94,14 +94,10 @@ public 
class ContainerKeyScanner implements Callable, private Set containerIds; private static Map directoryTable; + private static boolean isDirTableLoaded = false; @Override public Void call() throws Exception { - long start = System.currentTimeMillis(); - directoryTable = getDirectoryTableData(parent.getDbPath()); - long end = System.currentTimeMillis(); - out().println("directoryTable loaded in " + (end - start) + " ms."); - ContainerKeyInfoWrapper containerKeyInfoWrapper = scanDBForContainerKeys(parent.getDbPath()); @@ -249,10 +245,10 @@ private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, FILE_TABLE, dbPath); + containerKeyInfos, FILE_TABLE); keysProcessed += processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, KEY_TABLE, dbPath); + containerKeyInfos, KEY_TABLE); } return new ContainerKeyInfoWrapper(keysProcessed, containerKeyInfos); } @@ -261,7 +257,7 @@ private long processTable(DBDefinition dbDefinition, List columnFamilyHandles, ManagedRocksDB db, List containerKeyInfos, - String tableName, String dbPath) + String tableName) throws IOException { long keysProcessed = 0; DBColumnFamilyDefinition columnFamilyDefinition = @@ -310,6 +306,16 @@ private long processTable(DBDefinition dbDefinition, // Generate asbolute key path for FSO keys StringBuilder keyName = new StringBuilder(); if (tableName.equals(FILE_TABLE)) { + // Load directory table only after the first fso key is found + // to reduce necessary load if there are not fso keys + if (!isDirTableLoaded) { + long start = System.currentTimeMillis(); + directoryTable = getDirectoryTableData(parent.getDbPath()); + long end = System.currentTimeMillis(); + out().println( + "directoryTable loaded in " + (end - start) + " ms."); + isDirTableLoaded = true; + } keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); } keyName.append(value.getKeyName()); @@ -326,6 +332,8 @@ private long 
processTable(DBDefinition dbDefinition, keysProcessed++; } return keysProcessed; + } catch (RocksDBException e) { + throw new RuntimeException(e); } } From cb8b494973499206275bb72fb8176cbee70b3019 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 1 Dec 2023 15:23:29 +0100 Subject: [PATCH 12/33] Fix warnings --- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 28bd19b7a90..18ef787fa7e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -21,7 +21,6 @@ import com.google.common.collect.Sets; import com.google.gson.Gson; import com.google.gson.GsonBuilder; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -62,7 +61,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH; /** * Parser for a list of container IDs, to scan for keys. 
@@ -177,7 +176,6 @@ private static PrintWriter out() { return spec.commandLine().getOut(); } - @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") public Map getAbsolutePathForObjectIDs( long bucketId, String prefix, Optional> dirObjIds) { // Root of a bucket would always have the @@ -188,7 +186,7 @@ public Map getAbsolutePathForObjectIDs( Set objIds = Sets.newHashSet(dirObjIds.get()); Map objectIdPathMap = new HashMap<>(); Queue> objectIdPathVals = new LinkedList<>(); - Pair root = Pair.of(bucketId, Paths.get(OZONE_URI_DELIMITER)); + Pair root = Pair.of(bucketId, ROOT_PATH); objectIdPathVals.add(root); addToPathMap(root, objIds, objectIdPathMap); From c6d1a48705d1b64cc724ee1564892bf58d12ef7c Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Tue, 5 Dec 2023 17:37:09 +0100 Subject: [PATCH 13/33] Remove confusing full stop Co-authored-by: Ritesh H Shukla --- .../java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 18ef787fa7e..94ebaceb746 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -89,7 +89,7 @@ public class ContainerKeyScanner implements Callable, paramLabel = "containerIDs", required = true, description = "Set of container IDs to be used for getting all " + - "their keys. Example-usage: 1,11,2.(Separated by ',')") + "their keys. 
Example-usage: 1,11,2 (Separated by ',')") private Set containerIds; private static Map directoryTable; From 653533a7c49880a4843c24fa5954f5766139799b Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Tue, 5 Dec 2023 17:37:48 +0100 Subject: [PATCH 14/33] Simplify description Co-authored-by: Ritesh H Shukla --- .../java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 94ebaceb746..3760fb3e5f4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -68,7 +68,7 @@ */ @CommandLine.Command( name = "ckscanner", - description = "Parse a list of container IDs" + description = "Find keys that reference a container" ) @MetaInfServices(SubcommandWithParent.class) public class ContainerKeyScanner implements Callable, From 06af3f832b66d460f24b607e1679e142083d8117 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Mon, 11 Dec 2023 14:12:25 +0100 Subject: [PATCH 15/33] Update hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java Co-authored-by: Hemant Kumar --- .../java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java index bc4d45fc5be..d755a33d97e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java @@ -65,7 +65,7 @@ public boolean equals(Object o) { if (this == o) { return true; } - if 
(o == null || getClass() != o.getClass()) { + if (!(o instanceof ContainerKeyInfo)) { return false; } ContainerKeyInfo that = (ContainerKeyInfo) o; From cb54308212a1ee0a540a540235b26cdcb59f44fc Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Mon, 11 Dec 2023 14:20:03 +0100 Subject: [PATCH 16/33] Add period at the end --- .../java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 3760fb3e5f4..5d66a7ddeb6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -89,7 +89,7 @@ public class ContainerKeyScanner implements Callable, paramLabel = "containerIDs", required = true, description = "Set of container IDs to be used for getting all " + - "their keys. Example-usage: 1,11,2 (Separated by ',')") + "their keys. 
Example-usage: 1,11,2 (Separated by ',').") private Set containerIds; private static Map directoryTable; From 49f39654dcf119c348b640915136f12033064881 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 10 Jan 2024 16:50:22 +0100 Subject: [PATCH 17/33] Remove unnecessary code --- .../ozone/debug/ContainerKeyScanner.java | 41 ++++--------------- 1 file changed, 9 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 5d66a7ddeb6..b9068e1cb23 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -24,7 +24,6 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; @@ -121,28 +120,13 @@ private Map getDirectoryTableData(String dbPath) RocksDBUtils.getColumnFamilyDescriptors(dbPath); final List columnFamilyHandles = new ArrayList<>(); - // Get all table handles try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, columnFamilyDescriptors, columnFamilyHandles)) { - dbPath = removeTrailingSlashIfNeeded(dbPath); - DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( - Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new IllegalStateException("Incorrect DB Path"); - } - - // Get directory table - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(DIRECTORY_TABLE); - if (columnFamilyDefinition == null) { - throw new 
IllegalStateException( - "Table with name" + DIRECTORY_TABLE + " not found"); - } // Get directory table handle - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - columnFamilyDefinition.getName().getBytes(UTF_8), - columnFamilyHandles); + ColumnFamilyHandle columnFamilyHandle = + getColumnFamilyHandle(DIRECTORY_TABLE.getBytes(UTF_8), + columnFamilyHandles); if (columnFamilyHandle == null) { throw new IllegalStateException("columnFamilyHandle is null"); } @@ -153,8 +137,8 @@ private Map getDirectoryTableData(String dbPath) iterator.get().seekToFirst(); while (iterator.get().isValid()) { directoryTableData.put(new String(iterator.get().key(), UTF_8), - ((OmDirectoryInfo) columnFamilyDefinition.getValueCodec() - .fromPersistedFormat(iterator.get().value()))); + OmDirectoryInfo.getCodec() + .fromPersistedFormat(iterator.get().value())); iterator.get().next(); } } @@ -258,16 +242,9 @@ private long processTable(DBDefinition dbDefinition, String tableName) throws IOException { long keysProcessed = 0; - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(tableName); - if (columnFamilyDefinition == null) { - throw new IllegalStateException( - "Table with name" + tableName + " not found"); - } - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - columnFamilyDefinition.getName().getBytes(UTF_8), - columnFamilyHandles); + ColumnFamilyHandle columnFamilyHandle = + getColumnFamilyHandle(tableName.getBytes(UTF_8), columnFamilyHandles); if (columnFamilyHandle == null) { throw new IllegalStateException("columnFamilyHandle is null"); } @@ -276,8 +253,8 @@ private long processTable(DBDefinition dbDefinition, db.get().newIterator(columnFamilyHandle))) { iterator.get().seekToFirst(); while (iterator.get().isValid()) { - OmKeyInfo value = ((OmKeyInfo) columnFamilyDefinition.getValueCodec() - .fromPersistedFormat(iterator.get().value())); + OmKeyInfo value = OmKeyInfo.getCodec(true) + .fromPersistedFormat(iterator.get().value()); 
List keyLocationVersions = value.getKeyLocationVersions(); if (Objects.isNull(keyLocationVersions)) { From d0598ae75668160141db7d0e0a9268694798d9de Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 10 Jan 2024 17:06:42 +0100 Subject: [PATCH 18/33] Use StringUtils --- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index b9068e1cb23..3d833d47bd7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -22,6 +22,7 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBDefinition; @@ -136,7 +137,7 @@ private Map getDirectoryTableData(String dbPath) db.get().newIterator(columnFamilyHandle))) { iterator.get().seekToFirst(); while (iterator.get().isValid()) { - directoryTableData.put(new String(iterator.get().key(), UTF_8), + directoryTableData.put(StringUtils.bytes2String(iterator.get().key()), OmDirectoryInfo.getCodec() .fromPersistedFormat(iterator.get().value())); iterator.get().next(); From 6a90b3b4e2d847b48985cf3e844d9fac465a1742 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 10 Jan 2024 17:08:10 +0100 Subject: [PATCH 19/33] Revert reordering --- .../main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index 3ae60bb7e47..ce7d4ed7a7c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -32,6 +32,9 @@ import org.apache.hadoop.ozone.om.protocolPB.OmTransport; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.ratis.protocol.ClientId; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -40,9 +43,6 @@ import java.util.Collection; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - /** * Subcommand for admin operations related to OM. 
*/ From bf0e984478ee44885d24b1afa875708299c78fca Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 10 Jan 2024 17:10:21 +0100 Subject: [PATCH 20/33] Move processing of data to a helper function because of too much nesting --- .../ozone/debug/ContainerKeyScanner.java | 69 +++++++++++-------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 3d833d47bd7..ccdb738ec28 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -274,36 +274,8 @@ private long processTable(DBDefinition dbDefinition, bucketId = Long.parseLong(keyParts[2]); } - for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { - for (List locationInfos : - locationInfoGroup.getLocationVersionMap().values()) { - for (OmKeyLocationInfo locationInfo : locationInfos) { - if (containerIds.contains(locationInfo.getContainerID())) { - // Generate asbolute key path for FSO keys - StringBuilder keyName = new StringBuilder(); - if (tableName.equals(FILE_TABLE)) { - // Load directory table only after the first fso key is found - // to reduce necessary load if there are not fso keys - if (!isDirTableLoaded) { - long start = System.currentTimeMillis(); - directoryTable = getDirectoryTableData(parent.getDbPath()); - long end = System.currentTimeMillis(); - out().println( - "directoryTable loaded in " + (end - start) + " ms."); - isDirTableLoaded = true; - } - keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); - } - keyName.append(value.getKeyName()); - containerKeyInfos.add( - new ContainerKeyInfo(locationInfo.getContainerID(), - value.getVolumeName(), volumeId, value.getBucketName(), - bucketId, keyName.toString(), - value.getParentObjectID())); - } - } - 
} - } + processData(containerKeyInfos, tableName, keyLocationVersions, volumeId, + bucketId, value); iterator.get().next(); keysProcessed++; } @@ -313,6 +285,43 @@ private long processTable(DBDefinition dbDefinition, } } + private void processData(List containerKeyInfos, + String tableName, + List keyLocationVersions, + long volumeId, long bucketId, OmKeyInfo value) + throws RocksDBException, IOException { + for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { + for (List locationInfos : + locationInfoGroup.getLocationVersionMap().values()) { + for (OmKeyLocationInfo locationInfo : locationInfos) { + if (containerIds.contains(locationInfo.getContainerID())) { + // Generate asbolute key path for FSO keys + StringBuilder keyName = new StringBuilder(); + if (tableName.equals(FILE_TABLE)) { + // Load directory table only after the first fso key is found + // to reduce necessary load if there are not fso keys + if (!isDirTableLoaded) { + long start = System.currentTimeMillis(); + directoryTable = getDirectoryTableData(parent.getDbPath()); + long end = System.currentTimeMillis(); + out().println( + "directoryTable loaded in " + (end - start) + " ms."); + isDirTableLoaded = true; + } + keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); + } + keyName.append(value.getKeyName()); + containerKeyInfos.add( + new ContainerKeyInfo(locationInfo.getContainerID(), + value.getVolumeName(), volumeId, value.getBucketName(), + bucketId, keyName.toString(), + value.getParentObjectID())); + } + } + } + } + } + private static String removeBeginningSlash(String path) { if (path.startsWith(OM_KEY_PREFIX)) { return path.substring(1); From 1978578a5450cc59901687f77141b851c8a27a21 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Wed, 10 Jan 2024 17:11:40 +0100 Subject: [PATCH 21/33] Fix typo --- .../apache/hadoop/ozone/debug/TestContainerKeyScanner.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 8dd6366d2f0..6c58b670643 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -132,7 +132,7 @@ public void shutdown() throws IOException { } @Test - void testWhenThereAreKeysForConatainerIds() throws IOException { + void testWhenThereAreKeysForContainerIds() throws IOException { // create keys for tables long volumeId = -123L; @@ -156,7 +156,7 @@ void testWhenThereAreKeysForConatainerIds() throws IOException { } @Test - void testWhenThereAreNotKeysForConatainerIds() throws IOException { + void testWhenThereAreNotKeysForContainerIds() throws IOException { // create keys for tables long volumeId = -123L; From 6f2d271b6f351747af5016b15789f1634e30087c Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 14:12:41 +0100 Subject: [PATCH 22/33] Compare objects instead of string for json --- .../ozone/debug/TestContainerKeyScanner.java | 73 +++++++++---------- .../ozone/debug/ContainerKeyInfoResponse.java | 19 ++++- .../ozone/debug/ContainerKeyScanner.java | 9 ++- 3 files changed, 55 insertions(+), 46 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 6c58b670643..2966f61fd91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -16,6 +16,8 @@ */ package org.apache.hadoop.ozone.debug; +import com.google.gson.Gson; 
+import com.google.gson.GsonBuilder; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -40,6 +42,9 @@ import java.io.StringWriter; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; @@ -57,45 +62,33 @@ public class TestContainerKeyScanner { private StringWriter stdout, stderr; private PrintWriter pstdout, pstderr; private CommandLine cmd; + private static final Gson GSON = + new GsonBuilder().setPrettyPrinting().create(); + private static final ContainerKeyInfo KEY_ONE = + new ContainerKeyInfo(1L, "vol1", -123L, "bucket1", -456L, "dir1/key1", + -789L); + private static final ContainerKeyInfo KEY_TWO = + new ContainerKeyInfo(2L, "vol1", 0L, "bucket1", 0L, "key2", 0L); + private static final ContainerKeyInfo KEY_THREE = + new ContainerKeyInfo(3L, "vol1", 0L, "bucket1", 0L, "key3", 0L); + + private static final Map> CONTAINER_KEYS = + new HashMap<>(); + + static { + List list1 = new ArrayList<>(); + list1.add(KEY_ONE); + List list2 = new ArrayList<>(); + list2.add(KEY_TWO); + List list3 = new ArrayList<>(); + list3.add(KEY_THREE); + CONTAINER_KEYS.put(1L, list1); + CONTAINER_KEYS.put(2L, list2); + CONTAINER_KEYS.put(3L, list3); + } - private static final String KEYS_FOUND_OUTPUT = "{\n" + - " \"keysProcessed\": 3,\n" + - " \"containerKeys\": {\n" + - " \"1\": [\n" + - " {\n" + - " \"containerID\": 1,\n" + - " \"volumeName\": \"vol1\",\n" + - " \"volumeId\": -123,\n" + - " \"bucketName\": \"bucket1\",\n" + - " \"bucketId\": -456,\n" + - " \"keyName\": \"dir1/key1\",\n" + - " \"parentId\": -789\n" + - " }\n" + - " ],\n" + - " \"2\": [\n" + - " {\n" + - " \"containerID\": 2,\n" + - " \"volumeName\": \"vol1\",\n" + - " \"volumeId\": 0,\n" + - " \"bucketName\": \"bucket1\",\n" + - " \"bucketId\": 0,\n" + - " \"keyName\": 
\"key2\",\n" + - " \"parentId\": 0\n" + - " }\n" + - " ],\n" + - " \"3\": [\n" + - " {\n" + - " \"containerID\": 3,\n" + - " \"volumeName\": \"vol1\",\n" + - " \"volumeId\": 0,\n" + - " \"bucketName\": \"bucket1\",\n" + - " \"bucketId\": 0,\n" + - " \"keyName\": \"key3\",\n" + - " \"parentId\": 0\n" + - " }\n" + - " ]\n" + - " }\n" + - "}\n"; + private static final ContainerKeyInfoResponse KEYS_FOUND_OUTPUT = + new ContainerKeyInfoResponse(3, CONTAINER_KEYS); private static final String KEYS_NOT_FOUND_OUTPUT = "No keys were found for container IDs: [1, 2, 3]\n" + @@ -150,7 +143,9 @@ void testWhenThereAreKeysForContainerIds() throws IOException { int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); - Assertions.assertTrue(stdout.toString().contains(KEYS_FOUND_OUTPUT)); + Assertions.assertEquals( + GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class), + KEYS_FOUND_OUTPUT); Assertions.assertTrue(stderr.toString().isEmpty()); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java index b29283ecdb3..865861231ae 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; +import java.util.Objects; /** * Class for response for container key scanner. 
@@ -37,11 +38,21 @@ public ContainerKeyInfoResponse( this.containerKeys = containerKeys; } - public long getKeysProcessed() { - return keysProcessed; + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ContainerKeyInfoResponse that = (ContainerKeyInfoResponse) o; + return keysProcessed == that.keysProcessed && + Objects.equals(containerKeys, that.containerKeys); } - public Map> getContainerKeys() { - return containerKeys; + @Override + public int hashCode() { + return Objects.hash(keysProcessed, containerKeys); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index ccdb738ec28..c0812411313 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -37,6 +37,8 @@ import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import picocli.CommandLine; import java.io.IOException; @@ -74,6 +76,8 @@ public class ContainerKeyScanner implements Callable, SubcommandWithParent { + public static final Logger LOG = + LoggerFactory.getLogger(ContainerKeyScanner.class); private static final String FILE_TABLE = "fileTable"; private static final String KEY_TABLE = "keyTable"; private static final String DIRECTORY_TABLE = "directoryTable"; @@ -304,8 +308,7 @@ private void processData(List containerKeyInfos, long start = System.currentTimeMillis(); directoryTable = getDirectoryTableData(parent.getDbPath()); long end = System.currentTimeMillis(); - out().println( - "directoryTable loaded in " + (end - start) + " ms."); + LOG.info("directoryTable loaded in " + (end - start) + " ms."); 
isDirTableLoaded = true; } keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); @@ -402,7 +405,7 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { new ContainerKeyInfoResponse(containerKeyInfoWrapper.getKeysProcessed(), infoMap)); - out().println(prettyJson); + out().print(prettyJson); } } From 5cffd271ad5fd23b2c817e0288f4e7dca7c3c2d5 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 14:16:40 +0100 Subject: [PATCH 23/33] Use OM_KEY_PREFIX instead of "/" --- .../hadoop/ozone/debug/TestContainerKeyScanner.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 2966f61fd91..d8ed7704169 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -47,6 +47,7 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** * This class tests `ozone debug ldb ckscanner` CLI that reads from RocksDB @@ -179,7 +180,9 @@ private void createFile(long volumeId, long bucketId, String keyName, // format: /volumeId/bucketId/parentId(bucketId)/keyName String key = - "/" + volumeId + "/" + bucketId + "/" + parentId + "/" + keyName; + OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + + bucketId + OM_KEY_PREFIX + parentId + + OM_KEY_PREFIX + keyName; OmKeyInfo value = getOmKeyInfo("vol1", "bucket1", keyName, containerId, objectId, @@ -195,7 +198,8 @@ private void createKey(String keyName, long containerId) throws IOException { String volumeName = "vol1"; String bucketName = "bucket1"; // format: /volumeName/bucketName/keyName - String key = "/" + 
volumeName + "/" + bucketName + "/" + keyName; + String key = OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName + + OM_KEY_PREFIX + keyName; // generate table value OmKeyInfo value = @@ -212,7 +216,8 @@ private void createDirectory(long volumeId, long bucketId, long parentId, // format: /volumeId/bucketId/parentId(bucketId)/keyName String key = - "/" + volumeId + "/" + bucketId + "/" + parentId + "/" + keyName; + OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX + + parentId + OM_KEY_PREFIX + keyName; OmDirectoryInfo value = OMRequestTestUtils.createOmDirectoryInfo(keyName, objectId, parentId); From af9c49493cdadd61611b0c3c2f08a65e541f1fa5 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 14:19:26 +0100 Subject: [PATCH 24/33] Print to stderr instead of stdout in order to be compliant with piping tools like jq --- .../apache/hadoop/ozone/debug/TestContainerKeyScanner.java | 4 ++-- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index d8ed7704169..1f4e2722695 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -168,9 +168,9 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); - Assertions.assertTrue(stdout.toString().contains(KEYS_NOT_FOUND_OUTPUT)); + Assertions.assertTrue(stderr.toString().contains(KEYS_NOT_FOUND_OUTPUT)); - Assertions.assertTrue(stderr.toString().isEmpty()); + Assertions.assertTrue(stdout.toString().isEmpty()); } private void createFile(long volumeId, long 
bucketId, String keyName, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index c0812411313..e2fe6cb34af 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -381,8 +381,8 @@ private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { List containerKeyInfos = containerKeyInfoWrapper.getContainerKeyInfos(); if (containerKeyInfos.isEmpty()) { - out().println("No keys were found for container IDs: " + containerIds); - out().println( + err().println("No keys were found for container IDs: " + containerIds); + err().println( "Keys processed: " + containerKeyInfoWrapper.getKeysProcessed()); return; } From ad94ff345ade387446c5ecc43883c9b2676596f7 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 15:00:40 +0100 Subject: [PATCH 25/33] Replace usage of ContainerKeyInfoWrapper with ContainerKeyInfoResponse --- .../ozone/debug/ContainerKeyInfoResponse.java | 8 +++ .../ozone/debug/ContainerKeyInfoWrapper.java | 43 ------------- .../ozone/debug/ContainerKeyScanner.java | 60 +++++++------------ 3 files changed, 31 insertions(+), 80 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java index 865861231ae..ebfb9dc46ee 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java @@ -38,6 +38,14 @@ public ContainerKeyInfoResponse( this.containerKeys = 
containerKeys; } + public long getKeysProcessed() { + return keysProcessed; + } + + public Map> getContainerKeys() { + return containerKeys; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java deleted file mode 100644 index 41c3ee9cdd9..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoWrapper.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.debug; - -import java.util.List; - -/** - * Class for aggregation of collected data. 
- */ -public class ContainerKeyInfoWrapper { - - private final long keysProcessed; - private final List containerKeyInfos; - - public ContainerKeyInfoWrapper(long keysProcessed, - List containerKeyInfos) { - this.keysProcessed = keysProcessed; - this.containerKeyInfos = containerKeyInfos; - } - - public long getKeysProcessed() { - return keysProcessed; - } - - public List getContainerKeyInfos() { - return containerKeyInfos; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index e2fe6cb34af..37d1e7ed63c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.debug; import com.google.common.collect.Sets; -import com.google.gson.Gson; import com.google.gson.GsonBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; @@ -101,10 +100,10 @@ public class ContainerKeyScanner implements Callable, @Override public Void call() throws Exception { - ContainerKeyInfoWrapper containerKeyInfoWrapper = + ContainerKeyInfoResponse containerKeyInfoResponse = scanDBForContainerKeys(parent.getDbPath()); - printOutput(containerKeyInfoWrapper); + printOutput(containerKeyInfoResponse); closeStdChannels(); @@ -212,9 +211,9 @@ private void addToPathMap(Pair objectIDPath, } } - private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) + private ContainerKeyInfoResponse scanDBForContainerKeys(String dbPath) throws RocksDBException, IOException { - List containerKeyInfos = new ArrayList<>(); + Map> containerKeyInfos = new HashMap<>(); List columnFamilyDescriptors = RocksDBUtils.getColumnFamilyDescriptors(dbPath); @@ -237,13 +236,13 @@ private ContainerKeyInfoWrapper scanDBForContainerKeys(String dbPath) 
processTable(dbDefinition, columnFamilyHandles, db, containerKeyInfos, KEY_TABLE); } - return new ContainerKeyInfoWrapper(keysProcessed, containerKeyInfos); + return new ContainerKeyInfoResponse(keysProcessed, containerKeyInfos); } private long processTable(DBDefinition dbDefinition, List columnFamilyHandles, ManagedRocksDB db, - List containerKeyInfos, + Map> containerKeyInfos, String tableName) throws IOException { long keysProcessed = 0; @@ -289,7 +288,7 @@ private long processTable(DBDefinition dbDefinition, } } - private void processData(List containerKeyInfos, + private void processData(Map> containerKeyInfos, String tableName, List keyLocationVersions, long volumeId, long bucketId, OmKeyInfo value) @@ -314,11 +313,17 @@ private void processData(List containerKeyInfos, keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); } keyName.append(value.getKeyName()); - containerKeyInfos.add( - new ContainerKeyInfo(locationInfo.getContainerID(), - value.getVolumeName(), volumeId, value.getBucketName(), - bucketId, keyName.toString(), - value.getParentObjectID())); + + containerKeyInfos.merge(locationInfo.getContainerID(), + new ArrayList<>(Collections.singletonList( + new ContainerKeyInfo(locationInfo.getContainerID(), + value.getVolumeName(), volumeId, value.getBucketName(), + bucketId, keyName.toString(), + value.getParentObjectID()))), + (existingList, newList) -> { + existingList.addAll(newList); + return existingList; + }); } } } @@ -377,35 +382,16 @@ private String removeTrailingSlashIfNeeded(String dbPath) { return dbPath; } - private void printOutput(ContainerKeyInfoWrapper containerKeyInfoWrapper) { - List containerKeyInfos = - containerKeyInfoWrapper.getContainerKeyInfos(); - if (containerKeyInfos.isEmpty()) { + private void printOutput(ContainerKeyInfoResponse containerKeyInfoResponse) { + if (containerKeyInfoResponse.getContainerKeys().isEmpty()) { err().println("No keys were found for container IDs: " + containerIds); err().println( - "Keys 
processed: " + containerKeyInfoWrapper.getKeysProcessed()); + "Keys processed: " + containerKeyInfoResponse.getKeysProcessed()); return; } - Map> infoMap = new HashMap<>(); - - for (long id : containerIds) { - List tmpList = new ArrayList<>(); - - for (ContainerKeyInfo info : containerKeyInfos) { - if (id == info.getContainerID()) { - tmpList.add(info); - } - } - infoMap.put(id, tmpList); - } - - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson( - new ContainerKeyInfoResponse(containerKeyInfoWrapper.getKeysProcessed(), - infoMap)); - - out().print(prettyJson); + out().print(new GsonBuilder().setPrettyPrinting().create() + .toJson(containerKeyInfoResponse)); } } From 5cd90e1983e51925a5093714c193f922649b0b9e Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 15:08:05 +0100 Subject: [PATCH 26/33] Fix import order --- .../ozone/om/request/OMRequestTestUtils.java | 51 +++++++++++-------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index cef0acb41b5..21b94ce5f05 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,6 +19,12 @@ package org.apache.hadoop.ozone.om.request; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -28,8 +34,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import 
org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -38,9 +42,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -48,27 +50,39 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartUploadAbortRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartCommitUploadPartRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartUploadCompleteRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartInfoInitiateRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .AddAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .RemoveAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetAclRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignAdminRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantGetUserInfoRequest; @@ -79,16 +93,13 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; + import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; From 88488e59c09dcd00b92769403ccc600d2783349f Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 16:26:24 +0100 Subject: [PATCH 27/33] Remove multi character flags with a single dash --- .../apache/hadoop/ozone/debug/TestContainerKeyScanner.java | 4 ++-- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 1f4e2722695..72bb125b956 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -139,7 +139,7 @@ void testWhenThereAreKeysForContainerIds() throws 
IOException { String[] cmdArgs = {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", - "-ids", "1,2,3"}; + "--container-ids", "1,2,3"}; int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); @@ -163,7 +163,7 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { String[] cmdArgs = {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", - "-ids", "1,2,3"}; + "--container-ids", "1,2,3"}; int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 37d1e7ed63c..3227f933477 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -87,7 +87,7 @@ public class ContainerKeyScanner implements Callable, @CommandLine.ParentCommand private RDBParser parent; - @CommandLine.Option(names = {"-ids", "--container-ids"}, + @CommandLine.Option(names = {"--container-ids"}, split = ",", paramLabel = "containerIDs", required = true, From 7efab907cd46ee140ae81c9381c6199011666131 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 16:48:38 +0100 Subject: [PATCH 28/33] Split containers ids by spaces not by commas --- .../apache/hadoop/ozone/debug/TestContainerKeyScanner.java | 4 ++-- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 72bb125b956..090bf32a1c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -139,7 +139,7 @@ void testWhenThereAreKeysForContainerIds() throws IOException { String[] cmdArgs = {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", - "--container-ids", "1,2,3"}; + "--container-ids", "1 2 3"}; int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); @@ -163,7 +163,7 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { String[] cmdArgs = {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", - "--container-ids", "1,2,3"}; + "--container-ids", "1 2 3"}; int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 3227f933477..56c30098192 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -88,11 +88,10 @@ public class ContainerKeyScanner implements Callable, private RDBParser parent; @CommandLine.Option(names = {"--container-ids"}, - split = ",", - paramLabel = "containerIDs", + split = " ", + paramLabel = "", required = true, - description = "Set of container IDs to be used for getting all " + - "their keys. 
Example-usage: 1,11,2 (Separated by ',').") + description = "One or more container IDs separated by spaces.") private Set containerIds; private static Map directoryTable; From f88352402c1dd5b5ef0b3328f9e263361f2e68da Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Thu, 11 Jan 2024 17:26:51 +0100 Subject: [PATCH 29/33] Use table names from OmMetadataManagerImpl --- .../apache/hadoop/ozone/debug/TestContainerKeyScanner.java | 6 +++--- .../org/apache/hadoop/ozone/debug/ContainerKeyScanner.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index 090bf32a1c3..eb92c4b3bdc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -48,15 +48,15 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; /** * This class tests `ozone debug ldb ckscanner` CLI that reads from RocksDB * and gets keys for container ids. 
*/ public class TestContainerKeyScanner { - private static final String KEY_TABLE = "keyTable"; - private static final String FILE_TABLE = "fileTable"; - private static final String DIRECTORY_TABLE = "directoryTable"; private DBStore dbStore; @TempDir private File tempDir; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 56c30098192..4aff0f57554 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -63,6 +63,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; /** * Parser for a list of container IDs, to scan for keys. 
@@ -77,9 +80,6 @@ public class ContainerKeyScanner implements Callable, public static final Logger LOG = LoggerFactory.getLogger(ContainerKeyScanner.class); - private static final String FILE_TABLE = "fileTable"; - private static final String KEY_TABLE = "keyTable"; - private static final String DIRECTORY_TABLE = "directoryTable"; @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; From 5d581c9733a4bf4fa661bbc6af5361dbd4e48461 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 12 Jan 2024 12:35:13 +0100 Subject: [PATCH 30/33] Use OmMetadataManagerImpl as wrapper for RocksDB instance --- .../ozone/debug/TestContainerKeyScanner.java | 17 +- .../ozone/debug/ContainerKeyScanner.java | 244 +++++++++--------- 2 files changed, 138 insertions(+), 123 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index eb92c4b3bdc..b5b91f5ebc6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -116,9 +116,7 @@ public void setup() throws IOException { @AfterEach public void shutdown() throws IOException { - if (dbStore != null) { - dbStore.close(); - } + closeDbStore(); pstderr.close(); stderr.close(); pstdout.close(); @@ -141,6 +139,8 @@ void testWhenThereAreKeysForContainerIds() throws IOException { {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", "--container-ids", "1 2 3"}; + closeDbStore(); + int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); @@ -151,6 +151,15 @@ void testWhenThereAreKeysForContainerIds() throws IOException { Assertions.assertTrue(stderr.toString().isEmpty()); } + /** + * Close db store because of the lock. 
+ */ + private void closeDbStore() throws IOException { + if (dbStore != null) { + dbStore.close(); + } + } + @Test void testWhenThereAreNotKeysForContainerIds() throws IOException { @@ -165,6 +174,8 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", "--container-ids", "1 2 3"}; + closeDbStore(); + int exitCode = cmd.execute(cmdArgs); Assertions.assertEquals(0, exitCode); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 4aff0f57554..6bc95aea7db 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -21,21 +21,19 @@ import com.google.common.collect.Sets; import com.google.gson.GsonBuilder; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.DBDefinition; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.kohsuke.MetaInfServices; -import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; -import 
org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -43,7 +41,6 @@ import java.io.IOException; import java.io.PrintWriter; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -60,12 +57,8 @@ import java.util.concurrent.Callable; import java.util.stream.Collectors; -import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; /** * Parser for a list of container IDs, to scan for keys. @@ -99,8 +92,14 @@ public class ContainerKeyScanner implements Callable, @Override public Void call() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set("ozone.om.db.dirs", + parent.getDbPath().substring(0, parent.getDbPath().lastIndexOf("/"))); + OmMetadataManagerImpl omMetadataManager = + new OmMetadataManagerImpl(ozoneConfiguration, null); + ContainerKeyInfoResponse containerKeyInfoResponse = - scanDBForContainerKeys(parent.getDbPath()); + scanDBForContainerKeys(omMetadataManager); printOutput(containerKeyInfoResponse); @@ -114,36 +113,17 @@ private void closeStdChannels() { err().close(); } - private Map getDirectoryTableData(String dbPath) - throws RocksDBException, IOException { + private Map getDirectoryTableData( + OmMetadataManagerImpl metadataManager) + throws IOException { Map directoryTableData = new HashMap<>(); - // Get all tables from RocksDB - List columnFamilyDescriptors = - RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List columnFamilyHandles = new ArrayList<>(); - - try (ManagedRocksDB db = 
ManagedRocksDB.openReadOnly(dbPath, - columnFamilyDescriptors, columnFamilyHandles)) { - - // Get directory table handle - ColumnFamilyHandle columnFamilyHandle = - getColumnFamilyHandle(DIRECTORY_TABLE.getBytes(UTF_8), - columnFamilyHandles); - if (columnFamilyHandle == null) { - throw new IllegalStateException("columnFamilyHandle is null"); - } - - // Get iterator for directory table - try (ManagedRocksIterator iterator = new ManagedRocksIterator( - db.get().newIterator(columnFamilyHandle))) { - iterator.get().seekToFirst(); - while (iterator.get().isValid()) { - directoryTableData.put(StringUtils.bytes2String(iterator.get().key()), - OmDirectoryInfo.getCodec() - .fromPersistedFormat(iterator.get().value())); - iterator.get().next(); - } + try ( + TableIterator> + iterator = metadataManager.getDirectoryTable().iterator()) { + while (iterator.hasNext()) { + Table.KeyValue next = iterator.next(); + directoryTableData.put(next.getKey(), next.getValue()); } } @@ -210,115 +190,139 @@ private void addToPathMap(Pair objectIDPath, } } - private ContainerKeyInfoResponse scanDBForContainerKeys(String dbPath) - throws RocksDBException, IOException { + private ContainerKeyInfoResponse scanDBForContainerKeys( + OmMetadataManagerImpl omMetadataManager) + throws IOException { Map> containerKeyInfos = new HashMap<>(); - List columnFamilyDescriptors = - RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List columnFamilyHandles = new ArrayList<>(); long keysProcessed = 0; - try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, - columnFamilyDescriptors, columnFamilyHandles)) { - dbPath = removeTrailingSlashIfNeeded(dbPath); - DBDefinition dbDefinition = DBDefinitionFactory.getDefinition( - Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new IllegalStateException("Incorrect DB Path"); - } + keysProcessed += processFileTable(containerKeyInfos, omMetadataManager); + keysProcessed += processKeyTable(containerKeyInfos, 
omMetadataManager); - keysProcessed += - processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, FILE_TABLE); - keysProcessed += - processTable(dbDefinition, columnFamilyHandles, db, - containerKeyInfos, KEY_TABLE); - } return new ContainerKeyInfoResponse(keysProcessed, containerKeyInfos); } - private long processTable(DBDefinition dbDefinition, - List columnFamilyHandles, - ManagedRocksDB db, - Map> containerKeyInfos, - String tableName) - throws IOException { - long keysProcessed = 0; + private long processKeyTable( + Map> containerKeyInfos, + OmMetadataManagerImpl omMetadataManager) throws IOException { + long keysProcessed = 0L; + + // Anything but not FSO bucket layout + Table fileTable = omMetadataManager.getKeyTable( + BucketLayout.DEFAULT); + try (TableIterator> + iterator = fileTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue next = iterator.next(); + keysProcessed++; - ColumnFamilyHandle columnFamilyHandle = - getColumnFamilyHandle(tableName.getBytes(UTF_8), columnFamilyHandles); - if (columnFamilyHandle == null) { - throw new IllegalStateException("columnFamilyHandle is null"); + if (Objects.isNull(next.getValue().getKeyLocationVersions())) { + continue; + } + + processKeyData(containerKeyInfos, next.getKey(), next.getValue()); + } } - try (ManagedRocksIterator iterator = new ManagedRocksIterator( - db.get().newIterator(columnFamilyHandle))) { - iterator.get().seekToFirst(); - while (iterator.get().isValid()) { - OmKeyInfo value = OmKeyInfo.getCodec(true) - .fromPersistedFormat(iterator.get().value()); - List keyLocationVersions = - value.getKeyLocationVersions(); - if (Objects.isNull(keyLocationVersions)) { - iterator.get().next(); - keysProcessed++; + return keysProcessed; + } + + + private long processFileTable( + Map> containerKeyInfos, + OmMetadataManagerImpl omMetadataManager) + throws IOException { + long keysProcessed = 0L; + + try (TableIterator> + iterator = omMetadataManager.getFileTable().iterator()) { + 
while (iterator.hasNext()) { + Table.KeyValue next = iterator.next(); + keysProcessed++; + + if (Objects.isNull(next.getValue().getKeyLocationVersions())) { continue; } - long volumeId = 0; - long bucketId = 0; - // volumeId and bucketId are only applicable to file table - if (tableName.equals(FILE_TABLE)) { - String key = new String(iterator.get().key(), UTF_8); - String[] keyParts = key.split(OM_KEY_PREFIX); - volumeId = Long.parseLong(keyParts[1]); - bucketId = Long.parseLong(keyParts[2]); - } + processFileData(containerKeyInfos, next.getKey(), next.getValue(), + omMetadataManager); + } + } - processData(containerKeyInfos, tableName, keyLocationVersions, volumeId, - bucketId, value); - iterator.get().next(); - keysProcessed++; + return keysProcessed; + } + + /** + * @param key file table key. + * @return Pair of volume id and bucket id. + */ + private Pair parseKey(String key) { + String[] keyParts = key.split(OM_KEY_PREFIX); + return Pair.of(Long.parseLong(keyParts[1]), Long.parseLong(keyParts[2])); + } + + private void processKeyData( + Map> containerKeyInfos, + String key, OmKeyInfo keyInfo) { + long volumeId = 0L; + long bucketId = 0L; + + for (OmKeyLocationInfoGroup locationInfoGroup : + keyInfo.getKeyLocationVersions()) { + for (List locationInfos : + locationInfoGroup.getLocationVersionMap().values()) { + for (OmKeyLocationInfo locationInfo : locationInfos) { + if (containerIds.contains(locationInfo.getContainerID())) { + + containerKeyInfos.merge(locationInfo.getContainerID(), + new ArrayList<>(Collections.singletonList( + new ContainerKeyInfo(locationInfo.getContainerID(), + keyInfo.getVolumeName(), volumeId, + keyInfo.getBucketName(), bucketId, keyInfo.getKeyName(), + keyInfo.getParentObjectID()))), + (existingList, newList) -> { + existingList.addAll(newList); + return existingList; + }); + } + } } - return keysProcessed; - } catch (RocksDBException e) { - throw new RuntimeException(e); } } - private void processData(Map> containerKeyInfos, - String 
tableName, - List keyLocationVersions, - long volumeId, long bucketId, OmKeyInfo value) - throws RocksDBException, IOException { - for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { + private void processFileData( + Map> containerKeyInfos, + String key, OmKeyInfo keyInfo, OmMetadataManagerImpl omMetadataManager) + throws IOException { + + Pair volumeAndBucketId = parseKey(key); + Long volumeId = volumeAndBucketId.getLeft(); + Long bucketId = volumeAndBucketId.getRight(); + + for (OmKeyLocationInfoGroup locationInfoGroup : + keyInfo.getKeyLocationVersions()) { for (List locationInfos : locationInfoGroup.getLocationVersionMap().values()) { for (OmKeyLocationInfo locationInfo : locationInfos) { if (containerIds.contains(locationInfo.getContainerID())) { - // Generate asbolute key path for FSO keys StringBuilder keyName = new StringBuilder(); - if (tableName.equals(FILE_TABLE)) { - // Load directory table only after the first fso key is found - // to reduce necessary load if there are not fso keys - if (!isDirTableLoaded) { - long start = System.currentTimeMillis(); - directoryTable = getDirectoryTableData(parent.getDbPath()); - long end = System.currentTimeMillis(); - LOG.info("directoryTable loaded in " + (end - start) + " ms."); - isDirTableLoaded = true; - } - keyName.append(getFsoKeyPrefix(volumeId, bucketId, value)); + if (!isDirTableLoaded) { + long start = System.currentTimeMillis(); + directoryTable = getDirectoryTableData(omMetadataManager); + long end = System.currentTimeMillis(); + LOG.info("directoryTable loaded in " + (end - start) + " ms."); + isDirTableLoaded = true; } - keyName.append(value.getKeyName()); + keyName.append(getFsoKeyPrefix(volumeId, bucketId, keyInfo)); + keyName.append(keyInfo.getKeyName()); containerKeyInfos.merge(locationInfo.getContainerID(), new ArrayList<>(Collections.singletonList( new ContainerKeyInfo(locationInfo.getContainerID(), - value.getVolumeName(), volumeId, value.getBucketName(), - bucketId, 
keyName.toString(), - value.getParentObjectID()))), + keyInfo.getVolumeName(), volumeId, + keyInfo.getBucketName(), bucketId, keyName.toString(), + keyInfo.getParentObjectID()))), (existingList, newList) -> { existingList.addAll(newList); return existingList; From 0d037726ebaa46d93d0bc2cde4e98ca9ac9ce361 Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Fri, 12 Jan 2024 13:05:56 +0100 Subject: [PATCH 31/33] Make ckscanner command as subcommand to debug command --- .../ozone/debug/TestContainerKeyScanner.java | 8 ++-- .../ozone/debug/ContainerKeyScanner.java | 46 ++++--------------- 2 files changed, 13 insertions(+), 41 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java index b5b91f5ebc6..1ed2ed57ad8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java @@ -53,7 +53,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; /** - * This class tests `ozone debug ldb ckscanner` CLI that reads from RocksDB + * This class tests `ozone debug ckscanner` CLI that reads from RocksDB * and gets keys for container ids. 
*/ public class TestContainerKeyScanner { @@ -103,7 +103,7 @@ public void setup() throws IOException { stderr = new StringWriter(); pstderr = new PrintWriter(stderr); - cmd = new CommandLine(new RDBParser()) + cmd = new CommandLine(new OzoneDebug()) .addSubcommand(new ContainerKeyScanner()) .setOut(pstdout) .setErr(pstderr); @@ -136,7 +136,7 @@ void testWhenThereAreKeysForContainerIds() throws IOException { createKey("key3", 3L); String[] cmdArgs = - {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", + {"ckscanner", "--om-db", dbStore.getDbLocation().getAbsolutePath(), "--container-ids", "1 2 3"}; closeDbStore(); @@ -171,7 +171,7 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { createKey("key3", 6L); String[] cmdArgs = - {"--db", dbStore.getDbLocation().getAbsolutePath(), "ckscanner", + {"ckscanner", "--om-db", dbStore.getDbLocation().getAbsolutePath(), "--container-ids", "1 2 3"}; closeDbStore(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java index 6bc95aea7db..cec44d21058 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -33,7 +32,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.kohsuke.MetaInfServices; -import org.rocksdb.ColumnFamilyHandle; import org.slf4j.Logger; 
import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -42,7 +40,6 @@ import java.io.PrintWriter; import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -68,25 +65,24 @@ description = "Find keys that reference a container" ) @MetaInfServices(SubcommandWithParent.class) -public class ContainerKeyScanner implements Callable, - SubcommandWithParent { +public class ContainerKeyScanner + implements Callable, SubcommandWithParent { public static final Logger LOG = LoggerFactory.getLogger(ContainerKeyScanner.class); - @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; - - @CommandLine.ParentCommand - private RDBParser parent; - + @CommandLine.Option(names = {"--om-db"}, + paramLabel = "", + required = true, + description = "Path to OM DB.") + private String dbPath; @CommandLine.Option(names = {"--container-ids"}, split = " ", paramLabel = "", required = true, description = "One or more container IDs separated by spaces.") private Set containerIds; - private static Map directoryTable; private static boolean isDirTableLoaded = false; @@ -94,7 +90,7 @@ public class ContainerKeyScanner implements Callable, public Void call() throws Exception { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); ozoneConfiguration.set("ozone.om.db.dirs", - parent.getDbPath().substring(0, parent.getDbPath().lastIndexOf("/"))); + dbPath.substring(0, dbPath.lastIndexOf("/"))); OmMetadataManagerImpl omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); @@ -132,7 +128,7 @@ private Map getDirectoryTableData( @Override public Class getParentType() { - return RDBParser.class; + return OzoneDebug.class; } private static PrintWriter err() { @@ -361,30 +357,6 @@ private String getFsoKeyPrefix(long volumeId, long bucketId, return removeBeginningSlash(keyPath); } - - private ColumnFamilyHandle getColumnFamilyHandle( - byte[] name, List 
columnFamilyHandles) { - return columnFamilyHandles - .stream() - .filter( - handle -> { - try { - return Arrays.equals(handle.getName(), name); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - }) - .findAny() - .orElse(null); - } - - private String removeTrailingSlashIfNeeded(String dbPath) { - if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { - dbPath = dbPath.substring(0, dbPath.length() - 1); - } - return dbPath; - } - private void printOutput(ContainerKeyInfoResponse containerKeyInfoResponse) { if (containerKeyInfoResponse.getContainerKeys().isEmpty()) { err().println("No keys were found for container IDs: " + containerIds); From ab9b3f065384ee7e95ccb0c3b182513853e4091b Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Sat, 10 Feb 2024 22:02:50 +0100 Subject: [PATCH 32/33] Rename command and put under different parent --- ...KeyScanner.java => TestFindContainerKeys.java} | 7 +++++-- .../ozone/debug/container/ContainerCommands.java | 3 ++- .../debug/{ => container}/ContainerKeyInfo.java | 2 +- .../{ => container}/ContainerKeyInfoResponse.java | 2 +- .../FindContainerKeys.java} | 15 ++++++++------- 5 files changed, 17 insertions(+), 12 deletions(-) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/{TestContainerKeyScanner.java => TestFindContainerKeys.java} (96%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => container}/ContainerKeyInfo.java (98%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => container}/ContainerKeyInfoResponse.java (97%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ContainerKeyScanner.java => container/FindContainerKeys.java} (97%) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java similarity index 96% rename from 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java index 1ed2ed57ad8..74ec5ea494f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestContainerKeyScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java @@ -25,6 +25,9 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.debug.container.ContainerKeyInfo; +import org.apache.hadoop.ozone.debug.container.ContainerKeyInfoResponse; +import org.apache.hadoop.ozone.debug.container.FindContainerKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -56,7 +59,7 @@ * This class tests `ozone debug ckscanner` CLI that reads from RocksDB * and gets keys for container ids. 
*/ -public class TestContainerKeyScanner { +public class TestFindContainerKeys { private DBStore dbStore; @TempDir private File tempDir; @@ -104,7 +107,7 @@ public void setup() throws IOException { pstderr = new PrintWriter(stderr); cmd = new CommandLine(new OzoneDebug()) - .addSubcommand(new ContainerKeyScanner()) + .addSubcommand(new FindContainerKeys()) .setOut(pstdout) .setErr(pstderr); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java index 5592926bf88..79f9e9f6dab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java @@ -79,7 +79,8 @@ ListSubcommand.class, InfoSubcommand.class, ExportSubcommand.class, - InspectSubcommand.class + InspectSubcommand.class, + FindContainerKeys.class }) @MetaInfServices(SubcommandWithParent.class) public class ContainerCommands implements Callable, SubcommandWithParent { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java similarity index 98% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java index d755a33d97e..12bb2f5cd03 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.container; import java.util.Objects; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java index ebfb9dc46ee..410c15f3c1a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyInfoResponse.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.container; import com.fasterxml.jackson.annotation.JsonInclude; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java index cec44d21058..ddca893f471 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerKeyScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.container; import com.google.common.collect.Sets; import com.google.gson.GsonBuilder; @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -58,18 +59,18 @@ import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH; /** - * Parser for a list of container IDs, to scan for keys. + * Finds keys that reference a container/s. */ @CommandLine.Command( - name = "ckscanner", + name = "find-keys", description = "Find keys that reference a container" ) @MetaInfServices(SubcommandWithParent.class) -public class ContainerKeyScanner +public class FindContainerKeys implements Callable, SubcommandWithParent { public static final Logger LOG = - LoggerFactory.getLogger(ContainerKeyScanner.class); + LoggerFactory.getLogger(FindContainerKeys.class); @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; @CommandLine.Option(names = {"--om-db"}, @@ -78,10 +79,10 @@ public class ContainerKeyScanner description = "Path to OM DB.") private String dbPath; @CommandLine.Option(names = {"--container-ids"}, - split = " ", + split = ",", paramLabel = "", required = true, - description = "One or more container IDs separated by spaces.") + description = "One or more container IDs separated by comma.") private Set containerIds; private static Map directoryTable; private static boolean isDirTableLoaded = false; From d860f710bbc84275963811d580742fa6544bec2b Mon Sep 17 00:00:00 2001 From: Mladjan Gadzic Date: Sat, 10 Feb 2024 23:20:02 +0100 Subject: [PATCH 33/33] Address review comments --- .../ozone/debug/TestFindContainerKeys.java | 245 ++++++++++-------- 
.../hadoop/ozone/om/OMMetadataManager.java | 24 +- .../ozone/om/OmMetadataManagerImpl.java | 13 + .../hadoop/ozone/om/OmSnapshotManager.java | 5 +- .../SnapshotDirectoryCleaningService.java | 6 +- .../ozone/om/snapshot/SnapshotUtils.java | 27 +- .../ozone/om/request/OMRequestTestUtils.java | 18 ++ .../TestOMSnapshotCreateResponse.java | 5 +- hadoop-ozone/tools/pom.xml | 15 -- .../debug/container/ContainerKeyInfo.java | 13 +- .../container/ContainerKeyInfoResponse.java | 20 +- .../debug/container/FindContainerKeys.java | 104 ++++---- 12 files changed, 271 insertions(+), 224 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java index 74ec5ea494f..da499fde6f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestFindContainerKeys.java @@ -19,21 +19,22 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.debug.container.ContainerCommands; import org.apache.hadoop.ozone.debug.container.ContainerKeyInfo; import org.apache.hadoop.ozone.debug.container.ContainerKeyInfoResponse; import org.apache.hadoop.ozone.debug.container.FindContainerKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -50,14 +51,15 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.assertj.core.api.Assertions.assertThat; /** - * This class tests `ozone debug ckscanner` CLI that reads from RocksDB - * and gets keys for container ids. + * Test class for {@link FindContainerKeys}. 
*/ public class TestFindContainerKeys { private DBStore dbStore; @@ -66,37 +68,8 @@ public class TestFindContainerKeys { private StringWriter stdout, stderr; private PrintWriter pstdout, pstderr; private CommandLine cmd; - private static final Gson GSON = - new GsonBuilder().setPrettyPrinting().create(); - private static final ContainerKeyInfo KEY_ONE = - new ContainerKeyInfo(1L, "vol1", -123L, "bucket1", -456L, "dir1/key1", - -789L); - private static final ContainerKeyInfo KEY_TWO = - new ContainerKeyInfo(2L, "vol1", 0L, "bucket1", 0L, "key2", 0L); - private static final ContainerKeyInfo KEY_THREE = - new ContainerKeyInfo(3L, "vol1", 0L, "bucket1", 0L, "key3", 0L); - - private static final Map> CONTAINER_KEYS = - new HashMap<>(); - - static { - List list1 = new ArrayList<>(); - list1.add(KEY_ONE); - List list2 = new ArrayList<>(); - list2.add(KEY_TWO); - List list3 = new ArrayList<>(); - list3.add(KEY_THREE); - CONTAINER_KEYS.put(1L, list1); - CONTAINER_KEYS.put(2L, list2); - CONTAINER_KEYS.put(3L, list3); - } - - private static final ContainerKeyInfoResponse KEYS_FOUND_OUTPUT = - new ContainerKeyInfoResponse(3, CONTAINER_KEYS); - - private static final String KEYS_NOT_FOUND_OUTPUT = - "No keys were found for container IDs: [1, 2, 3]\n" + - "Keys processed: 3\n"; + private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create(); + private String[] cmdArgs; @BeforeEach public void setup() throws IOException { @@ -107,6 +80,7 @@ public void setup() throws IOException { pstderr = new PrintWriter(stderr); cmd = new CommandLine(new OzoneDebug()) + .addSubcommand(new ContainerCommands()) .addSubcommand(new FindContainerKeys()) .setOut(pstdout) .setErr(pstderr); @@ -115,6 +89,9 @@ public void setup() throws IOException { .setPath(tempDir.toPath()).addTable(KEY_TABLE).addTable(FILE_TABLE) .addTable(DIRECTORY_TABLE) .build(); + + cmdArgs = + new String[]{"find-keys", "--om-db", dbStore.getDbLocation().getAbsolutePath(), "--container-ids", "1 2 3"}; } 
@AfterEach @@ -127,44 +104,114 @@ public void shutdown() throws IOException { } @Test - void testWhenThereAreKeysForContainerIds() throws IOException { - - // create keys for tables + void testFSO() throws Exception { + /* + Structure: + keyName (container id) + + /vol1/bucet1 + - key1 (1) + - dir1 + - key2 (2) + - dir2 + - key3 (3) + - key4 (3) + - key5 (4) + */ long volumeId = -123L; long bucketId = -456L; - long dirObjectId = -789L; - createDirectory(volumeId, bucketId, bucketId, dirObjectId, "dir1"); - createFile(volumeId, bucketId, "key1", -987L, dirObjectId, 1L); - createKey("key2", 2L); - createKey("key3", 3L); - - String[] cmdArgs = - {"ckscanner", "--om-db", dbStore.getDbLocation().getAbsolutePath(), - "--container-ids", "1 2 3"}; + long dirObjectId1 = -789L; + long dirObjectId2 = -788L; + createDirectory(volumeId, bucketId, bucketId, dirObjectId1, "dir1"); + createDirectory(volumeId, bucketId, dirObjectId1, dirObjectId2, "dir2"); + createFile(volumeId, bucketId, "key1", -987L, bucketId, 1L); + createFile(volumeId, bucketId, "key2", -986L, dirObjectId1, 2L); + createFile(volumeId, bucketId, "key3", -985L, dirObjectId2, 3L); + createFile(volumeId, bucketId, "key4", -984L, dirObjectId2, 3L); + createFile(volumeId, bucketId, "key5", -983L, dirObjectId2, 4L); closeDbStore(); int exitCode = cmd.execute(cmdArgs); - Assertions.assertEquals(0, exitCode); + assertThat(exitCode).isEqualTo(0); + + // Create expected response + List expectedKeysForContainer1 = new ArrayList<>(); + expectedKeysForContainer1.add(new ContainerKeyInfo(1L, "vol1", volumeId, "bucket1", bucketId, "key1", bucketId)); + List expectedKeysForContainer2 = new ArrayList<>(); + expectedKeysForContainer2.add( + new ContainerKeyInfo(2L, "vol1", volumeId, "bucket1", bucketId, "dir1/key2", dirObjectId1)); + List expectedKeysForContainer3 = new ArrayList<>(); + expectedKeysForContainer3.add( + new ContainerKeyInfo(3L, "vol1", volumeId, "bucket1", bucketId, "dir1/dir2/key3", dirObjectId2)); + 
expectedKeysForContainer3.add( + new ContainerKeyInfo(3L, "vol1", volumeId, "bucket1", bucketId, "dir1/dir2/key4", dirObjectId2)); + Map> expectedContainerIdToKeyInfos = new HashMap<>(); + expectedContainerIdToKeyInfos.put(1L, expectedKeysForContainer1); + expectedContainerIdToKeyInfos.put(2L, expectedKeysForContainer2); + expectedContainerIdToKeyInfos.put(3L, expectedKeysForContainer3); + ContainerKeyInfoResponse expectedResponse = new ContainerKeyInfoResponse(5, expectedContainerIdToKeyInfos); + assertThat(GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class)).isEqualTo(expectedResponse); + + assertThat(stderr.toString()).isEmpty(); + } - Assertions.assertEquals( - GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class), - KEYS_FOUND_OUTPUT); + @Test + void testNonFSO() throws Exception { + /* + Structure: + keyName (container id) + + /vol1/bucket1 + - key1 (1) + - dir1/key2 (2) + - dir1/dir2/key3 (3) + - dir1/dir2/key4 (3) + - key5 (4) + */ + createKey("key1", 1L); + createKey("dir1/key2", 2L); + createKey("dir1/dir2/key3", 3L); + createKey("dir1/dir2/key4", 3L); + createKey("key5", 4L); - Assertions.assertTrue(stderr.toString().isEmpty()); + closeDbStore(); + + int exitCode = cmd.execute(cmdArgs); + assertThat(exitCode).isEqualTo(0); + + // Create expected response + List expectedKeysForContainer1 = new ArrayList<>(); + expectedKeysForContainer1.add(new ContainerKeyInfo(1L, "vol1", 0, "bucket1", 0, "key1", 0)); + List expectedKeysForContainer2 = new ArrayList<>(); + expectedKeysForContainer2.add( + new ContainerKeyInfo(2L, "vol1", 0, "bucket1", 0, "dir1/key2", 0)); + List expectedKeysForContainer3 = new ArrayList<>(); + expectedKeysForContainer3.add( + new ContainerKeyInfo(3L, "vol1", 0, "bucket1", 0, "dir1/dir2/key3", 0)); + expectedKeysForContainer3.add( + new ContainerKeyInfo(3L, "vol1", 0, "bucket1", 0, "dir1/dir2/key4", 0)); + Map> expectedContainerIdToKeyInfos = new HashMap<>(); + expectedContainerIdToKeyInfos.put(1L, 
expectedKeysForContainer1); + expectedContainerIdToKeyInfos.put(2L, expectedKeysForContainer2); + expectedContainerIdToKeyInfos.put(3L, expectedKeysForContainer3); + ContainerKeyInfoResponse expectedResponse = new ContainerKeyInfoResponse(5, expectedContainerIdToKeyInfos); + assertThat(GSON.fromJson(stdout.toString(), ContainerKeyInfoResponse.class)).isEqualTo(expectedResponse); + + assertThat(stderr.toString()).isEmpty(); } /** * Close db store because of the lock. */ private void closeDbStore() throws IOException { - if (dbStore != null) { + if (dbStore != null && !dbStore.isClosed()) { dbStore.close(); } } @Test - void testWhenThereAreNotKeysForContainerIds() throws IOException { + void testWhenThereAreNoKeysForContainerIds() throws Exception { // create keys for tables long volumeId = -123L; @@ -173,83 +220,67 @@ void testWhenThereAreNotKeysForContainerIds() throws IOException { createKey("key2", 5L); createKey("key3", 6L); - String[] cmdArgs = - {"ckscanner", "--om-db", dbStore.getDbLocation().getAbsolutePath(), - "--container-ids", "1 2 3"}; - closeDbStore(); int exitCode = cmd.execute(cmdArgs); - Assertions.assertEquals(0, exitCode); + assertThat(exitCode).isEqualTo(0); - Assertions.assertTrue(stderr.toString().contains(KEYS_NOT_FOUND_OUTPUT)); + assertThat(stderr.toString()).contains("No keys were found for container IDs: [1, 2, 3]\n" + "Keys processed: 3\n"); - Assertions.assertTrue(stdout.toString().isEmpty()); + assertThat(stdout.toString()).isEmpty(); } - private void createFile(long volumeId, long bucketId, String keyName, - long objectId, long parentId, long containerId) - throws IOException { - Table table = dbStore.getTable(FILE_TABLE); + private void createFile(long volumeId, long bucketId, String keyName, long objectId, long parentId, long containerId) + throws Exception { + try (Table table = dbStore.getTable(FILE_TABLE)) { + // format: /volumeId/bucketId/parentId(bucketId)/keyName + String key = + OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + 
bucketId + OM_KEY_PREFIX + parentId + OM_KEY_PREFIX + keyName; - // format: /volumeId/bucketId/parentId(bucketId)/keyName - String key = - OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + - bucketId + OM_KEY_PREFIX + parentId + - OM_KEY_PREFIX + keyName; + OmKeyInfo value = getOmKeyInfo("vol1", "bucket1", keyName, containerId, objectId, parentId); - OmKeyInfo value = - getOmKeyInfo("vol1", "bucket1", keyName, containerId, objectId, - parentId); - - table.put(key.getBytes(UTF_8), - value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + table.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + } } - private void createKey(String keyName, long containerId) throws IOException { - Table table = dbStore.getTable(KEY_TABLE); + private void createKey(String keyName, long containerId) throws Exception { + try (Table table = dbStore.getTable(KEY_TABLE)) { + String volumeName = "vol1"; + String bucketName = "bucket1"; + // format: /volumeName/bucketName/keyName + String key = OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX + keyName; - String volumeName = "vol1"; - String bucketName = "bucket1"; - // format: /volumeName/bucketName/keyName - String key = OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName + - OM_KEY_PREFIX + keyName; + // generate table value + OmKeyInfo value = getOmKeyInfo(volumeName, bucketName, keyName, containerId, 0, 0); - // generate table value - OmKeyInfo value = - getOmKeyInfo(volumeName, bucketName, keyName, containerId, 0, 0); - - table.put(key.getBytes(UTF_8), - value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + table.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + } } - private void createDirectory(long volumeId, long bucketId, long parentId, - long objectId, String keyName) - throws IOException { - Table table = dbStore.getTable(DIRECTORY_TABLE); + private void createDirectory(long volumeId, long bucketId, long 
parentId, long objectId, String keyName) + throws Exception { + try (Table table = dbStore.getTable(DIRECTORY_TABLE)) { - // format: /volumeId/bucketId/parentId(bucketId)/keyName - String key = - OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX + - parentId + OM_KEY_PREFIX + keyName; + // format: /volumeId/bucketId/parentId(bucketId)/keyName + String key = + OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX + parentId + OM_KEY_PREFIX + keyName; - OmDirectoryInfo value = - OMRequestTestUtils.createOmDirectoryInfo(keyName, objectId, parentId); + OmDirectoryInfo value = OMRequestTestUtils.createOmDirectoryInfo(keyName, objectId, parentId); - table.put(key.getBytes(UTF_8), value.getProtobuf().toByteArray()); + table.put(key.getBytes(UTF_8), value.getProtobuf().toByteArray()); + } } private static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, long containerId, long objectId, long parentId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, - keyName, HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.ONE, objectId, parentId, 1, 1, 1, false, - new ArrayList<>( - Collections.singletonList( - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(containerId, 1)).build()))); + return OMRequestTestUtils + .createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE), objectId, parentId, + new OmKeyLocationInfoGroup(0L, Collections.singletonList(new OmKeyLocationInfo.Builder() + .setBlockID(new BlockID(containerId, 1)) + .build()))) + .build(); } } diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 9651c16175a..b3f3c400b34 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -510,7 +510,7 @@ default String getOzonePathKey(long volumeId, long bucketId, } /** - * Given ozone path key, component id, return the corresponding + * Given ozone path key, component id, return the corresponding * DB path key for delete table. * * @param objectId - object Id @@ -611,4 +611,26 @@ String getMultipartKey(long volumeId, long bucketId, */ boolean containsIncompleteMPUs(String volume, String bucket) throws IOException; + + /** + * Helper method to generate /volumeId/bucketId/ DB key prefix from given + * volume name and bucket name as a prefix for FSO buckets. + * + * @param volumeName volume name + * @param bucketName bucket name + * @return /volumeId/bucketId/ + * e.g. /-9223372036854772480/-9223372036854771968/ + */ + String getOzonePathKeyForFso(String volumeName, String bucketName) + throws IOException; + + /** + * Helper method to generate /volumeId/bucketId DB key prefix from given + * volume id and bucket id as a prefix for FSO buckets. + * @param volumeId volume id + * @param bucketId bucket id + * @return /volumeId/bucketId + * e.g. 
/-9223372036854772480/-9223372036854771968/ + */ + String getOzonePathKeyForFso(long volumeId, long bucketId); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 982e04df04d..b06b4c256fb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -2185,4 +2185,17 @@ public void deleteWithBatch(AutoCloseable batchOperator, String id) } } } + + @Override + public String getOzonePathKeyForFso(String volumeName, String bucketName) + throws IOException { + final long volumeId = getVolumeId(volumeName); + final long bucketId = getBucketId(volumeName, bucketName); + return getOzonePathKeyForFso(volumeId, bucketId); + } + + @Override + public String getOzonePathKeyForFso(long volumeId, long bucketId) { + return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index eb37e399dfe..5d16e1829ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -98,7 +98,6 @@ import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; /** @@ -507,8 
+506,8 @@ private static void deleteKeysFromDelDirTableInSnapshotScope( String bucketName) throws IOException { // Range delete start key (inclusive) - final String keyPrefix = getOzonePathKeyForFso(omMetadataManager, - volumeName, bucketName); + final String keyPrefix = omMetadataManager + .getOzonePathKeyForFso(volumeName, bucketName); try (TableIterator> iter = omMetadataManager.getDeletedDirTable().iterator(keyPrefix)) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index fe0f6e111ed..e928c5be63f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -63,7 +63,6 @@ import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; /** * Snapshot BG Service for deleted directory deep clean and exclusive size @@ -211,8 +210,9 @@ public BackgroundTaskResult call() { .getKeyTable(bucketInfo.getBucketLayout()); } - String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, - currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); + String dbBucketKeyForDir = metadataManager + .getOzonePathKeyForFso(currSnapInfo.getVolumeName(), + currSnapInfo.getBucketName()); try (ReferenceCounted rcCurrOmSnapshot = omSnapshotManager.getActiveSnapshot( currSnapInfo.getVolumeName(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java 
index 89823995d0c..77b3aebef67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -186,8 +186,8 @@ public static Map getColumnFamilyToKeyPrefixMap( String bucketName ) throws IOException { String keyPrefix = getOzonePathKey(volumeName, bucketName); - String keyPrefixFso = getOzonePathKeyForFso(omMetadataManager, volumeName, - bucketName); + String keyPrefixFso = omMetadataManager + .getOzonePathKeyForFso(volumeName, bucketName); Map columnFamilyToPrefixMap = new HashMap<>(); columnFamilyToPrefixMap.put(KEY_TABLE, keyPrefix); @@ -216,27 +216,4 @@ public static String getOzonePathKey(String volumeName, OM_KEY_PREFIX; } - /** - * Helper method to generate /volumeId/bucketId/ DB key prefix from given - * volume name and bucket name as a prefix for FSO buckets. - * Follows: - * {@link OmMetadataManagerImpl#getOzonePathKey(long, long, long, String)}. - *

- * Note: Currently, this is only intended to be a special use case in - * Snapshot. If this is used elsewhere, consider moving this to - * {@link OMMetadataManager}. - * - * @param volumeName volume name - * @param bucketName bucket name - * @return /volumeId/bucketId/ - * e.g. /-9223372036854772480/-9223372036854771968/ - */ - public static String getOzonePathKeyForFso(OMMetadataManager metadataManager, - String volumeName, - String bucketName) - throws IOException { - final long volumeId = metadataManager.getVolumeId(volumeName); - final long bucketId = metadataManager.getBucketId(volumeName, bucketName); - return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 21b94ce5f05..2f5daf1ce28 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfigValidator; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -559,6 +560,23 @@ public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucket new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false)); } + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, String keyName, + RatisReplicationConfig replicationConfig, long objectId, + long parentId, OmKeyLocationInfoGroup omKeyLocationInfoGroup) { + return new OmKeyInfo.Builder() + 
.setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setReplicationConfig(replicationConfig) + .setParentObjectID(parentId) + .setObjectID(objectId) + .setUpdateID(0L) + .setCreationTime(Time.now()) + .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup) + .setDataSize(1000L); + } + /** * Create OmDirectoryInfo. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 7f74f3d17ec..27c289a0a46 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -215,8 +214,8 @@ private Set addTestKeysToDeletedDirTable(String volumeName, // Add deletedDirectoryTable key entries that "surround" the snapshot scope Set sentinelKeys = new HashSet<>(); - final String dbKeyPfx = SnapshotUtils.getOzonePathKeyForFso( - omMetadataManager, volumeName, bucketName); + final String dbKeyPfx = omMetadataManager + .getOzonePathKeyForFso(volumeName, bucketName); // Calculate offset to bucketId's last character in dbKeyPfx. 
// First -1 for offset, second -1 for second to last char (before '/') diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 6eb9c036de4..839d01f0fa8 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -108,21 +108,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.mockito - mockito-core - test - - - org.junit.jupiter - junit-jupiter-params - test - - - org.mockito - mockito-junit-jupiter - test - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java index 12bb2f5cd03..6d068108f0a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfo.java @@ -22,7 +22,7 @@ /** * Class that holds basic key data in relation to container it is in. 
*/ -public class ContainerKeyInfo { +public final class ContainerKeyInfo { private final long containerID; private final String volumeName; @@ -69,16 +69,13 @@ public boolean equals(Object o) { return false; } ContainerKeyInfo that = (ContainerKeyInfo) o; - return containerID == that.containerID && volumeId == that.volumeId && - bucketId == that.bucketId && parentId == that.parentId && - Objects.equals(volumeName, that.volumeName) && - Objects.equals(bucketName, that.bucketName) && - Objects.equals(keyName, that.keyName); + return containerID == that.containerID && volumeId == that.volumeId && bucketId == that.bucketId && + parentId == that.parentId && Objects.equals(volumeName, that.volumeName) && + Objects.equals(bucketName, that.bucketName) && Objects.equals(keyName, that.keyName); } @Override public int hashCode() { - return Objects.hash(containerID, volumeName, volumeId, bucketName, bucketId, - keyName, parentId); + return Objects.hash(containerID, volumeName, volumeId, bucketName, bucketId, keyName, parentId); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java index 410c15f3c1a..ccf00a65b74 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerKeyInfoResponse.java @@ -27,23 +27,22 @@ * Class for response for container key scanner. 
*/ @JsonInclude(JsonInclude.Include.NON_NULL) -public class ContainerKeyInfoResponse { +public final class ContainerKeyInfoResponse { private final long keysProcessed; - private final Map> containerKeys; + private final Map> containerIdToKeyInfos; - public ContainerKeyInfoResponse( - long keysProcessed, Map> containerKeys) { + public ContainerKeyInfoResponse(long keysProcessed, Map> containerIdToKeyInfos) { this.keysProcessed = keysProcessed; - this.containerKeys = containerKeys; + this.containerIdToKeyInfos = containerIdToKeyInfos; } public long getKeysProcessed() { return keysProcessed; } - public Map> getContainerKeys() { - return containerKeys; + public Map> getContainerIdToKeyInfos() { + return containerIdToKeyInfos; } @Override @@ -51,16 +50,15 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (o == null || getClass() != o.getClass()) { + if (!(o instanceof ContainerKeyInfoResponse)) { return false; } ContainerKeyInfoResponse that = (ContainerKeyInfoResponse) o; - return keysProcessed == that.keysProcessed && - Objects.equals(containerKeys, that.containerKeys); + return keysProcessed == that.keysProcessed && Objects.equals(containerIdToKeyInfos, that.containerIdToKeyInfos); } @Override public int hashCode() { - return Objects.hash(keysProcessed, containerKeys); + return Objects.hash(keysProcessed, containerIdToKeyInfos); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java index ddca893f471..ffed74759e5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/FindContainerKeys.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.debug.OzoneDebug; 
+import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -79,10 +80,10 @@ public class FindContainerKeys description = "Path to OM DB.") private String dbPath; @CommandLine.Option(names = {"--container-ids"}, - split = ",", + split = " ", paramLabel = "", required = true, - description = "One or more container IDs separated by comma.") + description = "One or more container IDs separated by space.") private Set containerIds; private static Map directoryTable; private static boolean isDirTableLoaded = false; @@ -98,9 +99,12 @@ public Void call() throws Exception { ContainerKeyInfoResponse containerKeyInfoResponse = scanDBForContainerKeys(omMetadataManager); - printOutput(containerKeyInfoResponse); - - closeStdChannels(); + try { + printOutput(containerKeyInfoResponse); + } finally { + closeStdChannels(); + omMetadataManager.stop(); + } return null; } @@ -218,7 +222,7 @@ private long processKeyTable( continue; } - processKeyData(containerKeyInfos, next.getKey(), next.getValue()); + processKeyData(containerKeyInfos, next.getValue()); } } @@ -261,7 +265,7 @@ private Pair parseKey(String key) { private void processKeyData( Map> containerKeyInfos, - String key, OmKeyInfo keyInfo) { + OmKeyInfo keyInfo) { long volumeId = 0L; long bucketId = 0L; @@ -270,19 +274,22 @@ private void processKeyData( for (List locationInfos : locationInfoGroup.getLocationVersionMap().values()) { for (OmKeyLocationInfo locationInfo : locationInfos) { - if (containerIds.contains(locationInfo.getContainerID())) { - - containerKeyInfos.merge(locationInfo.getContainerID(), - new ArrayList<>(Collections.singletonList( - new ContainerKeyInfo(locationInfo.getContainerID(), - keyInfo.getVolumeName(), volumeId, - keyInfo.getBucketName(), bucketId, keyInfo.getKeyName(), - keyInfo.getParentObjectID()))), - (existingList, newList) -> { - 
existingList.addAll(newList); - return existingList; - }); + if (!containerIds.contains(locationInfo.getContainerID())) { + continue; } + + List containerKeyInfoList = new ArrayList<>(); + containerKeyInfoList.add( + new ContainerKeyInfo(locationInfo.getContainerID(), + keyInfo.getVolumeName(), volumeId, keyInfo.getBucketName(), + bucketId, keyInfo.getKeyName(), keyInfo.getParentObjectID())); + + containerKeyInfos.merge(locationInfo.getContainerID(), + containerKeyInfoList, + (existingList, newList) -> { + existingList.addAll(newList); + return existingList; + }); } } } @@ -302,29 +309,31 @@ private void processFileData( for (List locationInfos : locationInfoGroup.getLocationVersionMap().values()) { for (OmKeyLocationInfo locationInfo : locationInfos) { - if (containerIds.contains(locationInfo.getContainerID())) { - StringBuilder keyName = new StringBuilder(); - if (!isDirTableLoaded) { - long start = System.currentTimeMillis(); - directoryTable = getDirectoryTableData(omMetadataManager); - long end = System.currentTimeMillis(); - LOG.info("directoryTable loaded in " + (end - start) + " ms."); - isDirTableLoaded = true; - } - keyName.append(getFsoKeyPrefix(volumeId, bucketId, keyInfo)); - keyName.append(keyInfo.getKeyName()); - - containerKeyInfos.merge(locationInfo.getContainerID(), - new ArrayList<>(Collections.singletonList( - new ContainerKeyInfo(locationInfo.getContainerID(), - keyInfo.getVolumeName(), volumeId, - keyInfo.getBucketName(), bucketId, keyName.toString(), - keyInfo.getParentObjectID()))), - (existingList, newList) -> { - existingList.addAll(newList); - return existingList; - }); + if (!containerIds.contains(locationInfo.getContainerID())) { + continue; } + + if (!isDirTableLoaded) { + long start = System.currentTimeMillis(); + directoryTable = getDirectoryTableData(omMetadataManager); + long end = System.currentTimeMillis(); + LOG.info("directoryTable loaded in " + (end - start) + " ms."); + isDirTableLoaded = true; + } + + String keyName = 
getFsoKeyWithPrefix(volumeId, bucketId, keyInfo, + omMetadataManager); + + containerKeyInfos.merge(locationInfo.getContainerID(), + new ArrayList<>(Collections.singletonList( + new ContainerKeyInfo(locationInfo.getContainerID(), + keyInfo.getVolumeName(), volumeId, + keyInfo.getBucketName(), bucketId, keyName, + keyInfo.getParentObjectID()))), + (existingList, newList) -> { + existingList.addAll(newList); + return existingList; + }); } } } @@ -338,11 +347,10 @@ private static String removeBeginningSlash(String path) { return path; } - private String getFsoKeyPrefix(long volumeId, long bucketId, - OmKeyInfo value) { - String prefix = - OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + - OM_KEY_PREFIX; + private String getFsoKeyWithPrefix(long volumeId, long bucketId, + OmKeyInfo value, + OMMetadataManager omMetadataManager) { + String prefix = omMetadataManager.getOzonePathKeyForFso(volumeId, bucketId); Set dirObjIds = new HashSet<>(); dirObjIds.add(value.getParentObjectID()); Map absolutePaths = @@ -355,11 +363,11 @@ private String getFsoKeyPrefix(long volumeId, long bucketId, keyPath = path + OM_KEY_PREFIX; } - return removeBeginningSlash(keyPath); + return removeBeginningSlash(keyPath + value.getKeyName()); } private void printOutput(ContainerKeyInfoResponse containerKeyInfoResponse) { - if (containerKeyInfoResponse.getContainerKeys().isEmpty()) { + if (containerKeyInfoResponse.getContainerIdToKeyInfos().isEmpty()) { err().println("No keys were found for container IDs: " + containerIds); err().println( "Keys processed: " + containerKeyInfoResponse.getKeysProcessed());