From ed8248a4f9b4d2ae8fc7cef117f4dafbf279534a Mon Sep 17 00:00:00 2001
From: sarvekshayr
Date: Mon, 24 Mar 2025 13:06:58 +0530
Subject: [PATCH 1/8] HDDS-12645. Improve output of ozone debug replicas chunk-info

---
 .../src/main/compose/common/replicas-test.sh  |   4 +-
 .../ozone/shell/TestOzoneDebugShell.java      |   2 +-
 .../replicas/chunk/ChunkDataNodeDetails.java  |  43 -------
 .../debug/replicas/chunk/ChunkDetails.java    |  53 ---------
 .../debug/replicas/chunk/ChunkKeyHandler.java |  66 ++++-------
 .../replicas/chunk/ContainerChunkInfo.java    | 106 ------------------
 6 files changed, 26 insertions(+), 248 deletions(-)
 delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java
 delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java
 delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java

diff --git a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
index cd129bb07872..8d144276c23c 100755
--- a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
@@ -30,7 +30,7 @@ host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})"
 container="${host%%.*}"
 
 # corrupt the first block of key on one of the datanodes
-datafile="$(jq -r '.KeyLocations[0][0].Locations.files[0]' ${chunkinfo})"
+datafile="$(jq -r '.KeyLocations[0][0].Chunk-Files[0]' ${chunkinfo})"
 docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}"
 
 execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot
@@ -49,4 +49,4 @@ wait_for_datanode "${container}" HEALTHY 60
 start_docker_env 9
 
 execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests-ec3-2.robot
-execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests-ec6-3.robot
\ No newline at end of file
+execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests-ec6-3.robot
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index a1ace9f97f38..a6de77ffe348 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -227,7 +227,7 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName,
     JsonNode keyLocations = jsonNode.get("KeyLocations").get(0);
     for (JsonNode element : keyLocations) {
       String fileName =
-          element.get("Locations").get("files").get(0).toString();
+          element.get("Chunk-Files").get(0).toString();
       blockFilePaths.add(fileName);
     }
     // DN storage directories are set differently for each DN
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java
deleted file mode 100644
index 84db53e39db7..000000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.debug.replicas.chunk; - -/** - * Class that gives datanode details on which the chunk is present. - */ -public class ChunkDataNodeDetails { - private String ipAddress; - private String hostName; - - public ChunkDataNodeDetails(String ipAddress, String hostName) { - this.ipAddress = ipAddress; - this.hostName = hostName; - } - - @Override - public String toString() { - return "{" - + "ipAddress='" - + ipAddress - + '\'' - + ", hostName='" - + hostName - + '\'' - + '}'; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java deleted file mode 100644 index a5c2deededc5..000000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.debug.replicas.chunk; - -/** - * Class that gives chunkDetails. 
- */
-public class ChunkDetails {
-  private String chunkName;
-  private long chunkOffset;
-
-  public String getChunkName() {
-    return chunkName;
-  }
-
-  public void setChunkName(String chunkName) {
-    this.chunkName = chunkName;
-  }
-
-  @Override
-  public String toString() {
-    return "{"
-        + "chunkName='"
-        + chunkName
-        + '\''
-        + ", chunkOffset="
-        + chunkOffset
-        + '}';
-  }
-
-  public long getChunkOffset() {
-    return chunkOffset;
-  }
-
-  public void setChunkOffset(long chunkOffset) {
-    this.chunkOffset = chunkOffset;
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
index f8246e2edefe..cc11dae11c1b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
@@ -19,12 +19,12 @@
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import java.io.File;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
@@ -47,6 +47,7 @@
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 import org.apache.hadoop.ozone.shell.keys.KeyHandler;
+import org.apache.ratis.thirdparty.com.google.protobuf.util.JsonFormat;
 import picocli.CommandLine.Command;
 
 /**
@@ -72,8 +73,11 @@ protected void execute(OzoneClient client, OzoneAddress address)
     String bucketName = address.getBucketName();
     String keyName = address.getKeyName();
     List<ContainerProtos.ChunkInfo> tempchunks;
-    List<ChunkDetails> chunkDetailsList = new ArrayList<>();
-    HashSet<String> chunkPaths = new HashSet<>();
+
+    result.put("Volume-Name", volumeName);
+    result.put("Bucket-Name", bucketName);
+    result.put("Key-Name", keyName);
+
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setKeyName(keyName).build();
     OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
@@ -90,10 +94,6 @@ protected void execute(OzoneClient client, OzoneAddress address)
         .getConfiguredVersion(getConf());
     ArrayNode responseArrayList = JsonUtils.createArrayNode();
     for (OmKeyLocationInfo keyLocation : locationInfos) {
-      ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
-      ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
-      long containerId = keyLocation.getContainerID();
-      chunkPaths.clear();
       Pipeline keyPipeline = keyLocation.getPipeline();
       boolean isECKey =
           keyPipeline.getReplicationConfig().getReplicationType() ==
@@ -108,11 +108,6 @@ protected void execute(OzoneClient client, OzoneAddress address)
       }
       XceiverClientSpi xceiverClient =
           xceiverClientManager.acquireClientForReadData(pipeline);
       try {
-        // Datanode is queried to get chunk information.Thus querying the
-        // OM,SCM and datanode helps us get chunk location information
-        ContainerProtos.DatanodeBlockID datanodeBlockID =
-            keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
-        // doing a getBlock on all nodes
         Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses =
             ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient,
@@ -123,51 +118,36 @@ protected void execute(OzoneClient client, OzoneAddress address)
                 keyLocation.getContainerID(), pipeline);
         ArrayNode responseFromAllNodes = JsonUtils.createArrayNode();
         for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
-          chunkPaths.clear();
           ObjectNode jsonObj = JsonUtils.createObjectNode(null);
           if (entry.getValue() == null) {
-            LOG.error("Cant execute getBlock on this node");
+            LOG.error("Can't execute getBlock on this node");
             continue;
           }
           tempchunks = entry.getValue().getBlockData().getChunksList();
           ContainerProtos.ContainerDataProto containerData =
              readContainerResponses.get(entry.getKey()).getContainerData();
+          ArrayNode chunkFilePaths = JsonUtils.createArrayNode();
           for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
             String fileName = containerLayoutVersion.getChunkFile(new File(
                 getChunkLocationPath(containerData.getContainerPath())),
                 keyLocation.getBlockID(),
                 chunkInfo.getChunkName()).toString();
-            chunkPaths.add(fileName);
-            ChunkDetails chunkDetails = new ChunkDetails();
-            chunkDetails.setChunkName(fileName);
-            chunkDetails.setChunkOffset(chunkInfo.getOffset());
-            chunkDetailsList.add(chunkDetails);
-          }
-          containerChunkInfoVerbose.setContainerPath(containerData
-              .getContainerPath());
-          containerChunkInfoVerbose.setPipeline(keyPipeline);
-          containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
-          containerChunkInfo.setFiles(chunkPaths);
-          containerChunkInfo.setPipelineID(keyPipeline.getId().getId());
-          if (isECKey) {
-            ChunkType blockChunksType =
-                isECParityBlock(keyPipeline, entry.getKey()) ?
-                ChunkType.PARITY : ChunkType.DATA;
-            containerChunkInfoVerbose.setChunkType(blockChunksType);
-            containerChunkInfo.setChunkType(blockChunksType);
-          }
-
-          if (isVerbose()) {
-            jsonObj.set("Locations",
-                JsonUtils.createObjectNode(containerChunkInfoVerbose));
-          } else {
-            jsonObj.set("Locations",
-                JsonUtils.createObjectNode(containerChunkInfo));
+            chunkFilePaths.add(fileName);
           }
           jsonObj.put("Datanode-HostName", entry.getKey().getHostName());
           jsonObj.put("Datanode-IP", entry.getKey().getIpAddress());
-          jsonObj.put("Container-ID", containerId);
-          jsonObj.put("Block-ID", keyLocation.getLocalID());
+
+          ObjectMapper objectMapper = new ObjectMapper();
+          JsonNode blockDataNode = objectMapper.readTree(JsonFormat.printer().print(entry.getValue().getBlockData()));
+          jsonObj.set("Block-Data", blockDataNode);
+
+          jsonObj.set("Chunk-Files", chunkFilePaths);
+
+          if (isECKey) {
+            ChunkType blockChunksType = isECParityBlock(keyPipeline, entry.getKey()) ?
+                ChunkType.PARITY : ChunkType.DATA;
+            jsonObj.put("Chunk-Type", blockChunksType.name());
+          }
           responseFromAllNodes.add(jsonObj);
         }
         responseArrayList.add(responseFromAllNodes);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java
deleted file mode 100644
index a5988cac73e9..000000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
-
 *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.debug.replicas.chunk;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import java.util.HashSet;
-import java.util.List;
-import java.util.UUID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-/**
- * Class that gives container and chunk Information.
- */
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class ContainerChunkInfo {
-  private String containerPath;
-  private List<ChunkDetails> chunkInfos;
-
-  private HashSet<String> files;
-  private UUID pipelineID;
-  private Pipeline pipeline;
-  private ChunkType chunkType;
-
-  public void setFiles(HashSet<String> files) {
-    this.files = files;
-  }
-
-  public void setPipelineID(UUID pipelineID) {
-    this.pipelineID = pipelineID;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public void setPipeline(Pipeline pipeline) {
-    this.pipeline = pipeline;
-  }
-
-  public void setContainerPath(String containerPath) {
-    this.containerPath = containerPath;
-  }
-
-  public void setChunkInfos(List<ChunkDetails> chunkInfos) {
-    this.chunkInfos = chunkInfos;
-  }
-
-  public void setChunkType(ChunkType chunkType) {
-    this.chunkType = chunkType;
-  }
-
-  public String getContainerPath() {
-    return containerPath;
-  }
-
-  public List<ChunkDetails> getChunkInfos() {
-    return chunkInfos;
-  }
-
-  public HashSet<String> getFiles() {
-    return files;
-  }
-
-  public UUID getPipelineID() {
-    return pipelineID;
-  }
-
-  public ChunkType getChunkType() {
-    return chunkType;
-  }
-
-
-  @Override
-  public String toString() {
-    return "Container{"
-        + "containerPath='"
-        + containerPath
-        + '\''
-        + ", chunkInfos="
-        + chunkInfos
-        + ", pipeline="
-        + pipeline
-        + '}'
-        + "files="
-        + files
-        + "PipelineID="
-        + pipelineID
-        + "ChunkType="
-        + chunkType;
-  }
-}

From a62117d39a3060cc20f2285272364ffbb502817f Mon Sep 17 00:00:00 2001
From: sarvekshayr
Date: Mon, 24 Mar 2025 14:44:24 +0530
Subject: [PATCH 2/8] Corrected jq command

---
 hadoop-ozone/dist/src/main/compose/common/replicas-test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
index 8d144276c23c..67d812b5d33a 100755
--- a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
@@ -30,7 +30,7 @@ host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})"
 container="${host%%.*}"
 
 # corrupt the first block of key on one of the datanodes
-datafile="$(jq -r '.KeyLocations[0][0].Chunk-Files[0]' ${chunkinfo})"
+datafile="$(jq -r '.KeyLocations[0][0]["Chunk-Files"][0]' ${chunkinfo})"
 docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}"
 
 execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot

From 3b26c9f544211725a61454f761174ad95da4c5f5 Mon Sep 17 00:00:00 2001
From: sarvekshayr
Date: Wed, 26 Mar 2025 10:26:31 +0530
Subject: [PATCH 3/8] Convert JSON property names to camelCase

---
 .../src/main/compose/common/replicas-test.sh | 4 ++--
 .../ozone/shell/TestOzoneDebugShell.java | 4 ++--
 .../debug/replicas/chunk/ChunkKeyHandler.java | 18 +++++++++---------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
index 67d812b5d33a..8ea7741bf554 100755
--- a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh
@@ -26,11 +26,11 @@ execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests.robot
 # get block locations for key
 chunkinfo="${key}-blocks-${prefix}"
 docker-compose exec -T ${SCM} bash -c "ozone debug replicas chunk-info ${volume}/${bucket}/${key}" > "$chunkinfo"
-host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})"
+host="$(jq -r '.keyLocations[0][0]["datanodeHostName"]' ${chunkinfo})"
 container="${host%%.*}"
 
 # corrupt the first block of key on one of the datanodes
-datafile="$(jq -r '.KeyLocations[0][0]["Chunk-Files"][0]' ${chunkinfo})"
+datafile="$(jq -r '.keyLocations[0][0]["chunkFiles"][0]' ${chunkinfo})"
 docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}"
 
 execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index a6de77ffe348..daa14adb0dd3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -224,10 +224,10 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName,
     ObjectMapper objectMapper = new ObjectMapper();
     // Parse the JSON array string into a JsonNode
     JsonNode jsonNode = objectMapper.readTree(output);
-    JsonNode keyLocations = jsonNode.get("KeyLocations").get(0);
+    JsonNode keyLocations = jsonNode.get("keyLocations").get(0);
     for (JsonNode element : keyLocations) {
       String fileName =
-          element.get("Chunk-Files").get(0).toString();
+          element.get("chunkFiles").get(0).toString();
       blockFilePaths.add(fileName);
     }
     // DN storage directories are set differently for each DN
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
index cc11dae11c1b..eb42aa981710 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
@@ -74,9 +74,9 @@ protected void execute(OzoneClient client, OzoneAddress address)
     String keyName = address.getKeyName();
     List<ContainerProtos.ChunkInfo> tempchunks;
 
-    result.put("Volume-Name", volumeName);
-    result.put("Bucket-Name", bucketName);
-    result.put("Key-Name", keyName);
+    result.put("volumeName", volumeName);
+    result.put("bucketName", bucketName);
+    result.put("keyName", keyName);
 
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setKeyName(keyName).build();
@@ -134,19 +134,19 @@ protected void execute(OzoneClient client, OzoneAddress address)
                 chunkInfo.getChunkName()).toString();
             chunkFilePaths.add(fileName);
           }
-          jsonObj.put("Datanode-HostName", entry.getKey().getHostName());
-          jsonObj.put("Datanode-IP", entry.getKey().getIpAddress());
+          jsonObj.put("datanodeHostName", entry.getKey().getHostName());
+          jsonObj.put("datanodeIP", entry.getKey().getIpAddress());
 
           ObjectMapper objectMapper = new ObjectMapper();
           JsonNode blockDataNode = objectMapper.readTree(JsonFormat.printer().print(entry.getValue().getBlockData()));
-          jsonObj.set("Block-Data", blockDataNode);
+          jsonObj.set("blockData", blockDataNode);
 
-          jsonObj.set("Chunk-Files", chunkFilePaths);
+          jsonObj.set("chunkFiles", chunkFilePaths);
 
           if (isECKey) {
             ChunkType blockChunksType = isECParityBlock(keyPipeline, entry.getKey()) ?
                 ChunkType.PARITY : ChunkType.DATA;
-            jsonObj.put("Chunk-Type", blockChunksType.name());
+            jsonObj.put("chunkType", blockChunksType.name());
           }
           responseFromAllNodes.add(jsonObj);
         }
@@ -157,7 +157,7 @@ protected void execute(OzoneClient client, OzoneAddress address)
         xceiverClientManager.releaseClientForReadData(xceiverClient, false);
       }
     }
-    result.set("KeyLocations", responseArrayList);
+    result.set("keyLocations", responseArrayList);
    String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result);
    System.out.println(prettyJson);
  }

From a48b37da753359e131620cb876d27d576ee2064c Mon Sep 17 00:00:00 2001
From: sarvekshayr
Date: Fri, 28 Mar 2025 11:47:14 +0530
Subject: [PATCH 4/8] Addressed comments

---
 .../debug/replicas/chunk/ChunkKeyHandler.java | 62 ++++++++++++++-----
 1 file changed, 48 insertions(+), 14 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
index eb42aa981710..98618ca2fc0e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java
@@ -25,8 +25,10 @@
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import java.io.File;
 import java.io.IOException;
+import java.util.Base64;
 import java.util.List;
 import java.util.Map;
+import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -72,11 +74,10 @@ protected void execute(OzoneClient client, OzoneAddress address)
     String volumeName = address.getVolumeName();
     String bucketName = address.getBucketName();
     String keyName = address.getKeyName();
-    List<ContainerProtos.ChunkInfo> tempchunks;
 
     result.put("volumeName", volumeName);
     result.put("bucketName", bucketName);
-    result.put("keyName", keyName);
+    result.put("name", keyName);
 
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setKeyName(keyName).build();
@@ -123,25 +124,53 @@ protected void execute(OzoneClient client, OzoneAddress address)
           LOG.error("Can't execute getBlock on this node");
           continue;
         }
-        tempchunks = entry.getValue().getBlockData().getChunksList();
-        ContainerProtos.ContainerDataProto containerData =
-            readContainerResponses.get(entry.getKey()).getContainerData();
-        ArrayNode chunkFilePaths = JsonUtils.createArrayNode();
-        for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
-          String fileName = containerLayoutVersion.getChunkFile(new File(
+        ContainerProtos.ChunkInfo chunks = null;
+        String fileName = "";
+        if (entry.getValue().getBlockData().getChunksCount() > 0) {
+          chunks = entry.getValue().getBlockData().getChunks(0);
+          ContainerProtos.ContainerDataProto containerData =
+ readContainerResponses.get(entry.getKey()).getContainerData(); + fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), - chunkInfo.getChunkName()).toString(); - chunkFilePaths.add(fileName); + chunks.getChunkName()).toString(); } - jsonObj.put("datanodeHostName", entry.getKey().getHostName()); - jsonObj.put("datanodeIP", entry.getKey().getIpAddress()); + + ObjectNode dnObj = JsonUtils.createObjectNode(null); + dnObj.put("hostname", entry.getKey().getHostName()); + dnObj.put("ip", entry.getKey().getIpAddress()); + dnObj.put("uuid", entry.getKey().getUuidString()); + jsonObj.set("datanode", dnObj); + + jsonObj.put("file", fileName); ObjectMapper objectMapper = new ObjectMapper(); JsonNode blockDataNode = objectMapper.readTree(JsonFormat.printer().print(entry.getValue().getBlockData())); - jsonObj.set("blockData", blockDataNode); - jsonObj.set("chunkFiles", chunkFilePaths); + if (entry.getValue().hasBlockData()) { + ObjectNode blockIdNode = (ObjectNode) blockDataNode.get("blockID"); + blockIdNode.put("containerID", blockIdNode.get("containerID").asLong()); + blockIdNode.put("localID", blockIdNode.get("localID").asLong()); + blockIdNode.put("blockCommitSequenceId", blockIdNode.get("blockCommitSequenceId").asLong()); + + ArrayNode chunkArray = (ArrayNode) blockDataNode.get("chunks"); + for (JsonNode chunk : chunkArray) { + ((ObjectNode) chunk).put("offset", chunk.get("offset").asLong()); + ((ObjectNode) chunk).put("len", chunk.get("len").asLong()); + + JsonNode checksumData = chunk.get("checksumData"); + if (checksumData != null) { + ArrayNode checksums = (ArrayNode) checksumData.get("checksums"); + for (int i = 0; i < checksums.size(); i++) { + String base64Checksum = checksums.get(i).asText(); + checksums.set(i, convertBase64ToHex(base64Checksum)); + } + } + } + ((ObjectNode) blockDataNode).put("size", blockDataNode.get("size").asLong()); + } + + jsonObj.set("blockData", blockDataNode); if (isECKey) { ChunkType blockChunksType = isECParityBlock(keyPipeline, entry.getKey()) ? @@ -163,6 +192,11 @@ protected void execute(OzoneClient client, OzoneAddress address) } } + private String convertBase64ToHex(String base64Checksum) { + byte[] decodedBytes = Base64.getDecoder().decode(base64Checksum); + return Hex.encodeHexString(decodedBytes); + } + private boolean isECParityBlock(Pipeline pipeline, DatanodeDetails dn) { //index is 1-based, //e.g. 
for RS-3-2 we will have data indexes 1,2,3 and parity indexes 4,5 From ffc9e1daa3d0744f81188ea8c905ab2392c27941 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Fri, 28 Mar 2025 12:19:44 +0530 Subject: [PATCH 5/8] Corrected tests --- hadoop-ozone/dist/src/main/compose/common/replicas-test.sh | 4 ++-- .../org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh index 8ea7741bf554..a7e70e067033 100755 --- a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh @@ -26,11 +26,11 @@ execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests.robot # get block locations for key chunkinfo="${key}-blocks-${prefix}" docker-compose exec -T ${SCM} bash -c "ozone debug replicas chunk-info ${volume}/${bucket}/${key}" > "$chunkinfo" -host="$(jq -r '.keyLocations[0][0]["datanodeHostName"]' ${chunkinfo})" +host="$(jq -r '.keyLocations[0][0]["datanode"]["hostname"]' ${chunkinfo})" container="${host%%.*}" # corrupt the first block of key on one of the datanodes -datafile="$(jq -r '.keyLocations[0][0]["chunkFiles"][0]' ${chunkinfo})" +datafile="$(jq -r '.keyLocations[0][0]["file"]' ${chunkinfo})" docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}" execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index daa14adb0dd3..fe6ddd10121c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -227,7 +227,7 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, JsonNode keyLocations = jsonNode.get("keyLocations").get(0); for (JsonNode element : keyLocations) { String fileName = - element.get("chunkFiles").get(0).toString(); + element.get("file").toString(); blockFilePaths.add(fileName); } // DN storage directories are set differently for each DN From ff589662bdae102f51d10cec685294c11e1e498f Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Sun, 20 Apr 2025 23:56:34 +0530 Subject: [PATCH 6/8] Updated json format --- .../debug/replicas/chunk/ChunkKeyHandler.java | 119 ++++++++++-------- 1 file changed, 64 insertions(+), 55 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java index 98618ca2fc0e..253b98ec42f2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java @@ -19,20 +19,18 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.File; import java.io.IOException; -import java.util.Base64; +import 
java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
@@ -49,7 +47,8 @@
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 import org.apache.hadoop.ozone.shell.keys.KeyHandler;
-import org.apache.ratis.thirdparty.com.google.protobuf.util.JsonFormat;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import picocli.CommandLine.Command;
 
 /**
@@ -84,16 +83,22 @@ protected void execute(OzoneClient client, OzoneAddress address)
     OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
     // querying the keyLocations.The OM is queried to get containerID and
     // localID pertaining to a given key
-    List<OmKeyLocationInfo> locationInfos =
-        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
+    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations() != null ?
+        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly() : null;
+    // if key has no replicas
+    if (locationInfos == null) {
+      System.err.println("No replica/s found.");
+      return;
+    }
+    // for zero-sized key
     if (locationInfos.isEmpty()) {
-      System.out.println("No Key Locations Found");
+      System.err.println("No key locations found.");
       return;
     }
     ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion
         .getConfiguredVersion(getConf());
-    ArrayNode responseArrayList = JsonUtils.createArrayNode();
+    ArrayNode responseArrayList = result.putArray("keyLocations");
     for (OmKeyLocationInfo keyLocation : locationInfos) {
       Pipeline keyPipeline = keyLocation.getPipeline();
       boolean isECKey =
@@ -117,86 +122,90 @@ protected void execute(OzoneClient client, OzoneAddress address)
         Map<DatanodeDetails, ContainerProtos.ReadContainerResponseProto> readContainerResponses =
             containerOperationClient.readContainerFromAllNodes(
                 keyLocation.getContainerID(), pipeline);
-        ArrayNode responseFromAllNodes = JsonUtils.createArrayNode();
+        ArrayNode responseFromAllNodes = responseArrayList.addArray();
         for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
-          ObjectNode jsonObj = JsonUtils.createObjectNode(null);
-          if (entry.getValue() == null) {
-            LOG.error("Can't execute getBlock on this node");
+          DatanodeDetails datanodeDetails = entry.getKey();
+          GetBlockResponseProto blockResponse = entry.getValue();
+
+          if (blockResponse == null || !blockResponse.hasBlockData()) {
+            System.err.printf("GetBlock call failed on %s datanode and %s block.\n",
+                datanodeDetails.getHostName(), keyLocation.getBlockID());
             continue;
           }
+
+          ContainerProtos.BlockData blockData = blockResponse.getBlockData();
+          ContainerProtos.ChunkInfo chunkInfo = blockData.getChunksCount() > 0 ?
+ blockData.getChunks(0) : null; + String fileName = ""; - if (entry.getValue().getBlockData().getChunksCount() > 0) { - chunks = entry.getValue().getBlockData().getChunks(0); + if (chunkInfo != null) { ContainerProtos.ContainerDataProto containerData = - readContainerResponses.get(entry.getKey()).getContainerData(); + readContainerResponses.get(datanodeDetails).getContainerData(); fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), - chunks.getChunkName()).toString(); + chunkInfo.getChunkName()).toString(); } - ObjectNode dnObj = JsonUtils.createObjectNode(null); - dnObj.put("hostname", entry.getKey().getHostName()); - dnObj.put("ip", entry.getKey().getIpAddress()); - dnObj.put("uuid", entry.getKey().getUuidString()); - jsonObj.set("datanode", dnObj); + ObjectNode jsonObj = responseFromAllNodes.addObject(); + ObjectNode dnObj = jsonObj.putObject("datanode"); + dnObj.put("hostname", datanodeDetails.getHostName()); + dnObj.put("ip", datanodeDetails.getIpAddress()); + dnObj.put("uuid", datanodeDetails.getUuidString()); jsonObj.put("file", fileName); - ObjectMapper objectMapper = new ObjectMapper(); - JsonNode blockDataNode = objectMapper.readTree(JsonFormat.printer().print(entry.getValue().getBlockData())); - - if (entry.getValue().hasBlockData()) { - ObjectNode blockIdNode = (ObjectNode) blockDataNode.get("blockID"); - blockIdNode.put("containerID", blockIdNode.get("containerID").asLong()); - blockIdNode.put("localID", blockIdNode.get("localID").asLong()); - blockIdNode.put("blockCommitSequenceId", blockIdNode.get("blockCommitSequenceId").asLong()); - - ArrayNode chunkArray = (ArrayNode) blockDataNode.get("chunks"); - for (JsonNode chunk : chunkArray) { - ((ObjectNode) chunk).put("offset", chunk.get("offset").asLong()); - ((ObjectNode) chunk).put("len", chunk.get("len").asLong()); - - JsonNode checksumData = chunk.get("checksumData"); - if (checksumData != null) { - ArrayNode checksums = (ArrayNode) checksumData.get("checksums"); - for (int i = 0; i < checksums.size(); i++) { - String base64Checksum = checksums.get(i).asText(); - checksums.set(i, convertBase64ToHex(base64Checksum)); - } + ObjectNode blockDataNode = jsonObj.putObject("blockData"); + ObjectNode blockIdNode = blockDataNode.putObject("blockID"); + blockIdNode.put("containerID", blockData.getBlockID().getContainerID()); + blockIdNode.put("localID", blockData.getBlockID().getLocalID()); + blockIdNode.put("blockCommitSequenceId", blockData.getBlockID().getBlockCommitSequenceId()); + blockDataNode.put("size", blockData.getSize()); + + ArrayNode chunkArray = blockDataNode.putArray("chunks"); + for (ContainerProtos.ChunkInfo chunk : blockData.getChunksList()) { + ObjectNode chunkNode = chunkArray.addObject(); + chunkNode.put("offset", chunk.getOffset()); + chunkNode.put("len", chunk.getLen()); + + if (chunk.hasChecksumData()) { + ArrayNode checksums = chunkNode.putArray("checksums"); + for (ByteString bs : chunk.getChecksumData().getChecksumsList()) { + checksums.add(StringUtils.byteToHexString(bs.toByteArray())); } + chunkNode.put("checksumType", chunk.getChecksumData().getType().name()); + chunkNode.put("bytesPerChecksum", chunk.getChecksumData().getBytesPerChecksum()); } - ((ObjectNode) blockDataNode).put("size", blockDataNode.get("size").asLong()); - } - jsonObj.set("blockData", blockDataNode); + if (chunk.hasStripeChecksum()) { + byte[] stripeBytes = chunk.getStripeChecksum().toByteArray(); + int checksumLen = 
chunk.getChecksumData().getChecksumsList().get(0).size(); + + ArrayNode stripeChecksums = chunkNode.putArray("stripeChecksum"); + for (int i = 0; i <= stripeBytes.length - checksumLen; i += checksumLen) { + byte[] slice = Arrays.copyOfRange(stripeBytes, i, i + checksumLen); + stripeChecksums.add(StringUtils.byteToHexString(slice)); + } + } + } if (isECKey) { ChunkType blockChunksType = isECParityBlock(keyPipeline, entry.getKey()) ? ChunkType.PARITY : ChunkType.DATA; jsonObj.put("chunkType", blockChunksType.name()); } - responseFromAllNodes.add(jsonObj); } - responseArrayList.add(responseFromAllNodes); } catch (InterruptedException e) { throw new RuntimeException(e); } finally { xceiverClientManager.releaseClientForReadData(xceiverClient, false); } } - result.set("keyLocations", responseArrayList); String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); System.out.println(prettyJson); } } - private String convertBase64ToHex(String base64Checksum) { - byte[] decodedBytes = Base64.getDecoder().decode(base64Checksum); - return Hex.encodeHexString(decodedBytes); - } - private boolean isECParityBlock(Pipeline pipeline, DatanodeDetails dn) { //index is 1-based, //e.g. for RS-3-2 we will have data indexes 1,2,3 and parity indexes 4,5 From c4b85dea12e441127d81c2d44a7fa4dc65070e68 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Mon, 21 Apr 2025 09:00:05 +0530 Subject: [PATCH 7/8] Addressed findbugs --- .../hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java index 253b98ec42f2..213187776563 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java @@ -128,7 +128,7 @@ protected void execute(OzoneClient client, OzoneAddress address) GetBlockResponseProto blockResponse = entry.getValue(); if (blockResponse == null || !blockResponse.hasBlockData()) { - System.err.printf("GetBlock call failed on %s datanode and %s block.\n", + System.err.printf("GetBlock call failed on %s datanode and %s block.%n", datanodeDetails.getHostName(), keyLocation.getBlockID()); continue; } From 91907bf3b0d6590bcf414436a6cd03c0956d8c23 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Wed, 23 Apr 2025 13:20:22 +0530 Subject: [PATCH 8/8] Added robot test for EC keys --- .../src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot | 5 +++++ .../src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot index c578ac76cc29..0c310f62e89d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot @@ -84,3 +84,8 @@ Create EC key ${directory} = Execute replicas verify checksums CLI tool ${count_files} = Count Files In Directory ${directory} Should Be Equal As Integers ${count_files} 1 + +Test ozone debug replicas chunk-info + Create EC key 1048576 3 + ${count} = Execute ozone debug replicas chunk-info o3://om/${VOLUME}/${BUCKET}/testfile | jq '[.keyLocations[0][] | select(.file | 
test("\\\\.block$")) | .file] | length' + Should Be Equal As Integers ${count} 5 diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot index 7815e8ef4f75..9c83cad6971a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot @@ -92,3 +92,8 @@ Create EC key ${count_files} = Count Files In Directory ${directory} ${sum_size_last_stripe} = Evaluate 1048576 * 4 + ((1000000 * 8) % 1048576) Should Be Equal As Integers ${count_files} 1 + +Test ozone debug replicas chunk-info + Create EC key 1048576 6 + ${count} = Execute ozone debug replicas chunk-info o3://om/${VOLUME}/${BUCKET}/testfile | jq '[.keyLocations[0][] | select(.file | test("\\\\.block$")) | .file] | length' + Should Be Equal As Integers ${count} 9