diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 02747f53ca6d..dac582a14176 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -285,6 +285,7 @@ public ContainerCommandResponseProto sendCommand(
     }
     for (DatanodeDetails dn : datanodeList) {
       try {
+        request = reconstructRequestIfNeeded(request, dn);
         futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse());
       } catch (InterruptedException e) {
         LOG.error("Command execution was interrupted.");
@@ -316,6 +317,29 @@ public ContainerCommandResponseProto sendCommand(
     return responseProtoHashMap;
   }
 
+  /**
+   * For getBlock requests on EC keys, the replicaIndex must be set on each
+   * request to match the replica index of the datanode the request is sent
+   * to. This method unpacks the proto and rebuilds the request with the
+   * replicaIndex field set.
+   *
+   * @param request the original container command request
+   * @param dn the datanode the request is sent to
+   * @return the updated request
+   */
+  private ContainerCommandRequestProto reconstructRequestIfNeeded(
+      ContainerCommandRequestProto request, DatanodeDetails dn) {
+    boolean isEcRequest = pipeline.getReplicationConfig()
+        .getReplicationType() == HddsProtos.ReplicationType.EC;
+    if (request.hasGetBlock() && isEcRequest) {
+      ContainerProtos.GetBlockRequestProto gbr = request.getGetBlock();
+      request = request.toBuilder().setGetBlock(gbr.toBuilder().setBlockID(
+          gbr.getBlockID().toBuilder().setReplicaIndex(
+              pipeline.getReplicaIndex(dn)).build()).build()).build();
+    }
+    return request;
+  }
+
   @Override
   public ContainerCommandResponseProto sendCommand(
       ContainerCommandRequestProto request, List<Validator> validators)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 15d9746fcb6a..9b1747b4c272 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -21,6 +21,8 @@
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,6 +48,8 @@
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
 import picocli.CommandLine;
 
 import java.io.File;
@@ -86,7 +90,7 @@ public class TestOzoneDebugShell {
   protected static void startCluster() throws Exception {
     // Init HA cluster
     omServiceId = "om-service-test1";
-    final int numDNs = 3;
+    final int numDNs = 5;
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(numDNs)
         .build();
@@ -112,13 +116,14 @@ public static void init() throws Exception {
     startCluster();
   }
 
-  @Test
-  public void testChunkInfoCmdBeforeAfterCloseContainer() throws Exception {
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testChunkInfoCmdBeforeAfterCloseContainer(boolean isEcKey) throws Exception {
     final String volumeName = UUID.randomUUID().toString();
     final String bucketName = UUID.randomUUID().toString();
     final String keyName = UUID.randomUUID().toString();
-    writeKey(volumeName, bucketName, keyName);
+    writeKey(volumeName, bucketName, keyName, isEcKey);
 
     int exitCode = runChunkInfoCommand(volumeName, bucketName, keyName);
     assertEquals(0, exitCode);
@@ -134,7 +139,7 @@ public void testChunkInfoVerifyPathsAreDifferent() throws Exception {
     final String volumeName = UUID.randomUUID().toString();
     final String bucketName = UUID.randomUUID().toString();
     final String keyName = UUID.randomUUID().toString();
-    writeKey(volumeName, bucketName, keyName);
+    writeKey(volumeName, bucketName, keyName, false);
     int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName);
     assertEquals(0, exitCode);
   }
@@ -150,7 +155,7 @@ public void testLdbCliForOzoneSnapshot() throws Exception {
     final String bucketName = UUID.randomUUID().toString();
     final String keyName = UUID.randomUUID().toString();
 
-    writeKey(volumeName, bucketName, keyName);
+    writeKey(volumeName, bucketName, keyName, false);
     String snapshotName = client.getObjectStore()
         .createSnapshot(volumeName, bucketName, "snap1");
@@ -176,15 +181,22 @@ private static String getSnapshotDBPath(String checkPointDir) {
         OM_DB_NAME + checkPointDir;
   }
 
   private static void writeKey(String volumeName, String bucketName,
-      String keyName) throws IOException {
+      String keyName, boolean isEcKey) throws IOException {
+    ReplicationConfig repConfig;
+    if (isEcKey) {
+      repConfig = new ECReplicationConfig(3, 2);
+    } else {
+      repConfig = ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+          ReplicationFactor.THREE);
+    }
     try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
       // see HDDS-10091 for making this work with FILE_SYSTEM_OPTIMIZED layout
-      TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
+      TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
+          BucketLayout.LEGACY);
       TestDataUtil.createKey(
           client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
-          keyName, ReplicationFactor.THREE, ReplicationType.RATIS, "test");
+          keyName, repConfig, "test");
     }
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
index 5c311d49c93f..b5b2364007f3 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
@@ -111,7 +111,7 @@ protected void execute(OzoneClient client, OzoneAddress address)
         keyPipeline.getReplicationConfig().getReplicationType() ==
         HddsProtos.ReplicationType.EC;
     Pipeline pipeline;
-    if (keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
+    if (!isECKey && keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
       pipeline = Pipeline.newBuilder(keyPipeline)
           .setReplicationConfig(StandaloneReplicationConfig
               .getInstance(ONE)).build();