@@ -285,6 +285,7 @@ public ContainerCommandResponseProto sendCommand(
}
for (DatanodeDetails dn : datanodeList) {
try {
request = reconstructRequestIfNeeded(request, dn);
futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse());
} catch (InterruptedException e) {
LOG.error("Command execution was interrupted.");
@@ -316,6 +317,29 @@ public ContainerCommandResponseProto sendCommand(
return responseProtoHashMap;
}

/**
 * For getBlock on EC keys, the replicaIndex field must be set on each
 * request to the replica index of the datanode that the request is sent
 * to. This method unpacks the proto and reconstructs the request after
 * setting the replicaIndex field.
 *
 * @param request the original container command request
 * @param dn the datanode the request is sent to
 * @return the updated request
 */
private ContainerCommandRequestProto reconstructRequestIfNeeded(
ContainerCommandRequestProto request, DatanodeDetails dn) {
boolean isEcRequest = pipeline.getReplicationConfig()
.getReplicationType() == HddsProtos.ReplicationType.EC;
if (request.hasGetBlock() && isEcRequest) {
ContainerProtos.GetBlockRequestProto gbr = request.getGetBlock();
request = request.toBuilder().setGetBlock(gbr.toBuilder().setBlockID(
gbr.getBlockID().toBuilder().setReplicaIndex(
pipeline.getReplicaIndex(dn)).build()).build()).build();
}
return request;
}

@Override
public ContainerCommandResponseProto sendCommand(
ContainerCommandRequestProto request, List<Validator> validators)
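For context, the loop in sendCommand above now rewrites the request once per target datanode. A simplified sketch of the effect, using only calls that appear in the patch (the loop itself is a stand-in for sendCommand, not part of the change):

// Each datanode receives its own copy of the GetBlock request, with
// the BlockID tagged by that datanode's replica index.
for (DatanodeDetails dn : pipeline.getNodes()) {
  ContainerProtos.GetBlockRequestProto gbr = request.getGetBlock();
  ContainerCommandRequestProto perDnRequest = request.toBuilder()
      .setGetBlock(gbr.toBuilder()
          .setBlockID(gbr.getBlockID().toBuilder()
              // For an EC rs-3-2 pipeline this is an index in 1..5.
              .setReplicaIndex(pipeline.getReplicaIndex(dn))
              .build())
          .build())
      .build();
  futureHashMap.put(dn, sendCommandAsync(perDnRequest, dn).getResponse());
}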
@@ -21,6 +21,8 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,6 +48,8 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import picocli.CommandLine;

import java.io.File;
@@ -86,7 +90,7 @@ public class TestOzoneDebugShell {
protected static void startCluster() throws Exception {
// Init HA cluster
omServiceId = "om-service-test1";
- final int numDNs = 3;
+ final int numDNs = 5;
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numDNs)
.build();
@@ -112,13 +116,14 @@ public static void init() throws Exception {
startCluster();
}

- @Test
- public void testChunkInfoCmdBeforeAfterCloseContainer() throws Exception {
+ @ParameterizedTest
+ @ValueSource(booleans = {true, false})
+ public void testChunkInfoCmdBeforeAfterCloseContainer(boolean isEcKey) throws Exception {
final String volumeName = UUID.randomUUID().toString();
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();

- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, isEcKey);

int exitCode = runChunkInfoCommand(volumeName, bucketName, keyName);
assertEquals(0, exitCode);
@@ -134,7 +139,7 @@ public void testChunkInfoVerifyPathsAreDifferent() throws Exception {
final String volumeName = UUID.randomUUID().toString();
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();
- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, false);
int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName);
assertEquals(0, exitCode);
}
@@ -150,7 +155,7 @@ public void testLdbCliForOzoneSnapshot() throws Exception {
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();

- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, false);

String snapshotName =
client.getObjectStore().createSnapshot(volumeName, bucketName, "snap1");
@@ -176,15 +181,22 @@ private static String getSnapshotDBPath(String checkPointDir) {
OM_DB_NAME + checkPointDir;
}


private static void writeKey(String volumeName, String bucketName,
- String keyName) throws IOException {
+ String keyName, boolean isEcKey) throws IOException {
ReplicationConfig repConfig;
if (isEcKey) {
repConfig = new ECReplicationConfig(3, 2);
} else {
repConfig = ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
ReplicationFactor.THREE);
}
try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
// see HDDS-10091 for making this work with FILE_SYSTEM_OPTIMIZED layout
- TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
+ TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
+ BucketLayout.LEGACY);
TestDataUtil.createKey(
client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
- keyName, ReplicationFactor.THREE, ReplicationType.RATIS, "test");
+ keyName, repConfig, "test");
}
}

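A note on the test changes in this file: ECReplicationConfig(3, 2) describes an EC layout with 3 data and 2 parity replicas, each of which must land on a distinct datanode, hence the cluster size bump from 3 to 5. A minimal sketch of the two configurations the parameterized test toggles between, using the same classes imported above:

// EC rs-3-2: 3 data + 2 parity replicas, needs at least 5 datanodes.
ReplicationConfig ecConfig = new ECReplicationConfig(3, 2);
// Ratis three-way replication, the test's pre-existing default.
ReplicationConfig ratisConfig = ReplicationConfig.fromTypeAndFactor(
    ReplicationType.RATIS, ReplicationFactor.THREE);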
@@ -111,7 +111,7 @@ protected void execute(OzoneClient client, OzoneAddress address)
keyPipeline.getReplicationConfig().getReplicationType() ==
HddsProtos.ReplicationType.EC;
Pipeline pipeline;
- if (keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
+ if (!isECKey && keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
pipeline = Pipeline.newBuilder(keyPipeline)
.setReplicationConfig(StandaloneReplicationConfig
.getInstance(ONE)).build();
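The guard above keeps EC pipelines intact: downgrading them to a STANDALONE read pipeline, as is still done for Ratis keys, would discard the datanode-to-replica-index mapping that reconstructRequestIfNeeded depends on. A sketch of the full branch; the else arm is assumed from context, since the diff is truncated before it:

if (!isECKey && keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
  // Non-EC keys: read each replica directly over a standalone pipeline.
  pipeline = Pipeline.newBuilder(keyPipeline)
      .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
      .build();
} else {
  // EC keys keep their pipeline so replica indexes remain available.
  pipeline = keyPipeline; // assumed else-branch, not shown in the diff
}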