diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
index e3c67a99a0c4..224e80aaa12b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
@@ -23,7 +23,6 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Deque;
 import java.util.LinkedList;
@@ -174,8 +173,8 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) throws IOE
     Pipeline pipeline = Pipeline.newBuilder()
         .setReplicationConfig(StandaloneReplicationConfig.getInstance(
             HddsProtos.ReplicationFactor.ONE))
-        .setNodes(Arrays.asList(dataLocation))
-        .setId(PipelineID.valueOf(dataLocation.getUuid()))
+        .setNodes(Collections.singletonList(dataLocation))
+        .setId(dataLocation.getID())
         .setReplicaIndexes(ImmutableMap.of(dataLocation, locationIndex + 1))
         .setState(Pipeline.PipelineState.CLOSED)
         .build();
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
index a6f589e9cc7c..440b5b3d4d52 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -106,7 +105,7 @@ public void testMissingStripeChecksumDoesNotMakeExecutePutBlockFailDuringECRecon
     BlockID blockID = new BlockID(1, 1);
     DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails();
     Pipeline pipeline = Pipeline.newBuilder()
-        .setId(PipelineID.valueOf(datanodeDetails.getUuid()))
+        .setId(datanodeDetails.getID())
         .setReplicationConfig(replicationConfig)
         .setNodes(ImmutableList.of(datanodeDetails))
         .setState(Pipeline.PipelineState.CLOSED)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeID.java
index 53a6b7b4dfdf..6a0ee0b43c36 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeID.java
@@ -23,6 +23,7 @@
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.util.StringWithByteString;

 /**
@@ -78,6 +79,10 @@ public ByteString getByteString() {
     return uuidByteString.getBytes();
   }

+  public PipelineID toPipelineID() {
+    return PipelineID.valueOf(uuid);
+  }
+
   public DatanodeIDProto toProto() {
     return DatanodeIDProto.newBuilder().setUuid(toProto(uuid)).build();
   }
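The new DatanodeID#toPipelineID() helper above replaces the PipelineID.valueOf(dn.getUuid()) two-step used at the standalone-pipeline call sites. A minimal before/after sketch (dn stands in for any DatanodeDetails instance in scope):

    // Before: unwrap the UUID, then wrap it again as a PipelineID.
    PipelineID before = PipelineID.valueOf(dn.getUuid());

    // After: the DatanodeID converts itself; no UUID handling at the call site.
    PipelineID after = dn.getID().toPipelineID();

Together with the Pipeline.Builder#setId(DatanodeID) overload added below, call sites such as ECBlockInputStream can pass the datanode directly and drop their PipelineID imports.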
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index f2da85349d47..e7e9068e0b5b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -573,6 +573,11 @@ public Builder(Pipeline pipeline) {
       }
     }

+    public Builder setId(DatanodeID datanodeID) {
+      this.id = datanodeID.toPipelineID();
+      return this;
+    }
+
     public Builder setId(PipelineID id1) {
       this.id = id1;
       return this;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
index fdf4dca51935..d7ea21d024eb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
@@ -19,10 +19,12 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import java.util.UUID;
+import java.util.function.Supplier;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.Codec;
 import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
 import org.apache.hadoop.hdds.utils.db.UuidCodec;
+import org.apache.ratis.util.MemoizedSupplier;

 /**
  * ID for the pipeline, the ID is based on UUID.
 */
@@ -35,6 +37,7 @@ public final class PipelineID {
       PipelineID.class, DelegatedCodec.CopyType.SHALLOW);

   private final UUID id;
+  private final Supplier<HddsProtos.PipelineID> protoSupplier;

   public static Codec<PipelineID> getCodec() {
     return CODEC;
@@ -42,6 +45,7 @@ public static Codec<PipelineID> getCodec() {

   private PipelineID(UUID id) {
     this.id = id;
+    this.protoSupplier = MemoizedSupplier.valueOf(() -> buildProtobuf(id));
   }

   public static PipelineID randomId() {
@@ -62,6 +66,10 @@ public UUID getId() {

   @JsonIgnore
   public HddsProtos.PipelineID getProtobuf() {
+    return protoSupplier.get();
+  }
+
+  static HddsProtos.PipelineID buildProtobuf(UUID id) {
     HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
         .setMostSigBits(id.getMostSignificantBits())
         .setLeastSigBits(id.getLeastSignificantBits())
@@ -86,7 +94,7 @@ public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) {

   @Override
   public String toString() {
-    return "PipelineID=" + id.toString();
+    return "Pipeline-" + id;
   }

   @Override
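getProtobuf() previously rebuilt the protobuf message on every call; with the MemoizedSupplier it is computed at most once per PipelineID instance, which is safe because PipelineID is immutable. A standalone sketch of the pattern, with buildExpensive as a hypothetical stand-in for buildProtobuf:

    import java.util.UUID;
    import java.util.function.Supplier;
    import org.apache.ratis.util.MemoizedSupplier;

    class MemoizedDemo {
      static String buildExpensive(UUID id) { // stand-in for buildProtobuf(id)
        System.out.println("built once");
        return "proto:" + id;
      }

      public static void main(String[] args) {
        UUID id = UUID.randomUUID();
        Supplier<String> cached = MemoizedSupplier.valueOf(() -> buildExpensive(id));
        cached.get(); // invokes buildExpensive and caches the result
        cached.get(); // returns the cached value; nothing is rebuilt
      }
    }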
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
index 92e9c63c185b..7af6e10faaa0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
@@ -22,7 +22,6 @@
 import java.time.Duration;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
@@ -210,15 +209,14 @@ private boolean isDeletionAllowed(ContainerData containerData,
       // TODO: currently EC container goes through this path.
       return true;
     }
-    UUID pipelineUUID;
+    final PipelineID pipelineID;
     try {
-      pipelineUUID = UUID.fromString(originPipelineId);
+      pipelineID = PipelineID.valueOf(originPipelineId);
     } catch (IllegalArgumentException e) {
       LOG.warn("Invalid pipelineID {} for container {}",
           originPipelineId, containerData.getContainerID());
       return false;
     }
-    PipelineID pipelineID = PipelineID.valueOf(pipelineUUID);
     // in case the ratis group does not exist, just mark it for deletion.
     if (!ratisServer.isExist(pipelineID.getProtobuf())) {
       return true;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
index 30ffe7ed4159..53734c9ffefb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
@@ -112,8 +112,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer,

       final RaftGroup group = RatisHelper.newRaftGroup(groupId, peers, priorityList);
       server.addGroup(pipelineIdProto, peers, priorityList);
-      peers.stream().filter(
-          d -> !d.getUuid().equals(dn.getUuid()))
+      peers.stream().filter(d -> !d.getID().equals(dn.getID()))
           .forEach(d -> {
             final RaftPeer peer = RatisHelper.toRaftPeer(d);
             try (RaftClient client = newRaftClient.apply(peer,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index eb2f21a0be14..3aeee3f2724b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -375,8 +375,7 @@ private void processCmd(DeleteCmdInfo cmd) {

       ContainerBlocksDeletionACKProto.Builder resultBuilder =
           ContainerBlocksDeletionACKProto.newBuilder().addAllResults(results);
-      resultBuilder.setDnId(cmd.getContext().getParent().getDatanodeDetails()
-          .getUuid().toString());
+      resultBuilder.setDnId(cmd.getContext().getParent().getDatanodeDetails().getUuidString());
       blockDeletionACK = resultBuilder.build();

       // Send ACK back to SCM as long as meta updated
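The BlockDeletingService change relies on PipelineID.valueOf(String) performing the UUID parse internally, so the intermediate UUID variable disappears while malformed input still surfaces as IllegalArgumentException. A sketch of the resulting control flow (parseOriginPipeline is a hypothetical helper, not part of the patch):

    // Returns null when the persisted ID cannot be parsed; the caller treats
    // that as "deletion not allowed", mirroring isDeletionAllowed above.
    static PipelineID parseOriginPipeline(String originPipelineId, long containerId) {
      try {
        return PipelineID.valueOf(originPipelineId);
      } catch (IllegalArgumentException e) {
        LOG.warn("Invalid pipelineID {} for container {}", originPipelineId, containerId);
        return null;
      }
    }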
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 9b538f187cfc..9055ee15757a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -20,9 +20,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;

 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.io.IOException;
-import java.util.UUID;
 import java.util.concurrent.Callable;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -36,6 +34,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.ratis.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -129,15 +128,10 @@ public EndpointStateMachine.EndPointStates call() throws Exception {
       SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
           .register(datanodeDetails.getExtendedProtoBufMessage(), nodeReport,
               containerReport, pipelineReportsProto, layoutInfo);
-      Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
-              .equals(datanodeDetails.getUuid()),
-          "Unexpected datanode ID in the response.");
-      Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
+      Preconditions.assertEquals(datanodeDetails.getUuidString(), response.getDatanodeUUID(), "datanodeID");
+      Preconditions.assertTrue(!StringUtils.isBlank(response.getClusterID()),
           "Invalid cluster ID in the response.");
-      Preconditions.checkState(response.getErrorCode() == success,
-          "DataNode has different Software Layout Version" +
-              " than SCM or RECON. EndPoint address is: " +
-              rpcEndPoint.getAddressString());
+      Preconditions.assertSame(success, response.getErrorCode(), "ErrorCode");
       if (response.hasHostname() && response.hasIpAddress()) {
         datanodeDetails.setHostName(response.getHostname());
         datanodeDetails.setIpAddress(response.getIpAddress());
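Switching from Guava's Preconditions to the Ratis assertions shortens the checks: assertEquals and assertSame take the two values plus a short name, so the hand-assembled message strings go away. A minimal sketch with dummy values (assuming the Ratis overloads used in the patch):

    import org.apache.ratis.util.Preconditions;

    public class AssertDemo {
      public static void main(String[] args) {
        String expected = "00000000-0000-0000-0000-000000000001"; // local UUID string
        String actual   = "00000000-0000-0000-0000-000000000002"; // from the response
        // Throws IllegalStateException naming "datanodeID" when the values differ.
        Preconditions.assertEquals(expected, actual, "datanodeID");
      }
    }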
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index f43f83dcfa81..a81e992bb01b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -26,19 +26,18 @@
 import java.net.BindException;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
@@ -71,7 +70,7 @@ public final class XceiverServerGrpc
     implements XceiverServerSpi {
   private static final Logger LOG = LoggerFactory.getLogger(XceiverServerGrpc.class);
   private int port;
-  private UUID id;
+  private DatanodeID id;
   private Server server;
   private final ContainerDispatcher storageContainer;
   private boolean isStarted;
@@ -90,7 +89,7 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails,
       ContainerDispatcher dispatcher, CertificateClient caClient) {
     Preconditions.checkNotNull(conf);

-    this.id = datanodeDetails.getUuid();
+    this.id = datanodeDetails.getID();
     this.datanodeDetails = datanodeDetails;
     this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
         OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
@@ -237,14 +236,13 @@ public void submitRequest(ContainerCommandRequestProto request,

   @Override
   public boolean isExist(HddsProtos.PipelineID pipelineId) {
-    return PipelineID.valueOf(id).getProtobuf().equals(pipelineId);
+    return id.toPipelineID().getProtobuf().equals(pipelineId);
   }

   @Override
   public List<PipelineReport> getPipelineReport() {
-    return Collections.singletonList(
-        PipelineReport.newBuilder()
-            .setPipelineID(PipelineID.valueOf(id).getProtobuf())
-            .build());
+    return Collections.singletonList(PipelineReport.newBuilder()
+        .setPipelineID(id.toPipelineID().getProtobuf())
+        .build());
   }
 }
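With the field typed as DatanodeID, the standalone gRPC server derives its implicit single-node pipeline directly from its own identity. A compact restatement of the report path above (same calls as the patch, extracted into a free-standing helper):

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hdds.protocol.DatanodeID;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;

    final class StandaloneReport {
      // The server advertises exactly one implicit pipeline: itself.
      static List<PipelineReport> reportFor(DatanodeID id) {
        return Collections.singletonList(PipelineReport.newBuilder()
            .setPipelineID(id.toPipelineID().getProtobuf())
            .build());
      }
    }

isExist() is the mirror image: a pipeline "exists" on this server only if the requested ID equals the one derived from the datanode's own ID.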
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
index 51014299a598..c967501591e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
@@ -21,10 +21,10 @@
 import jakarta.annotation.Nonnull;
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
-import org.apache.commons.collections.map.SingletonMap;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
@@ -181,10 +180,10 @@ Pipeline singleNodePipeline(DatanodeDetails dn,
     // To get the same client from cache, we try to use the DN UUID as
     // pipelineID for uniqueness. Please note, pipeline does not have any
     // significance after it's close. So, we are ok to use any ID.
-    return Pipeline.newBuilder().setId(PipelineID.valueOf(dn.getUuid()))
+    return Pipeline.newBuilder().setId(dn.getID())
         .setReplicationConfig(repConfig).setNodes(ImmutableList.of(dn))
         .setState(Pipeline.PipelineState.CLOSED)
-        .setReplicaIndexes(new SingletonMap(dn, replicaIndex)).build();
+        .setReplicaIndexes(Collections.singletonMap(dn, replicaIndex)).build();
   }

   public XceiverClientManager getXceiverClientManager() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
index 65484347b16d..3cde9207bd11 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
@@ -79,7 +79,6 @@ void testWriteReadBeforeRatisDatastreamPortLayoutVersion(@TempDir File dir)
   void testWriteReadAfterRatisDatastreamPortLayoutVersion(@TempDir File dir)
       throws IOException {
     DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
-    assertEquals(original.getUuid().toString(), original.getUuidString());

     File file = new File(dir, "datanode.yaml");
     OzoneConfiguration conf = new OzoneConfiguration();
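Dropping commons-collections' SingletonMap also closes a generics gap: the 3.x class predates generics, so new SingletonMap(dn, replicaIndex) was a raw type handed to a builder expecting Map<DatanodeDetails, Integer>. The JDK factory keeps the types and removes the dependency (dn and replicaIndex refer to the method parameters above):

    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    // Typed, immutable, single-entry map; no raw-type warning.
    Map<DatanodeDetails, Integer> replicaIndexes =
        Collections.singletonMap(dn, replicaIndex);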
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorSchedulingBenchmark.java
similarity index 89%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorSchedulingBenchmark.java
index 88620156a543..1b8c041bacb8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorSchedulingBenchmark.java
@@ -25,8 +25,8 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.UUID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
@@ -37,7 +37,7 @@
  * This unit test is not enabled (doesn't start with Test) but can be used
  * to validate changes manually.
  */
-public class ReplicationSupervisorScheduling {
+public class ReplicationSupervisorSchedulingBenchmark {

   private final Random random = new Random();

@@ -50,16 +50,16 @@ public void test() throws InterruptedException {

     //locks representing the limited resource of remote and local disks

     //datanode -> disk -> lock object (remote resources)
-    Map<UUID, Map<Integer, Object>> volumeLocks = new HashMap<>();
+    final Map<DatanodeID, Map<Integer, Object>> volumeLocks = new HashMap<>();

     //disk -> lock (local resources)
     Map<Integer, Object> destinationLocks = new HashMap<>();

     //init the locks
     for (DatanodeDetails datanode : datanodes) {
-      volumeLocks.put(datanode.getUuid(), new HashMap<>());
+      volumeLocks.put(datanode.getID(), new HashMap<>());
       for (int i = 0; i < 10; i++) {
-        volumeLocks.get(datanode.getUuid()).put(i, new Object());
+        volumeLocks.get(datanode.getID()).put(i, new Object());
       }
     }

@@ -75,15 +75,14 @@ public void test() throws InterruptedException {
           task.getSources().get(random.nextInt(task.getSources().size()));

       final Map<Integer, Object> volumes =
-          volumeLocks.get(sourceDatanode.getUuid());
+          volumeLocks.get(sourceDatanode.getID());
       Object volumeLock = volumes.get(random.nextInt(volumes.size()));
       synchronized (volumeLock) {
-        System.out.println("Downloading " + task.getContainerId() + " from "
-            + sourceDatanode.getUuid());
+        System.out.println("Downloading " + task.getContainerId() + " from " + sourceDatanode);
         try {
           volumeLock.wait(1000);
         } catch (InterruptedException ex) {
-          ex.printStackTrace();
+          throw new IllegalStateException(ex);
         }
       }

@@ -98,7 +97,7 @@ public void test() throws InterruptedException {
         try {
           destinationLock.wait(1000);
         } catch (InterruptedException ex) {
-          ex.printStackTrace();
+          throw new IllegalStateException(ex);
         }
       }
     };
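Besides the rename, the benchmark now fails fast when interrupted instead of printing the stack trace and limping on. A common refinement of the same pattern, shown here as an optional variation rather than what the patch does, is to restore the thread's interrupt status before rethrowing:

    try {
      volumeLock.wait(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();  // variation: preserve the interrupt flag
      throw new IllegalStateException(ex); // same fail-fast behavior as the patch
    }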