diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java index 23d5ccf8f799..6315596a3a27 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java @@ -23,10 +23,10 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.UUID; import java.util.function.Supplier; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -356,7 +356,7 @@ private void updateContainerReplica(final DatanodeDetails datanodeDetails, .setContainerID(containerId) .setContainerState(replicaProto.getState()) .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(UUID.fromString(replicaProto.getOriginNodeId())) + .setOriginNodeId(DatanodeID.fromUuidString(replicaProto.getOriginNodeId())) .setSequenceId(replicaProto.getBlockCommitSequenceId()) .setKeyCount(replicaProto.getKeyCount()) .setReplicaIndex(replicaProto.getReplicaIndex()) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java index 43267d426571..ce08cbb3f25a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java @@ -17,13 +17,12 @@ package org.apache.hadoop.hdds.scm.container; -import com.google.common.base.Preconditions; -import java.util.Optional; -import java.util.UUID; +import java.util.Objects; import org.apache.commons.lang3.builder.CompareToBuilder; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; /** @@ -34,7 +33,12 @@ public final class ContainerReplica implements Comparable { private final ContainerID containerID; private final ContainerReplicaProto.State state; private final DatanodeDetails datanodeDetails; - private final UUID placeOfBirth; + /** + * The datanode where this replica was originally created. + * If null, the origin is the same as {@link #datanodeDetails}. + */ + private final DatanodeID originDatanodeId; + /** The position in the pipeline.
*/ private final int replicaIndex; private final Long sequenceId; @@ -43,15 +47,15 @@ public final class ContainerReplica implements Comparable { private final boolean isEmpty; private ContainerReplica(ContainerReplicaBuilder b) { - containerID = b.containerID; - state = b.state; - datanodeDetails = b.datanode; - placeOfBirth = Optional.ofNullable(b.placeOfBirth).orElse(datanodeDetails.getUuid()); - keyCount = b.keyCount; - bytesUsed = b.bytesUsed; - replicaIndex = b.replicaIndex; - isEmpty = b.isEmpty; - sequenceId = b.sequenceId; + this.containerID = Objects.requireNonNull(b.containerID, "containerID == null"); + this.state = Objects.requireNonNull(b.state, "state == null"); + this.datanodeDetails = Objects.requireNonNull(b.datanode, "datanode == null"); + this.originDatanodeId = b.placeOfBirth; + this.keyCount = b.keyCount; + this.bytesUsed = b.bytesUsed; + this.replicaIndex = b.replicaIndex; + this.isEmpty = b.isEmpty; + this.sequenceId = b.sequenceId; } public ContainerID getContainerID() { @@ -72,8 +76,8 @@ public DatanodeDetails getDatanodeDetails() { * * @return UUID */ - public UUID getOriginDatanodeId() { - return placeOfBirth; + public DatanodeID getOriginDatanodeId() { + return originDatanodeId != null ? 
originDatanodeId : datanodeDetails.getID(); } /** @@ -144,7 +148,7 @@ public boolean equals(Object o) { @Override public int compareTo(ContainerReplica that) { - Preconditions.checkNotNull(that); + Objects.requireNonNull(that); return new CompareToBuilder() .append(this.containerID, that.containerID) .append(this.datanodeDetails, that.datanodeDetails) @@ -167,7 +171,7 @@ public ContainerReplicaBuilder toBuilder() { .setContainerState(state) .setDatanodeDetails(datanodeDetails) .setKeyCount(keyCount) - .setOriginNodeId(placeOfBirth) + .setOriginNodeId(originDatanodeId) .setReplicaIndex(replicaIndex) .setSequenceId(sequenceId) .setEmpty(isEmpty); @@ -175,18 +179,16 @@ public ContainerReplicaBuilder toBuilder() { @Override public String toString() { - return "ContainerReplica{" + - "containerID=" + containerID + - ", state=" + state + - ", datanodeDetails=" + datanodeDetails + - ", placeOfBirth=" + placeOfBirth + - ", sequenceId=" + sequenceId + - ", keyCount=" + keyCount + - ", bytesUsed=" + bytesUsed + ((replicaIndex > 0) ? - ",replicaIndex=" + replicaIndex : - "") + - ", isEmpty=" + isEmpty + - '}'; + return "ContainerReplica{" + containerID + + " (" + state + + ") currentDN=" + datanodeDetails + + (originDatanodeId != null ? ", originDN=" + originDatanodeId : " (origin)") + + ", bcsid=" + sequenceId + + (replicaIndex > 0 ? ", replicaIndex=" + replicaIndex : "") + + ", keyCount=" + keyCount + + ", bytesUsed=" + bytesUsed + + ", " + (isEmpty ? 
"empty" : "non-empty") + + '}'; } /** @@ -197,7 +199,7 @@ public static class ContainerReplicaBuilder { private ContainerID containerID; private ContainerReplicaProto.State state; private DatanodeDetails datanode; - private UUID placeOfBirth; + private DatanodeID placeOfBirth; private Long sequenceId; private long bytesUsed; private long keyCount; @@ -246,7 +248,7 @@ public ContainerReplicaBuilder setReplicaIndex( * @param originNodeId origin node UUID * @return ContainerReplicaBuilder */ - public ContainerReplicaBuilder setOriginNodeId(UUID originNodeId) { + public ContainerReplicaBuilder setOriginNodeId(DatanodeID originNodeId) { placeOfBirth = originNodeId; return this; } @@ -283,12 +285,6 @@ public ContainerReplicaBuilder setEmpty(boolean empty) { * @return ContainerReplicaBuilder */ public ContainerReplica build() { - Preconditions.checkNotNull(containerID, - "Container Id can't be null"); - Preconditions.checkNotNull(state, - "Container state can't be null"); - Preconditions.checkNotNull(datanode, - "DatanodeDetails can't be null"); return new ContainerReplica(this); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/QuasiClosedStuckReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/QuasiClosedStuckReplicaCount.java index 412978c240ef..97df8cf958f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/QuasiClosedStuckReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/QuasiClosedStuckReplicaCount.java @@ -24,7 +24,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import 
org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -34,18 +34,20 @@ */ public class QuasiClosedStuckReplicaCount { - private final Map> replicasByOrigin = new HashMap<>(); - private final Map> inServiceReplicasByOrigin = new HashMap<>(); - private final Map> maintenanceReplicasByOrigin = new HashMap<>(); - private boolean hasOutOfServiceReplicas = false; - private int minHealthyForMaintenance; - private boolean hasHealthyReplicas = false; + private final Map> replicasByOrigin = new HashMap<>(); + private final Map> inServiceReplicasByOrigin = new HashMap<>(); + private final Map> maintenanceReplicasByOrigin = new HashMap<>(); + private final int minHealthyForMaintenance; + private final boolean hasHealthyReplicas; + private final boolean hasOutOfServiceReplicas; public QuasiClosedStuckReplicaCount(Set replicas, int minHealthyForMaintenance) { this.minHealthyForMaintenance = minHealthyForMaintenance; + boolean hasHealthy = false; + boolean hasOutOfService = false; for (ContainerReplica r : replicas) { if (r.getState() != StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY) { - hasHealthyReplicas = true; + hasHealthy = true; } replicasByOrigin.computeIfAbsent(r.getOriginDatanodeId(), k -> new HashSet<>()).add(r); HddsProtos.NodeOperationalState opState = r.getDatanodeDetails().getPersistedOpState(); @@ -54,11 +56,14 @@ public QuasiClosedStuckReplicaCount(Set replicas, int minHealt } else if (opState == HddsProtos.NodeOperationalState.IN_MAINTENANCE || opState == HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE) { maintenanceReplicasByOrigin.computeIfAbsent(r.getOriginDatanodeId(), k -> new HashSet<>()).add(r); - hasOutOfServiceReplicas = true; + hasOutOfService = true; } else { - hasOutOfServiceReplicas = true; + hasOutOfService = true; } } + + this.hasHealthyReplicas = hasHealthy; + this.hasOutOfServiceReplicas = hasOutOfService; } public int availableOrigins() { @@ -77,17 +82,23 @@ public boolean isUnderReplicated() { return 
!getUnderReplicatedReplicas().isEmpty(); } + private Set getInService(DatanodeID origin) { + final Set set = inServiceReplicasByOrigin.get(origin); + return set == null ? Collections.emptySet() : set; + } + + private int getMaintenanceCount(DatanodeID origin) { + final Set maintenance = maintenanceReplicasByOrigin.get(origin); + return maintenance == null ? 0 : maintenance.size(); + } + public List getUnderReplicatedReplicas() { List misReplicatedOrigins = new ArrayList<>(); if (replicasByOrigin.size() == 1) { - Map.Entry> entry = replicasByOrigin.entrySet().iterator().next(); - Set inService = inServiceReplicasByOrigin.get(entry.getKey()); - if (inService == null) { - inService = Collections.emptySet(); - } - Set maintenance = maintenanceReplicasByOrigin.get(entry.getKey()); - int maintenanceCount = maintenance == null ? 0 : maintenance.size(); + final Map.Entry> entry = replicasByOrigin.entrySet().iterator().next(); + final Set inService = getInService(entry.getKey()); + final int maintenanceCount = getMaintenanceCount(entry.getKey()); if (maintenanceCount > 0) { if (inService.size() < minHealthyForMaintenance) { @@ -105,13 +116,9 @@ public List getUnderReplicatedReplicas() { // If there are multiple origins, we expect 2 copies of each origin // For maintenance, we expect 1 copy of each origin and ignore the minHealthyForMaintenance parameter - for (Map.Entry> entry : replicasByOrigin.entrySet()) { - Set inService = inServiceReplicasByOrigin.get(entry.getKey()); - if (inService == null) { - inService = Collections.emptySet(); - } - Set maintenance = maintenanceReplicasByOrigin.get(entry.getKey()); - int maintenanceCount = maintenance == null ? 
0 : maintenance.size(); + for (Map.Entry> entry : replicasByOrigin.entrySet()) { + final Set inService = getInService(entry.getKey()); + final int maintenanceCount = getMaintenanceCount(entry.getKey()); if (inService.size() < 2) { if (maintenanceCount > 0) { @@ -142,9 +149,9 @@ public boolean isOverReplicated() { public List getOverReplicatedOrigins() { // If there is only a single origin, we expect 3 copies, otherwise we expect 2 copies of each origin if (replicasByOrigin.size() == 1) { - UUID origin = replicasByOrigin.keySet().iterator().next(); - Set inService = inServiceReplicasByOrigin.get(origin); - if (inService != null && inService.size() > 3) { + final DatanodeID origin = replicasByOrigin.keySet().iterator().next(); + final Set inService = getInService(origin); + if (inService.size() > 3) { return Collections.singletonList(new MisReplicatedOrigin(inService, inService.size() - 3)); } return Collections.emptyList(); @@ -152,9 +159,9 @@ public List getOverReplicatedOrigins() { // If there are multiple origins, we expect 2 copies of each origin List overReplicatedOrigins = new ArrayList<>(); - for (UUID origin : replicasByOrigin.keySet()) { - Set replicas = inServiceReplicasByOrigin.get(origin); - if (replicas != null && replicas.size() > 2) { + for (DatanodeID origin : replicasByOrigin.keySet()) { + final Set replicas = getInService(origin); + if (replicas.size() > 2) { overReplicatedOrigins.add(new MisReplicatedOrigin(replicas, replicas.size() - 2)); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index 7bef3e612764..5ac44785dd34 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -30,10 +30,10 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -511,7 +511,7 @@ public List getVulnerableUnhealthyReplicas(Function originsOfInServiceReplicas = new HashSet<>(); + final Set originsOfInServiceReplicas = new HashSet<>(); for (ContainerReplica replica : replicas) { if (replica.getDatanodeDetails().getPersistedOpState() .equals(IN_SERVICE) && replica.getSequenceId().equals(container.getSequenceId())) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java index 75bffa01e242..267da521736e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java @@ -25,10 +25,10 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import 
org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -331,7 +331,7 @@ static List findNonUniqueDeleteCandidates( Function nodeStatusFn) { // Gather the origin node IDs of replicas which are not candidates for // deletion. - Set existingOriginNodeIDs = allReplicas.stream() + final Set existingOriginNodeIDs = allReplicas.stream() .filter(r -> !deleteCandidates.contains(r)) .filter(r -> { NodeStatus status = nodeStatusFn.apply(r.getDatanodeDetails()); @@ -374,7 +374,7 @@ both the first and last replicas have the same origin node ID (and no return nonUniqueDeleteCandidates; } - private static void checkUniqueness(Set existingOriginNodeIDs, + private static void checkUniqueness(Set existingOriginNodeIDs, List nonUniqueDeleteCandidates, ContainerReplica replica) { if (existingOriginNodeIDs.contains(replica.getOriginDatanodeId())) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index ba6597500efe..a61562c361a4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -679,7 +679,7 @@ public static List getReplicas( List replicas = new ArrayList<>(); for (DatanodeDetails datanode : datanodeDetails) { replicas.add(getReplicas(containerId, state, - sequenceId, datanode.getUuid(), datanode)); + sequenceId, datanode.getID(), datanode)); } return replicas; } @@ -688,11 +688,11 @@ public static ContainerReplica getReplicas( final ContainerID containerId, final ContainerReplicaProto.State state, final long sequenceId, - final UUID originNodeId, + final DatanodeID originNodeId, final DatanodeDetails datanodeDetails) { - return getReplicas(containerId, state, CONTAINER_USED_BYTES_DEFAULT, + return getReplicaBuilder(containerId, state, CONTAINER_USED_BYTES_DEFAULT, CONTAINER_NUM_KEYS_DEFAULT, sequenceId, originNodeId, - 
datanodeDetails); + datanodeDetails).build(); } public static ContainerReplica.ContainerReplicaBuilder getReplicaBuilder( @@ -701,7 +701,7 @@ public static ContainerReplica.ContainerReplicaBuilder getReplicaBuilder( final long usedBytes, final long keyCount, final long sequenceId, - final UUID originNodeId, + final DatanodeID originNodeId, final DatanodeDetails datanodeDetails) { return ContainerReplica.newBuilder() .setContainerID(containerId).setContainerState(state) @@ -712,20 +712,6 @@ public static ContainerReplica.ContainerReplicaBuilder getReplicaBuilder( .setEmpty(keyCount == 0); } - public static ContainerReplica getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long usedBytes, - final long keyCount, - final long sequenceId, - final UUID originNodeId, - final DatanodeDetails datanodeDetails) { - ContainerReplica.ContainerReplicaBuilder builder = - getReplicaBuilder(containerId, state, usedBytes, keyCount, - sequenceId, originNodeId, datanodeDetails); - return builder.build(); - } - public static List getReplicasWithReplicaIndex( final ContainerID containerId, final ContainerReplicaProto.State state, @@ -737,7 +723,7 @@ public static List getReplicasWithReplicaIndex( int replicaIndex = 1; for (DatanodeDetails datanode : datanodeDetails) { replicas.add(getReplicaBuilder(containerId, state, - usedBytes, keyCount, sequenceId, datanode.getUuid(), datanode) + usedBytes, keyCount, sequenceId, datanode.getID(), datanode) .setReplicaIndex(replicaIndex).build()); replicaIndex += 1; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index d1a2f6e65763..8646fd9ec2b0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -357,10 +357,10 @@ public void testReplicasToRemoveWith2CountPerUniqueReplica() { ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(3, 6))); Set replicasToBeRemoved = Sets.newHashSet( HddsTestUtils.getReplicaBuilder(ContainerID.valueOf(1), CLOSED, 0, 0, 0, - list.get(7).getUuid(), list.get(7)) + list.get(7).getID(), list.get(7)) .setReplicaIndex(1).build(), HddsTestUtils.getReplicaBuilder(ContainerID.valueOf(1), CLOSED, 0, 0, 0, - list.get(8).getUuid(), list.get(8)).setReplicaIndex(1) + list.get(8).getID(), list.get(8)).setReplicaIndex(1) .build()); replicas.addAll(replicasToBeRemoved); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplica.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplica.java index 4efca860cf9a..14366b4d813d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplica.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplica.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; import static org.junit.jupiter.api.Assertions.assertEquals; -import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.junit.jupiter.api.Test; @@ -38,7 +38,7 @@ void toBuilder() { ThreadLocalRandom.current().nextLong(Long.MAX_VALUE - 1) + 1)) .setContainerState(CLOSED) .setKeyCount(ThreadLocalRandom.current().nextLong()) - .setOriginNodeId(UUID.randomUUID()) + .setOriginNodeId(DatanodeID.randomID()) .setSequenceId(ThreadLocalRandom.current().nextLong()) .setReplicaIndex(ThreadLocalRandom.current().nextInt()) 
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 60f75c17a0b2..a8e22895ea1e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -573,7 +573,7 @@ private ContainerReplica createReplica(ContainerID containerID, .setContainerID(containerID) .setContainerState(ContainerReplicaProto.State.CLOSED) .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(datanodeDetails.getUuid()) + .setOriginNodeId(datanodeDetails.getID()) .setSequenceId(1000L) .setBytesUsed(usedBytes) .build(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java index cdb539c48c8c..0e86dea26966 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java @@ -248,7 +248,7 @@ private static double[] createUtilizationList(int count) throws IllegalArgumentE .setContainerID(containerID) .setContainerState(ContainerReplicaProto.State.CLOSED) .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(datanodeDetails.getUuid()) + .setOriginNodeId(datanodeDetails.getID()) .setSequenceId(1000L) .setBytesUsed(usedBytes) .build(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java index 999aadaa6e90..ad5a30e3aced 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java @@ -32,13 +32,13 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -121,7 +121,7 @@ public static Set createReplicas(ContainerID containerID, DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); replicas.add(createContainerReplica(containerID, i, IN_SERVICE, replicaState, keyCount, bytesUsed, - dn, dn.getUuid())); + dn, dn.getID())); } return replicas; } @@ -130,7 +130,7 @@ public static Set createReplicasWithSameOrigin( ContainerID containerID, ContainerReplicaProto.State replicaState, int... 
indexes) { Set replicas = new HashSet<>(); - UUID originNodeId = MockDatanodeDetails.randomDatanodeDetails().getUuid(); + final DatanodeID originNodeId = DatanodeID.randomID(); for (int i : indexes) { replicas.add(createContainerReplica( containerID, i, IN_SERVICE, replicaState, 123L, 1234L, @@ -146,14 +146,14 @@ public static ContainerReplica createEmptyContainerReplica(ContainerID container = MockDatanodeDetails.randomDatanodeDetails(); return createContainerReplica(containerID, replicaIndex, opState, replicaState, 0L, 0L, - datanodeDetails, datanodeDetails.getUuid()); + datanodeDetails, datanodeDetails.getID()); } public static Set createReplicasWithOriginAndOpState( ContainerID containerID, ContainerReplicaProto.State replicaState, - Pair... nodes) { + Pair... nodes) { Set replicas = new HashSet<>(); - for (Pair i : nodes) { + for (Pair i : nodes) { replicas.add(createContainerReplica( containerID, 0, i.getRight(), replicaState, 123L, 1234L, MockDatanodeDetails.randomDatanodeDetails(), i.getLeft())); @@ -168,7 +168,7 @@ public static ContainerReplica createContainerReplica(ContainerID containerID, = MockDatanodeDetails.randomDatanodeDetails(); return createContainerReplica(containerID, replicaIndex, opState, replicaState, 123L, 1234L, - datanodeDetails, datanodeDetails.getUuid()); + datanodeDetails, datanodeDetails.getID()); } public static ContainerReplica createContainerReplica(ContainerID containerID, @@ -178,14 +178,14 @@ public static ContainerReplica createContainerReplica(ContainerID containerID, = MockDatanodeDetails.randomDatanodeDetails(); return createContainerReplica(containerID, replicaIndex, opState, replicaState, 123L, 1234L, - datanodeDetails, datanodeDetails.getUuid(), seqId); + datanodeDetails, datanodeDetails.getID(), seqId); } @SuppressWarnings("checkstyle:ParameterNumber") public static ContainerReplica createContainerReplica(ContainerID containerID, int replicaIndex, HddsProtos.NodeOperationalState opState, ContainerReplicaProto.State 
replicaState, long keyCount, long bytesUsed, - DatanodeDetails datanodeDetails, UUID originNodeId) { + DatanodeDetails datanodeDetails, DatanodeID originNodeId) { ContainerReplica.ContainerReplicaBuilder builder = ContainerReplica.newBuilder(); datanodeDetails.setPersistedOpState(opState); @@ -205,7 +205,7 @@ public static ContainerReplica createContainerReplica(ContainerID containerID, public static ContainerReplica createContainerReplica(ContainerID containerID, int replicaIndex, HddsProtos.NodeOperationalState opState, ContainerReplicaProto.State replicaState, long keyCount, long bytesUsed, - DatanodeDetails datanodeDetails, UUID originNodeId, long seqId) { + DatanodeDetails datanodeDetails, DatanodeID originNodeId, long seqId) { ContainerReplica.ContainerReplicaBuilder builder = ContainerReplica.newBuilder(); datanodeDetails.setPersistedOpState(opState); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckOverReplicationHandler.java index 5b4b64537942..a65ba0446ff7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckOverReplicationHandler.java @@ -30,12 +30,12 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -59,8 +59,8 @@ public class TestQuasiClosedStuckOverReplicationHandler { private ReplicationManagerMetrics metrics; private Set>> commandsSent; private QuasiClosedStuckOverReplicationHandler handler; - private UUID origin1 = UUID.randomUUID(); - private UUID origin2 = UUID.randomUUID(); + private final DatanodeID origin1 = DatanodeID.randomID(); + private final DatanodeID origin2 = DatanodeID.randomID(); @BeforeEach void setup() throws NodeNotFoundException, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckReplicaCount.java index 2e19e788509e..3b74eccc72fd 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckReplicaCount.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckReplicaCount.java @@ -29,11 +29,10 @@ import java.util.List; import java.util.Set; -import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; /** @@ -41,16 +40,9 @@ */ public class TestQuasiClosedStuckReplicaCount { - private UUID origin1; - private UUID origin2; - private UUID origin3; - - @BeforeEach - public void setUp() { - origin1 = UUID.randomUUID(); - origin2 = UUID.randomUUID(); - origin3 = UUID.randomUUID(); - } + private final DatanodeID origin1 = DatanodeID.randomID(); + private final DatanodeID origin2 = DatanodeID.randomID(); + private final DatanodeID origin3 = 
DatanodeID.randomID(); @Test public void testCorrectlyReplicationWithThreeOrigins() { @@ -122,7 +114,7 @@ public void testUnderReplicationWithThreeOriginsTwoUnderReplicated() { assertTrue(misReplicatedOrigins.size() == 2); for (QuasiClosedStuckReplicaCount.MisReplicatedOrigin misReplicatedOrigin : misReplicatedOrigins) { - UUID source = misReplicatedOrigin.getSources().iterator().next().getOriginDatanodeId(); + final DatanodeID source = misReplicatedOrigin.getSources().iterator().next().getOriginDatanodeId(); assertTrue(source.equals(origin1) || source.equals(origin3)); } } @@ -336,7 +328,7 @@ public void testNoOverReplicationWithExcessMaintenanceReplicasOneOrigin() { private void validateMisReplicatedOrigins( List misReplicatedOrigins, - int expectedUnderRepOrigins, int expectedSources, int expectedDelta, UUID expectedOrigin) { + int expectedUnderRepOrigins, int expectedSources, int expectedDelta, DatanodeID expectedOrigin) { assertTrue(misReplicatedOrigins.size() == expectedUnderRepOrigins); Set sources = misReplicatedOrigins.get(0).getSources(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckUnderReplicationHandler.java index 0e32e8220c4b..d88bcf86af9b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestQuasiClosedStuckUnderReplicationHandler.java @@ -31,12 +31,12 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; 
import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -114,7 +114,7 @@ void setup(@TempDir File testDir) throws NodeNotFoundException, @Test public void testReturnsZeroIfNotUnderReplicated() throws IOException { - UUID origin = UUID.randomUUID(); + final DatanodeID origin = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin, HddsProtos.NodeOperationalState.IN_SERVICE), @@ -127,7 +127,7 @@ public void testReturnsZeroIfNotUnderReplicated() throws IOException { @Test public void testNoCommandsScheduledIfPendingOps() throws IOException { - UUID origin = UUID.randomUUID(); + final DatanodeID origin = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin, HddsProtos.NodeOperationalState.IN_SERVICE), @@ -142,7 +142,7 @@ public void testNoCommandsScheduledIfPendingOps() throws IOException { @Test public void testCommandScheduledForUnderReplicatedContainer() throws IOException { - UUID origin = UUID.randomUUID(); + final DatanodeID origin = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin, HddsProtos.NodeOperationalState.IN_SERVICE)); @@ -154,8 +154,8 @@ public void testCommandScheduledForUnderReplicatedContainer() throws IOException @Test public void testOverloadedExceptionContinuesAndThrows() throws NotLeaderException, 
CommandTargetOverloadedException { - UUID origin1 = UUID.randomUUID(); - UUID origin2 = UUID.randomUUID(); + final DatanodeID origin1 = DatanodeID.randomID(); + final DatanodeID origin2 = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin1, HddsProtos.NodeOperationalState.IN_SERVICE), @@ -170,8 +170,8 @@ public void testOverloadedExceptionContinuesAndThrows() throws NotLeaderExceptio @Test public void testInsufficientNodesExceptionThrown() { - UUID origin1 = UUID.randomUUID(); - UUID origin2 = UUID.randomUUID(); + final DatanodeID origin1 = DatanodeID.randomID(); + final DatanodeID origin2 = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin1, HddsProtos.NodeOperationalState.IN_SERVICE), @@ -187,7 +187,7 @@ public void testInsufficientNodesExceptionThrown() { @Test public void testPartialReplicationExceptionThrown() { - UUID origin1 = UUID.randomUUID(); + final DatanodeID origin1 = DatanodeID.randomID(); Set replicas = ReplicationTestUtil.createReplicasWithOriginAndOpState(container.containerID(), StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED, Pair.of(origin1, HddsProtos.NodeOperationalState.IN_SERVICE)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java index 51768fd887fd..70116ef3ccff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java @@ -415,7 +415,7 @@ void testIsHealthyWithDifferentReplicaStateNotHealthy() { .setContainerID(ContainerID.valueOf(1)) .setContainerState(OPEN) .setDatanodeDetails(dn) - .setOriginNodeId(dn.getUuid()) + .setOriginNodeId(dn.getID()) .setSequenceId(1) .build(); replica.remove(r); @@ -841,7 +841,7 @@ private Set registerNodes( .setContainerID(ContainerID.valueOf(1)) .setContainerState(State.CLOSED) .setDatanodeDetails(dn) - .setOriginNodeId(dn.getUuid()) + .setOriginNodeId(dn.getID()) .setSequenceId(1) .build()); } @@ -850,7 +850,7 @@ private Set registerNodes( private ContainerInfo createContainer(HddsProtos.LifeCycleState state) { return new ContainerInfo.Builder() - .setContainerID(ContainerID.valueOf(1).getId()) + .setContainerID(1) .setState(state) .build(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 935d371bd0d4..b9d42a4838cc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -373,7 +374,7 @@ public void testQuasiClosedContainerWithExcessUnhealthyReplica() Set replicas = 
createReplicasWithSameOrigin(container.containerID(), ContainerReplicaProto.State.QUASI_CLOSED, 0, 0, 0); - UUID origin = replicas.iterator().next().getOriginDatanodeId(); + final DatanodeID origin = replicas.iterator().next().getOriginDatanodeId(); ContainerReplica unhealthy = createContainerReplica(container.containerID(), 0, IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY, 1, 123, @@ -660,7 +661,7 @@ public void testUnrecoverableAndEmpty() ContainerReplica replica = createContainerReplica(container.containerID(), 1, IN_SERVICE, ContainerReplicaProto.State.CLOSED, - 0, 0, MockDatanodeDetails.randomDatanodeDetails(), UUID.randomUUID()); + 0, 0, MockDatanodeDetails.randomDatanodeDetails(), DatanodeID.randomID()); storeContainerAndReplicas(container, Collections.singleton(replica)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java index 7b35ee269bba..6e5e6e73f919 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -89,7 +90,7 @@ */ public class TestReplicationManagerScenarios { - private static final Map ORIGINS = new HashMap<>(); + private static final Map ORIGINS = new HashMap<>(); private 
static final Map DATANODE_ALIASES = new HashMap<>(); private static final Map NODE_STATUS_MAP @@ -238,8 +239,8 @@ protected void startSubServices() { }; } - protected static UUID getOrCreateOrigin(String origin) { - return ORIGINS.computeIfAbsent(origin, (k) -> UUID.randomUUID()); + protected static DatanodeID getOrCreateOrigin(String origin) { + return ORIGINS.computeIfAbsent(origin, k -> DatanodeID.randomID()); } private static Stream getTestScenarios() { @@ -382,7 +383,7 @@ public static class TestReplica { private long used = 10; private boolean isEmpty = false; private String origin; - private UUID originId; + private DatanodeID originId; public void setContainerId(long containerId) { this.containerId = containerId; @@ -481,7 +482,7 @@ private void createOrigin() { if (origin != null) { originId = getOrCreateOrigin(origin); } else { - originId = UUID.randomUUID(); + originId = DatanodeID.randomID(); } } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestEmptyContainerHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestEmptyContainerHandler.java index e049707d5013..e956956e4685 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestEmptyContainerHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestEmptyContainerHandler.java @@ -231,7 +231,7 @@ public void testEmptyECContainerWithNonEmptyReplicaReturnsFalse() ReplicationTestUtil.createContainerReplica(containerInfo.containerID(), 5, HddsProtos.NodeOperationalState.IN_SERVICE, ContainerReplicaProto.State.CLOSED, 1L, 100L, mockDn, - mockDn.getUuid())); + mockDn.getID())); ContainerCheckRequest request = new ContainerCheckRequest.Builder() .setPendingOps(Collections.emptyList()) diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedContainerHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedContainerHandler.java index 8165be5b937b..09ce075bd22c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedContainerHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedContainerHandler.java @@ -228,11 +228,11 @@ public void testQuasiClosedWithUnhealthyHavingHighestSeq() { // 1001 is the highest sequence id final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, dnOne.getUuid(), dnOne); + id, State.QUASI_CLOSED, 1000L, dnOne.getID(), dnOne); final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, dnTwo.getUuid(), dnTwo); + id, State.QUASI_CLOSED, 1000L, dnTwo.getID(), dnTwo); final ContainerReplica replicaThree = getReplicas( - id, State.UNHEALTHY, 1001L, dnThree.getUuid(), dnThree); + id, State.UNHEALTHY, 1001L, dnThree.getID(), dnThree); Set containerReplicas = new HashSet<>(); containerReplicas.add(replicaOne); containerReplicas.add(replicaTwo); @@ -332,11 +332,11 @@ public void testReplicasWithHighestBCSIDAreClosed() { // 1001 is the highest sequence id final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, dnOne.getUuid(), dnOne); + id, State.QUASI_CLOSED, 1000L, dnOne.getID(), dnOne); final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1001L, dnTwo.getUuid(), dnTwo); + id, State.QUASI_CLOSED, 1001L, dnTwo.getID(), dnTwo); final ContainerReplica replicaThree = getReplicas( - id, State.QUASI_CLOSED, 1001L, dnThree.getUuid(), dnThree); + id, State.QUASI_CLOSED, 1001L, dnThree.getID(), dnThree); Set containerReplicas = new HashSet<>(); containerReplicas.add(replicaOne); 
containerReplicas.add(replicaTwo); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedStuckReplicationCheck.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedStuckReplicationCheck.java index 7fc7058577f3..3e0adbf70d3e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedStuckReplicationCheck.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedStuckReplicationCheck.java @@ -30,9 +30,9 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -51,9 +51,10 @@ public class TestQuasiClosedStuckReplicationCheck { private QuasiClosedStuckReplicationCheck handler; - private final UUID origin1 = UUID.randomUUID(); - private final UUID origin2 = UUID.randomUUID(); - private final UUID origin3 = UUID.randomUUID(); + private final DatanodeID origin1 = DatanodeID.randomID(); + private final DatanodeID origin2 = DatanodeID.randomID(); + private final DatanodeID origin3 = DatanodeID.randomID(); + private ReplicationManagerReport report; private ReplicationQueue queue; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestRatisReplicationCheckHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestRatisReplicationCheckHandler.java index 759ff52a8ece..317f57234d26 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestRatisReplicationCheckHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestRatisReplicationCheckHandler.java @@ -41,12 +41,12 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -992,23 +992,23 @@ public void testExcessReplicasButNotOverReplicatedDuetoUniqueOrigins() { replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), sequenceID - 1)); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), sequenceID - 1)); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), sequenceID - 1)); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.UNHEALTHY, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), 
sequenceID)); requestBuilder.setContainerReplicas(replicas) @@ -1030,17 +1030,17 @@ public void testExcessReplicasAndOverReplicatedDuetoNonUniqueOrigins() { repConfig, 1, HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID); - UUID origin = UUID.randomUUID(); + final DatanodeID origin = DatanodeID.randomID(); final Set replicas = new HashSet<>(2); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), sequenceID - 1)); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, MockDatanodeDetails.randomDatanodeDetails(), - MockDatanodeDetails.randomDatanodeDetails().getUuid(), + DatanodeID.randomID(), sequenceID - 1)); replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, 1, 1, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index b97d6a2bbb63..755f21714b0b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -257,7 +257,7 @@ public void testDecommissionWaitsForUnhealthyReplicaToReplicateNewRM() ReplicationTestUtil.createContainerReplica(containerID, 0, dn1.getPersistedOpState(), State.UNHEALTHY, container.getNumberOfKeys(), container.getUsedBytes(), dn1, - dn1.getUuid(), container.getSequenceId()); + dn1.getID(), container.getSequenceId()); replicas.add(unhealthy); nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); @@ -321,7 +321,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN ReplicationTestUtil.createContainerReplica(containerID, 0, 
dn1.getPersistedOpState(), State.UNHEALTHY, container.getNumberOfKeys(), container.getUsedBytes(), dn1, - dn1.getUuid(), container.getSequenceId()); + dn1.getID(), container.getSequenceId()); replicas.add(unhealthy); nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java index ae7eb60c4754..c6d42ffc2056 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java @@ -41,11 +41,11 @@ import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.UUID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -213,7 +213,7 @@ private Set createContainerReplicas(int number) { .setContainerState(StorageContainerDatanodeProtocolProtos .ContainerReplicaProto.State.CLOSED) .setKeyCount(1) - .setOriginNodeId(UUID.randomUUID()) + .setOriginNodeId(DatanodeID.randomID()) .setSequenceId(1) .setReplicaIndex(i + 1) .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index f232890c96b8..597725dfc8a1 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -945,7 +946,7 @@ private Set createContainerReplicasList( .setContainerState(StorageContainerDatanodeProtocolProtos .ContainerReplicaProto.State.CLOSED) .setKeyCount(1) - .setOriginNodeId(UUID.randomUUID()) + .setOriginNodeId(DatanodeID.randomID()) .setSequenceId(1) .setReplicaIndex(0) .setDatanodeDetails(dn) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index 4c53b972c4e3..62ad073f4e91 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -35,7 +35,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -43,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeID; import 
org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -419,7 +419,7 @@ private Set createContainerReplicas( .setContainerState(StorageContainerDatanodeProtocolProtos .ContainerReplicaProto.State.CLOSED) .setKeyCount(1) - .setOriginNodeId(UUID.randomUUID()) + .setOriginNodeId(DatanodeID.randomID()) .setSequenceId(1) .setReplicaIndex(0) .setDatanodeDetails(dn)