diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 113f620ebcd5..fe8dcc47001f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -17,16 +17,11 @@
 package org.apache.hadoop.hdds.scm.container;

-import static java.util.Comparator.reverseOrder;
 import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.CONTAINER_ID;
-import static org.apache.hadoop.hdds.utils.CollectionUtils.findTopN;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -144,21 +139,20 @@ public ContainerInfo getContainer(final ContainerID id)

   @Override
   public List<ContainerInfo> getContainers(ReplicationType type) {
-    return toContainers(containerStateManager.getContainerIDs(type));
+    return containerStateManager.getContainerInfos(type);
   }

   @Override
   public List<ContainerInfo> getContainers(final ContainerID startID,
       final int count) {
     scmContainerManagerMetrics.incNumListContainersOps();
-    return toContainers(filterSortAndLimit(startID, count,
-        containerStateManager.getContainerIDs()));
+    return containerStateManager.getContainerInfos(startID, count);
   }

   @Override
   public List<ContainerInfo> getContainers(final LifeCycleState state) {
     scmContainerManagerMetrics.incNumListContainersOps();
-    return toContainers(containerStateManager.getContainerIDs(state));
+    return containerStateManager.getContainerInfos(state);
   }

   @Override
@@ -166,13 +160,12 @@ public List<ContainerInfo> getContainers(final ContainerID startID,
   public List<ContainerInfo> getContainers(final ContainerID startID,
       final int count, final LifeCycleState state) {
     scmContainerManagerMetrics.incNumListContainersOps();
-    return toContainers(filterSortAndLimit(startID, count,
-        containerStateManager.getContainerIDs(state)));
+    return containerStateManager.getContainerInfos(state, startID, count);
   }

   @Override
   public int getContainerStateCount(final LifeCycleState state) {
-    return containerStateManager.getContainerIDs(state).size();
+    return containerStateManager.getContainerCount(state);
   }

   @Override
@@ -469,33 +462,4 @@ public ContainerStateManager getContainerStateManager() {
   public SCMHAManager getSCMHAManager() {
     return haManager;
   }
-
-  private static List<ContainerID> filterSortAndLimit(
-      ContainerID startID, int count, Set<ContainerID> set) {
-
-    if (ContainerID.MIN.equals(startID) && count >= set.size()) {
-      List<ContainerID> list = new ArrayList<>(set);
-      Collections.sort(list);
-      return list;
-    }
-
-    return findTopN(set, count, reverseOrder(),
-        id -> id.compareTo(startID) >= 0);
-  }
-
-  /**
-   * Returns a list of all containers identified by {@code ids}.
-   */
-  private List<ContainerInfo> toContainers(Collection<ContainerID> ids) {
-    List<ContainerInfo> containers = new ArrayList<>(ids.size());
-
-    for (ContainerID id : ids) {
-      ContainerInfo container = containerStateManager.getContainer(id);
-      if (container != null) {
-        containers.add(container);
-      }
-    }
-
-    return containers;
-  }
 }
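The listing methods of ContainerManagerImpl now simply forward to the state manager, so callers page through containers by (start, count) instead of post-filtering and sorting ID sets. Below is a minimal caller-side sketch of that paging loop; it assumes the ContainerManager interface declares the same getContainers(startID, count) overload overridden above, that ContainerID exposes a long id via getId(), and uses a hypothetical BATCH_SIZE constant.

    import java.util.List;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerManager;

    final class ContainerLister {
      private static final int BATCH_SIZE = 1000; // hypothetical page size

      private ContainerLister() {
      }

      static void listAll(ContainerManager containerManager) {
        ContainerID start = ContainerID.MIN;
        while (true) {
          // Each call returns up to BATCH_SIZE infos sorted by ContainerID,
          // starting from (and including) 'start'.
          final List<ContainerInfo> batch = containerManager.getContainers(start, BATCH_SIZE);
          if (batch.isEmpty()) {
            break;
          }
          batch.forEach(info -> System.out.println(info.containerID()));
          if (batch.size() < BATCH_SIZE) {
            break;
          }
          // 'start' is inclusive, so resume one past the last id of this page.
          final ContainerID last = batch.get(batch.size() - 1).containerID();
          start = ContainerID.valueOf(last.getId() + 1);
        }
      }
    }

Because the start id is inclusive, the loop advances to last id + 1 to avoid revisiting the final container of the previous page.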
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
index 99ddba2aa46d..43267d426571 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
@@ -54,6 +54,10 @@ private ContainerReplica(ContainerReplicaBuilder b) {
     sequenceId = b.sequenceId;
   }

+  public ContainerID getContainerID() {
+    return containerID;
+  }
+
   /**
    * Returns the DatanodeDetails to which this replica belongs.
    *
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 0ec9c2593ec5..624279c947d1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.container;

 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -103,22 +104,33 @@ public interface ContainerStateManager {
   boolean contains(ContainerID containerID);

   /**
-   * Returns the ID of all the managed containers.
+   * Get {@link ContainerInfo}s.
    *
-   * @return Set of {@link ContainerID}
+   * @param start the start {@link ContainerID} (inclusive)
+   * @param count the size limit
+   * @return a list of {@link ContainerInfo}
    */
-  Set<ContainerID> getContainerIDs();
+  List<ContainerInfo> getContainerInfos(ContainerID start, int count);

   /**
+   * Get {@link ContainerInfo}s for the given state.
    *
+   * @param start the start {@link ContainerID} (inclusive)
+   * @param count the size limit
+   * @return a list of {@link ContainerInfo}
    */
-  Set<ContainerID> getContainerIDs(LifeCycleState state);
+  List<ContainerInfo> getContainerInfos(LifeCycleState state, ContainerID start, int count);

+  /** @return all {@link ContainerInfo}s for the given state. */
+  List<ContainerInfo> getContainerInfos(LifeCycleState state);

   /**
-   * Returns the IDs of the Containers whose ReplicationType matches the given type.
+   * @return the number of containers for the given state.
    */
-  Set<ContainerID> getContainerIDs(ReplicationType type);
+  int getContainerCount(LifeCycleState state);
+
+  /** @return all {@link ContainerInfo}s for the given type. */
+  List<ContainerInfo> getContainerInfos(ReplicationType type);

   /**
    *
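With the interface reworked around ContainerInfo, a caller can both count and page containers by state without ever materializing an ID set. A small sketch against the methods declared above; the DeletedContainerReport class and the page size of 100 are illustrative only.

    import java.util.List;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerStateManager;

    final class DeletedContainerReport {
      private DeletedContainerReport() {
      }

      static void print(ContainerStateManager stateManager) {
        // Counting no longer fetches an ID set just to call size() on it.
        final int deleted = stateManager.getContainerCount(LifeCycleState.DELETED);
        // A bounded, sorted page of matching containers; 'start' is inclusive.
        final List<ContainerInfo> firstPage =
            stateManager.getContainerInfos(LifeCycleState.DELETED, ContainerID.MIN, 100);
        System.out.println(deleted + " deleted container(s); first page size = "
            + firstPage.size());
      }
    }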
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
index 693eed710bb0..28480085e097 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -32,13 +32,14 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LOCK_STRIPE_SIZE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LOCK_STRIPE_SIZE_DEFAULT;

-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.Striped;
 import java.io.IOException;
 import java.util.EnumMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -68,6 +69,7 @@
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
 import org.apache.ratis.util.AutoCloseableLock;
+import org.apache.ratis.util.Preconditions;
 import org.apache.ratis.util.function.CheckedConsumer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -239,7 +241,7 @@ private void initialize() throws IOException {
     while (iterator.hasNext()) {
       final ContainerInfo container = iterator.next().getValue();
-      Preconditions.checkNotNull(container);
+      Objects.requireNonNull(container, "container == null");
       containers.addContainer(container);
       if (container.getState() == LifeCycleState.OPEN) {
         try {
@@ -268,23 +270,37 @@ private void initialize() throws IOException {
   }

   @Override
-  public Set<ContainerID> getContainerIDs() {
+  public List<ContainerInfo> getContainerInfos(ContainerID start, int count) {
     try (AutoCloseableLock ignored = readLock()) {
-      return containers.getAllContainerIDs();
+      return containers.getContainerInfos(start, count);
     }
   }

   @Override
-  public Set<ContainerID> getContainerIDs(final LifeCycleState state) {
+  public List<ContainerInfo> getContainerInfos(LifeCycleState state, ContainerID start, int count) {
     try (AutoCloseableLock ignored = readLock()) {
-      return containers.getContainerIDsByState(state);
+      return containers.getContainerInfos(state, start, count);
     }
   }

   @Override
-  public Set<ContainerID> getContainerIDs(final ReplicationType type) {
+  public List<ContainerInfo> getContainerInfos(final LifeCycleState state) {
     try (AutoCloseableLock ignored = readLock()) {
-      return containers.getContainerIDsByType(type);
+      return containers.getContainerInfos(state);
+    }
+  }
+
+  @Override
+  public List<ContainerInfo> getContainerInfos(ReplicationType type) {
+    try (AutoCloseableLock ignored = readLock()) {
+      return containers.getContainerInfos(type);
+    }
+  }
+
+  @Override
+  public int getContainerCount(final LifeCycleState state) {
+    try (AutoCloseableLock ignored = readLock()) {
+      return containers.getContainerCount(state);
     }
   }
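Every new read method above takes the read lock through a try-with-resources block so the lock is released on any exit path. A self-contained sketch of that idiom, assuming Ratis' AutoCloseableLock.acquire(Lock) helper; the ReadLockedCounter class is illustrative and not part of the patch.

    import java.util.concurrent.locks.ReentrantReadWriteLock;
    import org.apache.ratis.util.AutoCloseableLock;

    final class ReadLockedCounter {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private int value;

      private AutoCloseableLock readLock() {
        // Same idiom as the readLock() helper used above: acquire the lock and
        // hand back an AutoCloseable so try-with-resources releases it.
        return AutoCloseableLock.acquire(lock.readLock());
      }

      int get() {
        try (AutoCloseableLock ignored = readLock()) {
          return value;
        }
      }
    }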
@@ -303,7 +319,7 @@ public void addContainer(final ContainerInfoProto containerInfo)
     // ClosedPipelineException once ClosedPipelineException is introduced
     // in PipelineManager.

-    Preconditions.checkNotNull(containerInfo);
+    Objects.requireNonNull(containerInfo, "containerInfo == null");
     final ContainerInfo container = ContainerInfo.fromProtobuf(containerInfo);
     final ContainerID containerID = container.containerID();
     final PipelineID pipelineID = container.getPipelineID();
@@ -403,7 +419,7 @@ public Set<ContainerReplica> getContainerReplicas(final ContainerID id) {
   public void updateContainerReplica(final ContainerID id,
       final ContainerReplica replica) {
     try (AutoCloseableLock ignored = writeLock(id)) {
-      containers.updateContainerReplica(id, replica);
+      containers.updateContainerReplica(replica);
       // Clear any pending additions for this replica as we have now seen it.
       containerReplicaPendingOps.completeAddReplica(id,
           replica.getDatanodeDetails(), replica.getReplicaIndex());
@@ -413,8 +429,10 @@ public void updateContainerReplica(final ContainerID id,
   @Override
   public void removeContainerReplica(final ContainerID id,
       final ContainerReplica replica) {
+    //TODO remove ContainerID parameter
+    Preconditions.assertEquals(id, replica.getContainerID(), "containerID");
     try (AutoCloseableLock ignored = writeLock(id)) {
-      containers.removeContainerReplica(id, replica);
+      containers.removeContainerReplica(id, replica.getDatanodeDetails().getID());
       // Remove any pending delete replication operations for the deleted
       // replica.
       containerReplicaPendingOps.completeDeleteReplica(id,
@@ -601,9 +619,9 @@ public Builder setContainerReplicaPendingOps(
     }

     public ContainerStateManager build() throws IOException {
-      Preconditions.checkNotNull(conf);
-      Preconditions.checkNotNull(pipelineMgr);
-      Preconditions.checkNotNull(table);
+      Objects.requireNonNull(conf, "conf == null");
+      Objects.requireNonNull(pipelineMgr, "pipelineMgr == null");
+      Objects.requireNonNull(table, "table == null");

       final ContainerStateManager csm = new ContainerStateManagerImpl(
           conf, pipelineMgr, table, transactionBuffer,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
index 4f4ad53ace53..1888471ea29e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
@@ -25,6 +25,7 @@
 import java.util.EnumMap;
 import java.util.NavigableSet;
 import java.util.Objects;
+import java.util.SortedSet;
 import java.util.TreeSet;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -138,6 +139,15 @@ public NavigableSet<ContainerID> getCollection(T key) {
     return ImmutableSortedSet.copyOf(get(key));
   }

+  public SortedSet<ContainerID> tailSet(T key, ContainerID start) {
+    Objects.requireNonNull(start, "start == null");
+    return get(key).tailSet(start);
+  }
+
+  public int count(T key) {
+    return get(key).size();
+  }
+
   /**
    * Moves a ContainerID from one bucket to another.
    *
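The new tailSet(key, start) and count(key) accessors let ContainerStateMap resume a listing from a given id and size a bucket without copying it. A small sketch against these methods, assuming ContainerAttribute is usable directly; the AttributePage class is illustrative.

    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.states.ContainerAttribute;

    final class AttributePage {
      private AttributePage() {
      }

      // Mirrors getContainerInfos(state, start, count): take the ordered view
      // of ids >= start for one bucket and cap it at 'count'.
      static List<ContainerID> page(ContainerAttribute<LifeCycleState> byState,
          LifeCycleState state, ContainerID start, int count) {
        return byState.tailSet(state, start).stream()
            .limit(count)
            .collect(Collectors.toList());
      }

      // Mirrors getContainerCount(state): size the bucket without copying it.
      static int size(ContainerAttribute<LifeCycleState> byState, LifeCycleState state) {
        return byState.count(state);
      }
    }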
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 40b7d51c8c75..a424183a6131 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,20 +18,23 @@
 package org.apache.hadoop.hdds.scm.container.states;

 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
+import java.util.NavigableMap;
+import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.ratis.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -72,17 +75,107 @@ public class ContainerStateMap {
   private static final Logger LOG =
       LoggerFactory.getLogger(ContainerStateMap.class);

+  /**
+   * A two-level map.
+   * Outer container map: {@link ContainerID} -> {@link Entry} (info and replicas)
+   * Inner replica map: {@link DatanodeID} -> {@link ContainerReplica}
+   */
+  private static class ContainerMap {
+    private static class Entry {
+      private final ContainerInfo info;
+      private final Map<DatanodeID, ContainerReplica> replicas = new HashMap<>();
+
+      Entry(ContainerInfo info) {
+        this.info = info;
+      }
+
+      ContainerInfo getInfo() {
+        return info;
+      }
+
+      Set<ContainerReplica> getReplicas() {
+        return new HashSet<>(replicas.values());
+      }
+
+      ContainerReplica put(ContainerReplica r) {
+        return replicas.put(r.getDatanodeDetails().getID(), r);
+      }
+
+      ContainerReplica removeReplica(DatanodeID datanodeID) {
+        return replicas.remove(datanodeID);
+      }
+    }
+
+    private final NavigableMap<ContainerID, Entry> map = new TreeMap<>();
+
+    boolean contains(ContainerID id) {
+      return map.containsKey(id);
+    }
+
+    ContainerInfo getInfo(ContainerID id) {
+      final Entry entry = map.get(id);
+      return entry == null ? null : entry.getInfo();
+    }
+
+    List<ContainerInfo> getInfos(ContainerID start, int count) {
+      Objects.requireNonNull(start, "start == null");
+      Preconditions.assertTrue(count >= 0, "count < 0");
+      return map.tailMap(start).values().stream()
+          .map(Entry::getInfo)
+          .limit(count)
+          .collect(Collectors.toList());
+    }
+
+    Set<ContainerReplica> getReplicas(ContainerID id) {
+      Objects.requireNonNull(id, "id == null");
+      final Entry entry = map.get(id);
+      return entry == null ? null : entry.getReplicas();
+    }
+
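ContainerMap.getInfos pages by taking a tailMap view of a TreeMap and capping the stream, so no sorting or full copy is needed. The same behaviour in a self-contained, JDK-only sketch:

    import java.util.List;
    import java.util.NavigableMap;
    import java.util.TreeMap;
    import java.util.stream.Collectors;

    final class TailMapPagingDemo {
      public static void main(String[] args) {
        final NavigableMap<Long, String> map = new TreeMap<>();
        for (long id = 1; id <= 10; id++) {
          map.put(id, "container-" + id);
        }
        // tailMap(start) is an ordered view of entries with key >= start, so
        // "start at id 4, return at most 3" is just a view plus a limit.
        final List<String> page = map.tailMap(4L).values().stream()
            .limit(3)
            .collect(Collectors.toList());
        System.out.println(page); // [container-4, container-5, container-6]
      }
    }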
+    /**
+     * Add the given info if it is not already in this map.
+     *
+     * @return true iff the given info is added.
+     */
+    boolean addIfAbsent(ContainerInfo info) {
+      Objects.requireNonNull(info, "info == null");
+      final ContainerID id = info.containerID();
+      if (map.containsKey(id)) {
+        return false; // already exists
+      }
+      final Entry previous = map.put(id, new Entry(info));
+      Preconditions.assertNull(previous, "previous");
+      return true;
+    }
+
+    ContainerReplica put(ContainerReplica replica) {
+      Objects.requireNonNull(replica, "replica == null");
+      final Entry entry = map.get(replica.getContainerID());
+      return entry == null ? null : entry.put(replica);
+    }
+
+    ContainerInfo remove(ContainerID id) {
+      Objects.requireNonNull(id, "id == null");
+      final Entry removed = map.remove(id);
+      return removed == null ? null : removed.getInfo();
+    }
+
+    ContainerReplica removeReplica(ContainerID containerID, DatanodeID datanodeID) {
+      Objects.requireNonNull(containerID, "containerID == null");
+      Objects.requireNonNull(datanodeID, "datanodeID == null");
+      final Entry entry = map.get(containerID);
+      return entry == null ? null : entry.removeReplica(datanodeID);
+    }
+  }
+
   private final ContainerAttribute<LifeCycleState> lifeCycleStateMap =
       new ContainerAttribute<>(LifeCycleState.class);
   private final ContainerAttribute<ReplicationType> typeMap =
       new ContainerAttribute<>(ReplicationType.class);
-  private final Map<ContainerID, ContainerInfo> containerMap;
-  private final Map<ContainerID, Set<ContainerReplica>> replicaMap;
+  private final ContainerMap containerMap = new ContainerMap();

   /**
    * Create a ContainerStateMap.
    */
   public ContainerStateMap() {
-    this.containerMap = new ConcurrentHashMap<>();
-    this.replicaMap = new ConcurrentHashMap<>();
   }

   @VisibleForTesting
@@ -94,24 +187,19 @@ public static Logger getLogger() {
    * Adds a ContainerInfo Entry in the ContainerStateMap.
    *
    * @param info - container info
-   * @throws SCMException - throws if create failed.
    */
-  public void addContainer(final ContainerInfo info)
-      throws SCMException {
-    Preconditions.checkNotNull(info, "Container Info cannot be null");
-    final ContainerID id = info.containerID();
-    if (!contains(id)) {
-      containerMap.put(id, info);
+  public void addContainer(final ContainerInfo info) {
+    Objects.requireNonNull(info, "info == null");
+    if (containerMap.addIfAbsent(info)) {
+      final ContainerID id = info.containerID();
       lifeCycleStateMap.insert(info.getState(), id);
       typeMap.insert(info.getReplicationType(), id);
-      replicaMap.put(id, Collections.emptySet());
-
-      LOG.trace("Container {} added to ContainerStateMap.", id);
+      LOG.trace("Added {}", info);
     }
   }

   public boolean contains(final ContainerID id) {
-    return containerMap.containsKey(id);
+    return containerMap.contains(id);
   }

   /**
@@ -120,15 +208,12 @@ public boolean contains(final ContainerID id) {
    * @param id - ContainerID
    */
   public void removeContainer(final ContainerID id) {
-    Preconditions.checkNotNull(id, "ContainerID cannot be null");
-    if (contains(id)) {
-      // Should we revert back to the original state if any of the below
-      // remove operation fails?
-      final ContainerInfo info = containerMap.remove(id);
+    Objects.requireNonNull(id, "id == null");
+    final ContainerInfo info = containerMap.remove(id);
+    if (info != null) {
       lifeCycleStateMap.remove(info.getState(), id);
       typeMap.remove(info.getReplicationType(), id);
-      replicaMap.remove(id);
-      LOG.trace("Container {} removed from ContainerStateMap.", id);
+      LOG.trace("Removed {}", info);
     }
   }

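With addIfAbsent() guarding the insert, ContainerStateMap.addContainer no longer declares SCMException and becomes a no-op on duplicates, while removeContainer quietly ignores ids that are not present. A sketch of those semantics; the IdempotentAddSketch class is illustrative and assumes a ContainerInfo that is not yet in the map.

    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;

    final class IdempotentAddSketch {
      private IdempotentAddSketch() {
      }

      static void addTwice(ContainerStateMap stateMap, ContainerInfo info) {
        // First call inserts the entry and indexes it by state and type.
        stateMap.addContainer(info);
        // Second call is a no-op: addIfAbsent() returns false, so the
        // attribute maps are not touched and no exception is thrown.
        stateMap.addContainer(info);
        // Removing an id that is absent is likewise a no-op.
        stateMap.removeContainer(info.containerID());
        stateMap.removeContainer(info.containerID());
      }
    }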
@@ -139,7 +224,7 @@ public void removeContainer(final ContainerID id) {
    * @return container info, if found else null.
    */
   public ContainerInfo getContainerInfo(final ContainerID containerID) {
-    return containerMap.get(containerID);
+    return containerMap.getInfo(containerID);
   }

   /**
@@ -148,8 +233,8 @@ public ContainerInfo getContainerInfo(final ContainerID containerID) {
    */
   public Set<ContainerReplica> getContainerReplicas(
       final ContainerID containerID) {
-    Preconditions.checkNotNull(containerID);
-    return replicaMap.get(containerID);
+    Objects.requireNonNull(containerID, "containerID == null");
+    return containerMap.getReplicas(containerID);
   }

   /**
@@ -157,39 +242,18 @@ public Set<ContainerReplica> getContainerReplicas(
    * Logs a debug entry if a datanode is already added as replica for given
    * ContainerId.
    */
-  public void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) {
-    Preconditions.checkNotNull(containerID);
-    if (contains(containerID)) {
-      final Set<ContainerReplica> newSet = createNewReplicaSet(containerID);
-      newSet.remove(replica);
-      newSet.add(replica);
-      replaceReplicaSet(containerID, newSet);
-    }
+  public void updateContainerReplica(ContainerReplica replica) {
+    Objects.requireNonNull(replica, "replica == null");
+    containerMap.put(replica);
   }

   /**
    * Remove a container Replica for given DataNode.
    */
-  public void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) {
-    Preconditions.checkNotNull(containerID);
-    Preconditions.checkNotNull(replica);
-    if (contains(containerID)) {
-      final Set<ContainerReplica> newSet = createNewReplicaSet(containerID);
-      newSet.remove(replica);
-      replaceReplicaSet(containerID, newSet);
-    }
-  }
-
-  private Set<ContainerReplica> createNewReplicaSet(ContainerID containerID) {
-    Set<ContainerReplica> existingSet = replicaMap.get(containerID);
-    return existingSet == null ? new HashSet<>() : new HashSet<>(existingSet);
-  }
-
-  private void replaceReplicaSet(ContainerID containerID,
-      Set<ContainerReplica> newSet) {
-    replicaMap.put(containerID, Collections.unmodifiableSet(newSet));
+  public void removeContainerReplica(final ContainerID containerID, DatanodeID datanodeID) {
+    Objects.requireNonNull(containerID, "containerID == null");
+    Objects.requireNonNull(datanodeID, "datanodeID == null");
+    containerMap.removeReplica(containerID, datanodeID);
   }

   /**
@@ -205,7 +269,7 @@ public void updateState(ContainerID containerID, LifeCycleState currentState,
     if (currentState == newState) { // state not changed
       return;
     }
-    final ContainerInfo currentInfo = containerMap.get(containerID);
+    final ContainerInfo currentInfo = containerMap.getInfo(containerID);
     if (currentInfo == null) { // container not found
       return;
     }
@@ -214,30 +278,39 @@ public void updateState(ContainerID containerID, LifeCycleState currentState,
     currentInfo.setState(newState);
   }

-  public Set<ContainerID> getAllContainerIDs() {
-    return ImmutableSet.copyOf(containerMap.keySet());
+  public List<ContainerInfo> getContainerInfos(ContainerID start, int count) {
+    return containerMap.getInfos(start, count);
   }

   /**
-   * Returns Containers in the System by the Type.
    *
-   * @param type - Replication type -- StandAlone, Ratis etc.
-   * @return NavigableSet<ContainerID>
+   * @param state the state of the {@link ContainerInfo}s
+   * @param start the start id
+   * @param count the maximum size of the returned list
+   * @return a list of {@link ContainerInfo}s sorted by {@link ContainerID}
    */
-  public NavigableSet<ContainerID> getContainerIDsByType(final ReplicationType type) {
-    Preconditions.checkNotNull(type);
-    return typeMap.getCollection(type);
+  public List<ContainerInfo> getContainerInfos(LifeCycleState state, ContainerID start, int count) {
+    Preconditions.assertTrue(count >= 0, "count < 0");
+    return lifeCycleStateMap.tailSet(state, start).stream()
+        .map(this::getContainerInfo)
+        .limit(count)
+        .collect(Collectors.toList());
   }

-  /**
-   * Returns Containers by State.
-   *
-   * @param state - State - Open, Closed etc.
-   * @return List of containers by state.
-   */
-  public NavigableSet<ContainerID> getContainerIDsByState(
-      final LifeCycleState state) {
-    Preconditions.checkNotNull(state);
-    return lifeCycleStateMap.getCollection(state);
+  public List<ContainerInfo> getContainerInfos(LifeCycleState state) {
+    return lifeCycleStateMap.getCollection(state).stream()
+        .map(this::getContainerInfo)
+        .collect(Collectors.toList());
+  }
+
+  public List<ContainerInfo> getContainerInfos(ReplicationType type) {
+    return typeMap.getCollection(type).stream()
+        .map(this::getContainerInfo)
+        .collect(Collectors.toList());
+  }
+
+  /** @return the number of containers for the given state. */
+  public int getContainerCount(LifeCycleState state) {
+    return lifeCycleStateMap.count(state);
   }
 }
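Because each Entry keys its replicas by DatanodeID, a repeated report from the same datanode replaces the stored replica instead of accumulating a second one. A sketch of that behaviour; ReplicaUpdateSketch is illustrative and assumes the replica's container was already added to the map.

    import org.apache.hadoop.hdds.scm.container.ContainerReplica;
    import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;

    final class ReplicaUpdateSketch {
      private ReplicaUpdateSketch() {
      }

      static int reportTwice(ContainerStateMap stateMap, ContainerReplica replica) {
        // The inner map is keyed by DatanodeID, so the second report from the
        // same datanode overwrites the first entry rather than adding one.
        stateMap.updateContainerReplica(replica);
        stateMap.updateContainerReplica(replica);
        return stateMap.getContainerReplicas(replica.getContainerID()).size(); // 1, not 2
      }
    }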
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 8ae2f59ebb4d..560325b8a38e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -39,6 +39,8 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -134,9 +136,6 @@ public void testAllocateContainerWithDifferentOwner() throws IOException {
     assertNotNull(info);

     String newContainerOwner = "OZONE_NEW";
-    ContainerWithPipeline container2 = scm.getClientProtocolServer()
-        .allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), newContainerOwner);
     ContainerInfo info2 = containerManager
         .getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner,
             container1.getPipeline());
@@ -267,13 +266,15 @@ public void testGetMatchingContainerMultipleThreads()
     }
   }

+  void assertContainerCount(LifeCycleState state, int expected) {
+    final int computed = containerStateManager.getContainerCount(state);
+    assertEquals(expected, computed);
+  }
+
   @Test
   public void testUpdateContainerState() throws IOException,
-      InvalidStateTransitionException, TimeoutException {
-    Set<ContainerID> containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.OPEN);
-    int containers = containerList == null ? 0 : containerList.size();
-    assertEquals(0, containers);
+      InvalidStateTransitionException {
+    assertContainerCount(LifeCycleState.OPEN, 0);

     // Allocate container1 and update its state from
     // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
@@ -281,37 +282,20 @@ public void testUpdateContainerState() throws IOException,
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.OPEN);
-    assertEquals(1, containerList.size());
+    final ContainerID id1 = container1.getContainerInfo().containerID();
+    assertContainerCount(LifeCycleState.OPEN, 1);

-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.CLOSING);
-    assertEquals(1, containerList.size());
+    containerManager.updateContainerState(id1, LifeCycleEvent.FINALIZE);
+    assertContainerCount(LifeCycleState.CLOSING, 1);

-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.CLOSE);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.CLOSED);
-    assertEquals(1, containerList.size());
+    containerManager.updateContainerState(id1, LifeCycleEvent.CLOSE);
+    assertContainerCount(LifeCycleState.CLOSED, 1);

-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.DELETE);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
-    assertEquals(1, containerList.size());
+    containerManager.updateContainerState(id1, LifeCycleEvent.DELETE);
+    assertContainerCount(LifeCycleState.DELETING, 1);

-    containerManager
-        .updateContainerState(container1.getContainerInfo().containerID(),
-            HddsProtos.LifeCycleEvent.CLEANUP);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerList.size());
+    containerManager.updateContainerState(id1, LifeCycleEvent.CLEANUP);
+    assertContainerCount(LifeCycleState.DELETED, 1);

     // Allocate container1 and update its state from
     // OPEN -> CLOSING -> CLOSED
@@ -328,9 +312,7 @@ public void testUpdateContainerState() throws IOException,
     containerManager
         .updateContainerState(container1.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
-    containerList = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.CLOSED);
-    assertEquals(1, containerList.size());
+    assertContainerCount(LifeCycleState.CLOSED, 1);
   }

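The integration test now asserts on getContainerCount(state) through a small helper instead of fetching ID sets and calling size(). The same pattern can be kept as a reusable static, as sketched below; ContainerCountAssertions is illustrative and assumes JUnit 5's Assertions, which these tests already import.

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
    import org.apache.hadoop.hdds.scm.container.ContainerStateManager;

    final class ContainerCountAssertions {
      private ContainerCountAssertions() {
      }

      // Same shape as the helper above, usable from any test that holds a
      // ContainerStateManager reference.
      static void assertContainerCount(ContainerStateManager stateManager,
          LifeCycleState state, int expected) {
        assertEquals(expected, stateManager.getContainerCount(state));
      }
    }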
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 9353f406a779..b87d9dc45902 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -1144,6 +1144,11 @@ protected ContainerWithPipeline getTestContainer(
     return new ContainerWithPipeline(containerInfo, localPipeline);
   }

+  void assertContainerCount(HddsProtos.LifeCycleState state, int expected) {
+    final int computed = containerStateManager.getContainerCount(state);
+    assertEquals(expected, computed);
+  }
+
   @Test
   public void testGetSCMDeletedContainers() throws Exception {
     reconContainerManager.addNewContainer(
@@ -1161,9 +1166,7 @@ public void testGetSCMDeletedContainers() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(102L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 1);

     reconContainerManager.updateContainerState(ContainerID.valueOf(103L),
         HddsProtos.LifeCycleEvent.FINALIZE);
@@ -1172,14 +1175,10 @@ public void testGetSCMDeletedContainers() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(103L),
             HddsProtos.LifeCycleEvent.DELETE);
-    containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(103L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(2, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 2);

     Response scmDeletedContainers =
         containerEndpoint.getSCMDeletedContainers(2, 0);
@@ -1212,9 +1211,7 @@ public void testGetSCMDeletedContainersLimitParam() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(104L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 1);

     reconContainerManager.updateContainerState(ContainerID.valueOf(105L),
         HddsProtos.LifeCycleEvent.FINALIZE);
@@ -1226,9 +1223,7 @@ public void testGetSCMDeletedContainersLimitParam() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(105L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(2, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 2);

     Response scmDeletedContainers =
         containerEndpoint.getSCMDeletedContainers(1, 0);
@@ -1258,9 +1253,7 @@ public void testGetSCMDeletedContainersPrevKeyParam() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(106L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 1);

     reconContainerManager.updateContainerState(ContainerID.valueOf(107L),
         HddsProtos.LifeCycleEvent.FINALIZE);
@@ -1272,9 +1265,7 @@ public void testGetSCMDeletedContainersPrevKeyParam() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(107L),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(2, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 2);

     Response scmDeletedContainers =
         containerEndpoint.getSCMDeletedContainers(2, 106L);
@@ -1554,16 +1545,12 @@ public void testGetOmContainersDeletedInSCM() throws Exception {
     reconContainerManager
         .updateContainerState(ContainerID.valueOf(1),
             HddsProtos.LifeCycleEvent.DELETE);
-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETING, 1);

     reconContainerManager
         .updateContainerState(ContainerID.valueOf(1),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 1);

     List<ContainerInfo> deletedSCMContainers =
         reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED);
@@ -1603,9 +1590,7 @@ public void testGetOmContainersDeletedInSCMLimitParam() throws Exception {
     // and then to DELETED
     updateContainerStateToDeleted(1);

-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(1, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 1);

     List<ContainerInfo> deletedSCMContainers =
         reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED);
@@ -1647,9 +1632,7 @@ public void testGetOmContainersDeletedInSCMPrevContainerParam()
     updateContainerStateToDeleted(1);
     updateContainerStateToDeleted(2);

-    Set<ContainerID> containerIDs = containerStateManager
-        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
-    assertEquals(2, containerIDs.size());
+    assertContainerCount(HddsProtos.LifeCycleState.DELETED, 2);

     List<ContainerInfo> deletedSCMContainers =
         reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED);