From dce600fb03f1ab759696448e8e70b56d511d9fee Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 8 Apr 2024 09:26:22 -0700 Subject: [PATCH 01/14] HDDS-8188. Support unlimited length in ozone admin container list and make it default --- ...ocationProtocolClientSideTranslatorPB.java | 6 +- .../hdds/scm/container/ContainerManager.java | 4 +- .../scm/container/ContainerManagerImpl.java | 3 +- .../scm/server/SCMClientProtocolServer.java | 49 +++++++++------- .../container/TestContainerManagerImpl.java | 5 ++ .../server/TestSCMClientProtocolServer.java | 58 +++++++++++++++++++ .../scm/cli/container/ListSubcommand.java | 5 +- 7 files changed, 103 insertions(+), 27 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index b573ee0d040c..a6349bf06be2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -399,8 +399,10 @@ public List listContainer(long startContainerID, int count, throws IOException { Preconditions.checkState(startContainerID >= 0, "Container ID cannot be negative."); - Preconditions.checkState(count > 0, - "Container count must be greater than 0."); + if (count <= 0 && count != -1) { + throw new IllegalStateException( + "Container count must be greater than 0, or equal to -1"); + } SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); builder.setStartContainerID(startContainerID); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 2a60e268ff4a..0f5f3bac4708 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -63,7 +63,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= -1 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -87,7 +87,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= -1 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. 
* @param state container state diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 8e1e881c44ea..9e2ab2503d00 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -451,7 +451,8 @@ public SCMHAManager getSCMHAManager() { private static List filterSortAndLimit( ContainerID startID, int count, Set set) { - if (ContainerID.MIN.equals(startID) && count >= set.size()) { + if (ContainerID.MIN.equals(startID) && + (count >= set.size() || count == -1)) { List list = new ArrayList<>(set); Collections.sort(list); return list; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index ecfb92104da2..041c46ba1386 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -462,27 +462,36 @@ public List listContainer(long startContainerID, final ContainerID containerId = ContainerID.valueOf(startContainerID); if (state != null) { if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. + .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> (info.getReplicationFactor() == factor)) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } } else { if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers().stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. 
+ .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) + : containerInfoStream.limit(count).collect(Collectors.toList()); } else { return scm.getContainerManager().getContainers(containerId, count); } @@ -554,9 +563,9 @@ public List listContainer(long startContainerID, containerStream = containerStream .filter(info -> info.getReplicationType() == replicationType); } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); + Stream containerInfoStream = containerStream.sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 25a4a80f233e..001c00f89e09 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -147,6 +147,8 @@ void testGetContainers() throws Exception { containerManager.getContainers(ContainerID.MIN, 10)); assertIds(ids.subList(0, 5), containerManager.getContainers(ContainerID.MIN, 5)); + assertIds(ids, + containerManager.getContainers(ContainerID.MIN, -1)); assertIds(ids, containerManager.getContainers(ids.get(0), 10)); assertIds(ids, containerManager.getContainers(ids.get(0), 100)); @@ -154,6 +156,9 @@ void testGetContainers() throws Exception { containerManager.getContainers(ids.get(5), 100)); assertIds(emptyList(), containerManager.getContainers(ids.get(5), 100, LifeCycleState.CLOSED)); + assertIds(ids, + containerManager.getContainers(ContainerID.MIN, -1, + LifeCycleState.OPEN)); containerManager.updateContainerState(ids.get(0), HddsProtos.LifeCycleEvent.FINALIZE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 7c06b79a2ffb..8c28b9601b07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hdds.scm.server; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import 
org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -35,9 +42,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -112,4 +123,51 @@ public void testReadOnlyAdmins() throws IOException { UserGroupInformation.reset(); } } + + /** + * Tests listContainer of scm. + */ + @Test + public void testScmListContainer() throws Exception { + SCMClientProtocolServer scmServer = + new SCMClientProtocolServer(new OzoneConfiguration(), + mockStorageContainerManager(), mock(ReconfigurationHandler.class)); + + assertEquals(10, scmServer.listContainer(1, 10, + null, HddsProtos.ReplicationType.RATIS, null).size()); + assertEquals(20, scmServer.listContainer(1, -1, + null, HddsProtos.ReplicationType.RATIS, null).size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).size()); + assertEquals(20, scmServer.listContainer(1, -1, null, + HddsProtos.ReplicationFactor.THREE).size()); + } + + private StorageContainerManager mockStorageContainerManager() { + List infos = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + infos.add(newContainerInfoForTest()); + } + ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); + when(containerManager.getContainers()).thenReturn(infos); + StorageContainerManager storageContainerManager = mock(StorageContainerManager.class); + when(storageContainerManager.getContainerManager()).thenReturn(containerManager); + + SCMNodeDetails scmNodeDetails = mock(SCMNodeDetails.class); + when(scmNodeDetails.getClientProtocolServerAddress()).thenReturn(new InetSocketAddress("localhost", 9876)); + when(scmNodeDetails.getClientProtocolServerAddressKey()).thenReturn("test"); + when(storageContainerManager.getScmNodeDetails()).thenReturn(scmNodeDetails); + return storageContainerManager; + } + + private ContainerInfo newContainerInfoForTest() { + return new ContainerInfo.Builder() + .setContainerID(1234) + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .build(); + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ecc43d04087a..071f7ad63621 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -55,8 +55,9 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", - defaultValue = "20", showDefaultValue = Visibility.ALWAYS) + description = "Maximum number of containers to list." 
+ + "Make count=-1 default to list all containers", + defaultValue = "-1", showDefaultValue = Visibility.ALWAYS) private int count; @Option(names = {"--state"}, From f1e7c6eaaea54c5ead5c405fa52b420d28d10f91 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 8 Apr 2024 11:34:41 -0700 Subject: [PATCH 02/14] update error message of listContainer of StorageContainerLocationProtocolClientSideTranslatorPB class --- ...StorageContainerLocationProtocolClientSideTranslatorPB.java | 3 ++- .../hadoop/hdds/scm/server/TestSCMClientProtocolServer.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index a6349bf06be2..32681f830148 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -401,7 +401,8 @@ public List listContainer(long startContainerID, int count, "Container ID cannot be negative."); if (count <= 0 && count != -1) { throw new IllegalStateException( - "Container count must be greater than 0, or equal to -1"); + "Container count must be either greater than 0 " + + "or equal to -1 (list all containers)"); } SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 8c28b9601b07..20a4ff683956 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -163,7 +163,7 @@ private StorageContainerManager mockStorageContainerManager() { private ContainerInfo newContainerInfoForTest() { return new ContainerInfo.Builder() - .setContainerID(1234) + .setContainerID(1) .setPipelineID(PipelineID.randomId()) .setReplicationConfig( RatisReplicationConfig From 28c3347d8e570a6cbf0aa8842d0d256c1bc01de7 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 11:47:15 -0700 Subject: [PATCH 03/14] Change to make list all containers non-default, printing a message to stdout indicating the result is truncated --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 6 +++ .../hadoop/hdds/scm/client/ScmClient.java | 4 +- .../StorageContainerLocationProtocol.java | 8 ++-- .../src/main/resources/ozone-default.xml | 7 ++++ ...ocationProtocolClientSideTranslatorPB.java | 12 +++--- .../src/main/proto/ScmAdminProtocol.proto | 1 + .../scm/container/ContainerManagerImpl.java | 6 +++ ...ocationProtocolServerSideTranslatorPB.java | 9 ++-- .../scm/server/SCMClientProtocolServer.java | 41 ++++++++++++------- .../container/TestContainerManagerImpl.java | 5 --- .../server/TestSCMClientProtocolServer.java | 4 -- .../scm/cli/ContainerOperationClient.java | 20 ++++++++- .../scm/cli/container/ContainerCommands.java | 8 ++++ .../scm/cli/container/ListSubcommand.java | 37 ++++++++++++++++- .../hadoop/ozone/TestContainerOperations.java | 19 +++++++++ .../hadoop/ozone/shell/TestOzoneShellHA.java | 
27 +++++++++++- .../freon/ClosedContainerReplicator.java | 2 +- 17 files changed, 171 insertions(+), 45 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index d8fdbc1063a9..c8c8f25f70be 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,6 +131,12 @@ public final class ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String HDDS_CONTAINER_LIST_MAX_COUNT = + "hdds.container.list.max.count"; + + public static final int HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + + // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 6a46741a06ec..a70665a6f0a0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -124,7 +124,7 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @return a list of pipeline. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count) throws IOException; /** @@ -137,7 +137,7 @@ List listContainer(long startContainerID, * @return a list of pipeline. * @throws IOException */ - List listContainer(long startContainerID, int count, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 90838366317f..f99e2561dcba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -148,7 +148,7 @@ List getExistContainerWithPipelinesInBatch( * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count) throws IOException; /** @@ -167,7 +167,7 @@ List listContainer(long startContainerID, * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -186,7 +186,7 @@ List listContainer(long startContainerID, * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; @@ -207,7 +207,7 @@ List listContainer(long startContainerID, * @return a list of container. 
* @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 5a4b9a22c887..747dfe22c31b 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -161,6 +161,13 @@ this not set. Ideally, this should be mapped to a fast disk like an SSD. + + hdds.container.list.max.count + 4096 + OZONE, CONTAINER, MANAGEMENT + The max number of containers info could be included in + response of ListContainer request. + hdds.datanode.dir diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 32681f830148..eb42c637b385 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -380,19 +380,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -437,12 +437,12 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; + return Pair.of(containerList, response.getContainerCount()); } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -1174,7 +1174,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state); + return listContainer(startContainerID, count, state).getLeft(); } @Override diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index eff95099371c..646edc887edd 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -293,6 +293,7 @@ message SCMListContainerRequestProto { 
message SCMListContainerResponseProto { repeated ContainerInfoProto containers = 1; + optional int64 containerCount = 2; } message SCMDeleteContainerRequestProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 9e2ab2503d00..fde57286099b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -86,6 +86,8 @@ public class ContainerManagerImpl implements ContainerManager { @SuppressWarnings("java:S2245") // no need for secure random private final Random random = new Random(); + private int maxCountOfContainerList; + /** * */ @@ -115,6 +117,10 @@ public ContainerManagerImpl( .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); + this.maxCountOfContainerList = conf + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 16a8cbd5a4f5..d37c4a7c8e2f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -849,21 +849,22 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - List containerList; + Pair, Long> containerListAndTotalCount; if (factor != null) { // Call from a legacy client - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, factor); } else { - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getLeft()) { builder.addContainers(container.getProtobuf()); } + builder.setContainerCount(containerListAndTotalCount.getRight()); return builder.build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 041c46ba1386..a83cd2de68b6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -412,7 +412,7 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @@ -428,7 +428,7 @@ public 
List listContainer(long startContainerID, * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @@ -445,7 +445,7 @@ public List listContainer(long startContainerID, */ @Override @Deprecated - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { boolean auditSuccess = true; @@ -470,15 +470,20 @@ public List listContainer(long startContainerID, .getReplicationType() != HddsProtos.ReplicationType.EC) .filter(info -> (info.getReplicationFactor() == factor)) .sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } else { Stream containerInfoStream = scm.getContainerManager().getContainers(state).stream() .filter(info -> info.containerID().getId() >= startContainerID) .sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of( + containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } } else { if (factor != null) { @@ -490,10 +495,14 @@ public List listContainer(long startContainerID, .getReplicationType() != HddsProtos.ReplicationType.EC) .filter(info -> info.getReplicationFactor() == factor) .sorted(); - return count == -1 ? 
containerInfoStream.collect(Collectors.toList()) - : containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } else { - return scm.getContainerManager().getContainers(containerId, count); + List containerInfos = + scm.getContainerManager().getContainers(containerId, count); + return Pair.of(containerInfos, (long)(containerInfos.size())); } } } catch (Exception ex) { @@ -520,7 +529,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig repConfig) throws IOException { @@ -541,7 +550,9 @@ public List listContainer(long startContainerID, final ContainerID containerId = ContainerID.valueOf(startContainerID); if (state == null && replicationType == null && repConfig == null) { // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); + List containerInfos = + scm.getContainerManager().getContainers(containerId, count); + return Pair.of(containerInfos, (long)containerInfos.size()); } List containerList; @@ -564,8 +575,10 @@ public List listContainer(long startContainerID, .filter(info -> info.getReplicationType() == replicationType); } Stream containerInfoStream = containerStream.sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 001c00f89e09..25a4a80f233e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -147,8 +147,6 @@ void testGetContainers() throws Exception { containerManager.getContainers(ContainerID.MIN, 10)); assertIds(ids.subList(0, 5), containerManager.getContainers(ContainerID.MIN, 5)); - assertIds(ids, - containerManager.getContainers(ContainerID.MIN, -1)); assertIds(ids, containerManager.getContainers(ids.get(0), 10)); assertIds(ids, containerManager.getContainers(ids.get(0), 100)); @@ -156,9 +154,6 @@ void testGetContainers() throws Exception { containerManager.getContainers(ids.get(5), 100)); assertIds(emptyList(), containerManager.getContainers(ids.get(5), 100, LifeCycleState.CLOSED)); - assertIds(ids, - containerManager.getContainers(ContainerID.MIN, -1, - LifeCycleState.OPEN)); containerManager.updateContainerState(ids.get(0), HddsProtos.LifeCycleEvent.FINALIZE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 20a4ff683956..f0958aeaa586 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -135,13 +135,9 @@ public void testScmListContainer() throws Exception { assertEquals(10, scmServer.listContainer(1, 10, null, HddsProtos.ReplicationType.RATIS, null).size()); - assertEquals(20, scmServer.listContainer(1, -1, - null, HddsProtos.ReplicationType.RATIS, null).size()); // Test call from a legacy client, which uses a different method of listContainer assertEquals(10, scmServer.listContainer(1, 10, null, HddsProtos.ReplicationFactor.THREE).size()); - assertEquals(20, scmServer.listContainer(1, -1, null, - HddsProtos.ReplicationFactor.THREE).size()); } private StorageContainerManager mockStorageContainerManager() { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d51479d44bbb..2d495e9c0db9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -82,6 +82,7 @@ public class ContainerOperationClient implements ScmClient { private final boolean containerTokenEnabled; private final OzoneConfiguration configuration; private XceiverClientManager xceiverClientManager; + private int maxCountOfContainerList; public synchronized XceiverClientManager getXceiverClientManager() throws IOException { @@ -109,6 +110,9 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { } containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); + maxCountOfContainerList = conf + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) @@ -338,17 +342,29 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { + if (count > maxCountOfContainerList) { + LOG.error("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count); } @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { + if (count > maxCountOfContainerList) { + LOG.error("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. 
The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 54c69273f0bc..15dd873491cc 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -26,6 +26,7 @@ import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -51,6 +52,9 @@ public class ContainerCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; + @ParentCommand + private OzoneAdmin parent; + @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); @@ -61,4 +65,8 @@ public Void call() throws Exception { public Class getParentType() { return OzoneAdmin.class; } + + public OzoneAdmin getParent() { + return parent; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 071f7ad63621..ec5eb71ee29c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -21,11 +21,13 @@ import java.util.List; import com.google.common.base.Strings; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -37,6 +39,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -60,6 +63,14 @@ public class ListSubcommand extends ScmSubcommand { defaultValue = "-1", showDefaultValue = Visibility.ALWAYS) private int count; + @Option(names = {"-a", "--all"}, + description = "List all results. However the total number of containers might exceed " + + " the cluster's limit \"hdds.container.list.max.count\"." 
+ + " The results will be capped at the " + + " maximum allowed count.", + defaultValue = "false") + private boolean all; + @Option(names = {"--state"}, description = "Container state(OPEN, CLOSING, QUASI_CLOSED, CLOSED, " + "DELETING, DELETED)") @@ -76,6 +87,9 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; + @ParentCommand + private ContainerCommands parent; + static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -106,12 +120,31 @@ public void execute(ScmClient scmClient) throws IOException { ReplicationType.fromProto(type), replication, new OzoneConfiguration()); } - List containerList = + + if (all) { + System.out.printf("Attempting to list all containers." + + " The total number of container might exceed" + + " the cluster's current limit of %s. The results will be capped at the" + + " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = parent.getParent().getOzoneConf() + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + } + + Pair, Long> containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); // Output data list - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getLeft()) { outputContainerInfo(container); } + + if (containerListAndTotalCount.getRight() > count) { + System.out.printf("Container List is truncated since it's too long. " + + "List the first %d records of %d. " + + "User won't be able to view the full list of containers until " + + "pagination feature is supported. %n", + count, containerListAndTotalCount.getRight()); + } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 5f8f34a2e3ce..43a5ea0158d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -62,6 +62,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.set(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -88,6 +89,24 @@ public void testCreate() throws Exception { .getContainerID()); } + /** + * Test to try to list number of containers over the max number Ozone allows. + * @throws Exception + */ + @Test + public void testListContainerExceedMaxAllowedCountOperations() throws Exception { + // create 2 containers in cluster where the limit of max count for + // listing container is set to 1 + for (int i = 0; i < 2; i++) { + storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor + .ONE, OzoneConsts.OZONE); + } + + assertEquals(1, storageClient.listContainer(0, 2) + .getLeft().size()); + } + /** * A simple test to get Pipeline with {@link ContainerOperationClient}. 
* @throws Exception diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index f86fd46946c4..870ff4d9be82 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -33,6 +33,7 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; @@ -145,6 +146,8 @@ public class TestOzoneShellHA { private static String omServiceId; private static int numOfOMs; + private static OzoneConfiguration ozoneConfiguration; + /** * Create a MiniOzoneCluster for testing with using distributed Ozone * handler type. @@ -186,6 +189,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + conf.setInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, 1); + ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -221,7 +226,7 @@ public static void shutdown() { @BeforeEach public void setup() throws UnsupportedEncodingException { ozoneShell = new OzoneShell(); - ozoneAdminShell = new OzoneAdmin(); + ozoneAdminShell = new OzoneAdmin(ozoneConfiguration); System.setOut(new PrintStream(out, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(err, false, DEFAULT_ENCODING)); } @@ -572,6 +577,26 @@ public void testOzoneAdminCmdList() throws UnsupportedEncodingException { execute(ozoneAdminShell, args); } + @Test + public void testOzoneAdminCmdListAllContainer() + throws UnsupportedEncodingException { + String[] args1 = new String[] {"container", "create", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + for (int i = 0; i < 2; i++) { + execute(ozoneAdminShell, args1); + } + + String[] args = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args); + assertEquals(1, getNumOfContainers()); + } + + private int getNumOfContainers() + throws UnsupportedEncodingException { + return out.toString(DEFAULT_ENCODING).split("\"containerID\" :").length - 1; + } + /** * Helper function to retrieve Ozone client configuration for trash testing. * @param hostPrefix Scheme + Authority. e.g. 
ofs://om-service-test1 diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index d471c13462f7..ccfac3711a82 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -101,7 +101,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000); + containerOperationClient.listContainer(0L, 1_000_000).getLeft(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); From 675888a4aed7c606bad9ec3003e26f393c76bd99 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 16:31:42 -0700 Subject: [PATCH 04/14] fix compilation error in TestSCMClientProtocolServer --- .../java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 1 - ...ageContainerLocationProtocolClientSideTranslatorPB.java | 7 ++----- .../apache/hadoop/hdds/scm/container/ContainerManager.java | 4 ++-- .../hadoop/hdds/scm/container/ContainerManagerImpl.java | 3 +-- .../hdds/scm/server/TestSCMClientProtocolServer.java | 4 ++-- 5 files changed, 7 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c8c8f25f70be..b90c80e20d13 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -136,7 +136,6 @@ public final class ScmConfigKeys { public static final int HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; - // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index eb42c637b385..b0ae35713b4d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -399,11 +399,8 @@ public Pair, Long> listContainer(long startContainerID, int throws IOException { Preconditions.checkState(startContainerID >= 0, "Container ID cannot be negative."); - if (count <= 0 && count != -1) { - throw new IllegalStateException( - "Container count must be either greater than 0 " + - "or equal to -1 (list all containers)"); - } + Preconditions.checkState(count > 0, + "Container count must be greater than 0."); SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); builder.setStartContainerID(startContainerID); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 0f5f3bac4708..2a60e268ff4a 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -63,7 +63,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= -1 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -87,7 +87,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= -1 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @param state container state diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index fde57286099b..a7818ff4760d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -457,8 +457,7 @@ public SCMHAManager getSCMHAManager() { private static List filterSortAndLimit( ContainerID startID, int count, Set set) { - if (ContainerID.MIN.equals(startID) && - (count >= set.size() || count == -1)) { + if (ContainerID.MIN.equals(startID) && count >= set.size()) { List list = new ArrayList<>(set); Collections.sort(list); return list; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index f0958aeaa586..3629b9ca1b3b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -134,10 +134,10 @@ public void testScmListContainer() throws Exception { mockStorageContainerManager(), mock(ReconfigurationHandler.class)); assertEquals(10, scmServer.listContainer(1, 10, - null, HddsProtos.ReplicationType.RATIS, null).size()); + null, HddsProtos.ReplicationType.RATIS, null).getLeft().size()); // Test call from a legacy client, which uses a different method of listContainer assertEquals(10, scmServer.listContainer(1, 10, null, - HddsProtos.ReplicationFactor.THREE).size()); + HddsProtos.ReplicationFactor.THREE).getLeft().size()); } private StorageContainerManager mockStorageContainerManager() { From b5eb86b9eb87a2f9840a00bdb3492b676f246f20 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 20:55:23 -0700 Subject: [PATCH 05/14] update method doc --- .../org/apache/hadoop/hdds/scm/client/ScmClient.java | 6 ++++-- .../protocol/StorageContainerLocationProtocol.java | 12 ++++++++---- .../hdds/scm/server/SCMClientProtocolServer.java | 12 ++++++++---- .../hdds/scm/server/TestSCMClientProtocolServer.java | 2 +- .../hdds/scm/cli/ContainerOperationClient.java | 2 +- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index a70665a6f0a0..a4debf1facc1 100644 
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -121,7 +121,8 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -134,7 +135,8 @@ Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, int count, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index f99e2561dcba..aa0aa5d78171 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -145,7 +145,8 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -164,7 +165,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -183,7 +185,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -204,7 +207,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
* @throws IOException */ Pair, Long> listContainer(long startContainerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index a83cd2de68b6..f26d15b37a88 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -408,7 +408,8 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -424,7 +425,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -440,7 +442,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -525,7 +528,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param repConfig Replication Config for the container. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
* @throws IOException */ @Override diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 3629b9ca1b3b..06fb0321c479 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -142,7 +142,7 @@ public void testScmListContainer() throws Exception { private StorageContainerManager mockStorageContainerManager() { List infos = new ArrayList<>(); - for (int i = 0; i < 20; i++) { + for (int i = 0; i < 10; i++) { infos.add(newContainerInfoForTest()); } ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 2d495e9c0db9..7b12094a5f41 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -360,7 +360,7 @@ public Pair, Long> listContainer(long startContainerID, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { if (count > maxCountOfContainerList) { - LOG.error("Attempting to list {} containers. However, this exceeds" + + LOG.warn("Attempting to list {} containers. However, this exceeds" + " the cluster's current limit of {}. The results will be capped at the" + " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; From 5d83cbb89e45491edba2163d38f2d67e1e8861ec Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Tue, 16 Apr 2024 12:13:05 -0700 Subject: [PATCH 06/14] update command message if count is greater than maxCountAllowed of configuration --- .../scm/cli/container/ListSubcommand.java | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ec5eb71ee29c..cc5178b2c11e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -58,9 +58,8 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list." + - "Make count=-1 default to list all containers", - defaultValue = "-1", showDefaultValue = Visibility.ALWAYS) + description = "Maximum number of containers to list.", + defaultValue = "20", showDefaultValue = Visibility.ALWAYS) private int count; @Option(names = {"-a", "--all"}, @@ -121,14 +120,22 @@ public void execute(ScmClient scmClient) throws IOException { replication, new OzoneConfiguration()); } + int maxCountAllowed = parent.getParent().getOzoneConf() + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); if (all) { System.out.printf("Attempting to list all containers." + " The total number of container might exceed" + " the cluster's current limit of %s. 
The results will be capped at the" + " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); - count = parent.getParent().getOzoneConf() - .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, - ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = maxCountAllowed; + } else { + if (count > maxCountAllowed) { + System.out.printf("Attempting to list the first %d records of containers." + + " However it exceeds the cluster's current limit of %d. The results will be capped at the" + + " maximum allowed count.%n", count, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = maxCountAllowed; + } } Pair, Long> containerListAndTotalCount = From b4bdd905ccb2a044768b4f10bf7122915048d771 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Thu, 2 May 2024 12:41:56 -0700 Subject: [PATCH 07/14] refactor listContainer method --- .../scm/server/SCMClientProtocolServer.java | 188 +++++++----------- 1 file changed, 73 insertions(+), 115 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index f26d15b37a88..fa2218ac2de5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -103,6 +103,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -451,63 +452,24 @@ public Pair, Long> listContainer(long startContainerID, public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { + return listContainerInternal(startContainerID, count, state, factor, null, null); + } + + private Pair, Long> listContainerInternal(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (factor != null) { - auditMap.put("factor", factor.name()); - } + Map auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig); + try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state != null) { - if (factor != null) { - Stream containerInfoStream = - scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. 
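The ListSubcommand changes above boil down to clamping the requested count to the cluster-wide limit, both for an explicit -c/--count and for -a/--all. A condensed sketch of that behaviour, assuming an OzoneConfiguration named conf and the parsed CLI fields count and all:

    // Sketch of the clamping logic, not the exact CLI code.
    int maxCountAllowed = conf.getInt(
        ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT,
        ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT);
    if (all) {
      count = maxCountAllowed;                  // list as many as the server permits
    } else if (count > maxCountAllowed) {
      System.out.printf("Requested %d containers; capping at the cluster limit of %d.%n",
          count, maxCountAllowed);
      count = maxCountAllowed;
    }

Note that the printed messages in the hunk above pass HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT to the format string rather than the configured value; the sketch prints the effective limit instead.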
- .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted(); - List containerInfos = containerInfoStream.collect( - Collectors.toList()); - return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), - (long)containerInfos.size()); - } else { - Stream containerInfoStream = - scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted(); - List containerInfos = containerInfoStream.collect( - Collectors.toList()); - return Pair.of( - containerInfos.stream().limit(count).collect(Collectors.toList()), - (long)containerInfos.size()); - } - } else { - if (factor != null) { - Stream containerInfoStream = - scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted(); - List containerInfos = containerInfoStream.collect( - Collectors.toList()); - return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), - (long)containerInfos.size()); - } else { - List containerInfos = - scm.getContainerManager().getContainers(containerId, count); - return Pair.of(containerInfos, (long)(containerInfos.size())); - } - } + Stream containerStream = + buildContainerStream(factor, replicationType, repConfig, getBaseContainerStream(state)); + List containerInfos = + containerStream.filter(info -> info.containerID().getId() >= startContainerID) + .sorted().collect(Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), (long) containerInfos.size()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -521,81 +483,77 @@ public Pair, Long> listContainer(long startContainerID, } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param repConfig Replication Config for the container. - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. - * @throws IOException - */ - @Override - public Pair, Long> listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); + private Stream buildContainerStream(HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig, + Stream containerStream) { + if (factor != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor); + } else if (repConfig != null) { + // If we have repConfig filter by it, as it includes repType too. 
+ // Otherwise, we may have a filter just for repType, eg all EC containers + // without filtering on their replication scheme + containerStream = containerStream + .filter(info -> info.getReplicationConfig().equals(repConfig)); + } else if (replicationType != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() == replicationType); + } + return containerStream; + } + + private Stream getBaseContainerStream(HddsProtos.LifeCycleState state) { + if (state != null) { + return scm.getContainerManager().getContainers(state).stream(); + } else { + return scm.getContainerManager().getContainers().stream(); + } + } + + private Map buildAuditMap(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) { + Map auditMap = new HashMap<>(); auditMap.put("startContainerID", String.valueOf(startContainerID)); auditMap.put("count", String.valueOf(count)); if (state != null) { auditMap.put("state", state.name()); } + if (factor != null) { + auditMap.put("factor", factor.name()); + } if (replicationType != null) { auditMap.put("replicationType", replicationType.toString()); } if (repConfig != null) { auditMap.put("replicationConfig", repConfig.toString()); } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state == null && replicationType == null && repConfig == null) { - // Not filters, so just return everything - List containerInfos = - scm.getContainerManager().getContainers(containerId, count); - return Pair.of(containerInfos, (long)containerInfos.size()); - } - List containerList; - if (state != null) { - containerList = scm.getContainerManager().getContainers(state); - } else { - containerList = scm.getContainerManager().getContainers(); - } + return auditMap; + } - Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); - // If we have repConfig filter by it, as it includes repType too. - // Otherwise, we may have a filter just for repType, eg all EC containers - // without filtering on their replication scheme - if (repConfig != null) { - containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); - } else if (replicationType != null) { - containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); - } - Stream containerInfoStream = containerStream.sorted(); - List containerInfos = containerInfoStream.collect( - Collectors.toList()); - return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), - (long)containerInfos.size()); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param repConfig Replication Config for the container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
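The buildContainerStream refactor above applies at most one replication filter, with a fixed precedence: a legacy factor first (which also excludes EC containers, since EC has no factor), then a full ReplicationConfig, then a bare ReplicationType. The precedence is easier to see in isolation; the helper below is a sketch with an assumed name, not part of the patch:

    import java.util.stream.Stream;

    // Illustrative only: mirrors the precedence used by buildContainerStream.
    static Stream<ContainerInfo> applyReplicationFilters(
        Stream<ContainerInfo> s,
        HddsProtos.ReplicationFactor factor,
        HddsProtos.ReplicationType type,
        ReplicationConfig repConfig) {
      if (factor != null) {
        // Legacy factor-based filter; EC containers carry no factor, so drop them.
        return s.filter(i -> i.getReplicationType() != HddsProtos.ReplicationType.EC)
            .filter(i -> i.getReplicationFactor() == factor);
      }
      if (repConfig != null) {
        // A full replication config already implies the replication type.
        return s.filter(i -> i.getReplicationConfig().equals(repConfig));
      }
      if (type != null) {
        return s.filter(i -> i.getReplicationType() == type);
      }
      return s;
    }

Keeping the EC exclusion tied to the factor branch preserves the old semantics, where factor-based queries never matched EC containers.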
+ * @throws IOException + */ + @Override + public Pair, Long> listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { + return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } + @Override public void deleteContainer(long containerID) throws IOException { Map auditMap = Maps.newHashMap(); From afc56527b028a2f9ba700f257521480cb8de147f Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Wed, 11 Sep 2024 12:46:44 +0530 Subject: [PATCH 08/14] HDDS-8188. Addressed Backward Compatibility Isssues --- .../hadoop/hdds/scm/client/ScmClient.java | 13 +- .../StorageContainerLocationProtocol.java | 23 ++- ...ocationProtocolClientSideTranslatorPB.java | 77 +++++++- ...ocationProtocolServerSideTranslatorPB.java | 4 +- .../scm/server/SCMClientProtocolServer.java | 165 ++++++++++++++++-- .../server/TestSCMClientProtocolServer.java | 4 +- .../scm/cli/ContainerOperationClient.java | 25 ++- .../scm/cli/container/ListSubcommand.java | 2 +- .../hadoop/ozone/TestContainerOperations.java | 2 +- .../freon/ClosedContainerReplicator.java | 2 +- 10 files changed, 277 insertions(+), 40 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index a4debf1facc1..769f3f279e76 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -125,7 +125,10 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainer(long startContainerID, + List listContainer(long startContainerID, + int count) throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException; /** @@ -139,7 +142,13 @@ Pair, Long> listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainer(long startContainerID, int count, + List listContainer(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig replicationConfig) + throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index aa0aa5d78171..4deb0081a767 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -149,7 +149,10 @@ List getExistContainerWithPipelinesInBatch( * in "hdds.container.list.max.count" and total number of containers. 
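At the interface level, patch 08 leaves the original List-returning listContainer in place and adds listContainerWithCount alongside it, so older callers compile unchanged while newer callers also receive the total count. A sketch of the two call styles at this point in the series (the scmClient variable and arguments are illustrative; a later patch wraps the pair in ContainerListResult):

    // Pre-existing callers: unchanged, list only.
    List<ContainerInfo> containers = scmClient.listContainer(0L, 1000);

    // New callers: capped page plus the total number of matching containers.
    Pair<List<ContainerInfo>, Long> withCount =
        scmClient.listContainerWithCount(0L, 1000);
    System.out.println("Total matching: " + withCount.getRight());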
* @throws IOException */ - Pair, Long> listContainer(long startContainerID, + List listContainer(long startContainerID, + int count) throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException; /** @@ -169,7 +172,10 @@ Pair, Long> listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainer(long startContainerID, + List listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state) throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -189,7 +195,11 @@ Pair, Long> listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainer(long startContainerID, + List listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor) throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; @@ -211,7 +221,12 @@ Pair, Long> listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainer(long startContainerID, + List listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig replicationConfig) throws IOException; + + Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index b0ae35713b4d..f7c1fb40c692 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -380,19 +380,77 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public Pair, Long> listContainer(long startContainerID, int count) + public List listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public Pair, Long> listContainer(long startContainerID, int count, + public Pair, Long> listContainerWithCount(long startContainerID, int count) + throws IOException { + return listContainerWithCount(startContainerID, count, null, null, null); + } + + @Override + public List listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public Pair, Long> listContainer(long startContainerID, int count, + public Pair, Long> listContainerWithCount(long startContainerID, int count, + HddsProtos.LifeCycleState state) throws IOException { + return listContainerWithCount(startContainerID, count, state, null, null); + } + + @Override + public List 
listContainer(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig replicationConfig) + throws IOException { + Preconditions.checkState(startContainerID >= 0, + "Container ID cannot be negative."); + Preconditions.checkState(count > 0, + "Container count must be greater than 0."); + SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto + .newBuilder(); + builder.setStartContainerID(startContainerID); + builder.setCount(count); + builder.setTraceID(TracingUtil.exportCurrentSpan()); + if (state != null) { + builder.setState(state); + } + if (replicationConfig != null) { + if (replicationConfig.getReplicationType() == EC) { + builder.setType(EC); + builder.setEcReplicationConfig( + ((ECReplicationConfig)replicationConfig).toProto()); + } else { + builder.setType(replicationConfig.getReplicationType()); + builder.setFactor(((ReplicatedReplicationConfig)replicationConfig) + .getReplicationFactor()); + } + } else if (replicationType != null) { + builder.setType(replicationType); + } + + SCMListContainerRequestProto request = builder.build(); + + SCMListContainerResponseProto response = + submitRequest(Type.ListContainer, + builder1 -> builder1.setScmListContainerRequest(request)) + .getScmListContainerResponse(); + List containerList = new ArrayList<>(); + for (HddsProtos.ContainerInfoProto containerInfoProto : response + .getContainersList()) { + containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); + } + return containerList; + } + + @Override + public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -439,7 +497,16 @@ public Pair, Long> listContainer(long startContainerID, int @Deprecated @Override - public Pair, Long> listContainer(long startContainerID, int count, + public List listContainer(long startContainerID, int count, + HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) + throws IOException { + throw new UnsupportedOperationException("Should no longer be called from " + + "the client side"); + } + + @Deprecated + @Override + public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -1171,7 +1238,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state).getLeft(); + return listContainerWithCount(startContainerID, count, state).getLeft(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index d37c4a7c8e2f..8340f113af14 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -853,10 +853,10 @@ public SCMListContainerResponseProto listContainer( if (factor != null) { // Call from a legacy client containerListAndTotalCount = - 
impl.listContainer(startContainerID, count, state, factor); + impl.listContainerWithCount(startContainerID, count, state, factor); } else { containerListAndTotalCount = - impl.listContainer(startContainerID, count, state, replicationType, + impl.listContainerWithCount(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index fa2218ac2de5..51252b0ee039 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -414,11 +414,17 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @throws IOException */ @Override - public Pair, Long> listContainer(long startContainerID, + public List listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } + @Override + public Pair, Long> listContainerWithCount(long startContainerID, + int count) throws IOException { + return listContainerWithCount(startContainerID, count, null, null, null); + } + /** * Lists a range of containers and get their info. * @@ -431,11 +437,17 @@ public Pair, Long> listContainer(long startContainerID, * @throws IOException */ @Override - public Pair, Long> listContainer(long startContainerID, + public List listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } + @Override + public Pair, Long> listContainerWithCount(long startContainerID, + int count, HddsProtos.LifeCycleState state) throws IOException { + return listContainerWithCount(startContainerID, count, state, null, null); + } + /** * Lists a range of containers and get their info. * @@ -449,17 +461,74 @@ public Pair, Long> listContainer(long startContainerID, */ @Override @Deprecated - public Pair, Long> listContainer(long startContainerID, + public List listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor) throws IOException { + boolean auditSuccess = true; + Map auditMap = Maps.newHashMap(); + auditMap.put("startContainerID", String.valueOf(startContainerID)); + auditMap.put("count", String.valueOf(count)); + if (state != null) { + auditMap.put("state", state.name()); + } + if (factor != null) { + auditMap.put("factor", factor.name()); + } + try { + final ContainerID containerId = ContainerID.valueOf(startContainerID); + if (state != null) { + if (factor != null) { + return scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. 
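The server-side translator hunk above routes requests that still carry a ReplicationFactor, meaning they come from a legacy client, to the deprecated factor-based overload, and everything else to the type/config overload. Restated as a compact sketch, with the surrounding variables (impl, factor, state, and so on) assumed to be in scope:

    Pair<List<ContainerInfo>, Long> containerListAndTotalCount;
    if (factor != null) {
      // Legacy client: the request only carries a replication factor.
      containerListAndTotalCount =
          impl.listContainerWithCount(startContainerID, count, state, factor);
    } else {
      containerListAndTotalCount =
          impl.listContainerWithCount(startContainerID, count, state,
              replicationType, repConfig);
    }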
+ .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> (info.getReplicationFactor() == factor)) + .sorted().limit(count).collect(Collectors.toList()); + } else { + return scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + .sorted().limit(count).collect(Collectors.toList()); + } + } else { + if (factor != null) { + return scm.getContainerManager().getContainers().stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. + .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor) + .sorted().limit(count).collect(Collectors.toList()); + } else { + return scm.getContainerManager().getContainers(containerId, count); + } + } + } catch (Exception ex) { + auditSuccess = false; + AUDIT.logReadFailure( + buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + AUDIT.logReadSuccess( + buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); + } + } + } + + @Override + @Deprecated + public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { return listContainerInternal(startContainerID, count, state, factor, null, null); } private Pair, Long> listContainerInternal(long startContainerID, int count, - HddsProtos.LifeCycleState state, - HddsProtos.ReplicationFactor factor, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { boolean auditSuccess = true; Map auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig); @@ -484,9 +553,9 @@ private Pair, Long> listContainerInternal(long startContaine } private Stream buildContainerStream(HddsProtos.ReplicationFactor factor, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig, - Stream containerStream) { + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig, + Stream containerStream) { if (factor != null) { containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC) .filter(info -> info.getReplicationFactor() == factor); @@ -511,10 +580,10 @@ private Stream getBaseContainerStream(HddsProtos.LifeCycleState s } private Map buildAuditMap(long startContainerID, int count, - HddsProtos.LifeCycleState state, - HddsProtos.ReplicationFactor factor, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) { + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) { Map auditMap = new HashMap<>(); auditMap.put("startContainerID", String.valueOf(startContainerID)); auditMap.put("count", String.valueOf(count)); @@ -546,10 +615,70 @@ private Map buildAuditMap(long startContainerID, int count, * @throws IOException */ @Override - public Pair, Long> listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { + public List listContainer(long startContainerID, + 
int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { + boolean auditSuccess = true; + Map auditMap = Maps.newHashMap(); + auditMap.put("startContainerID", String.valueOf(startContainerID)); + auditMap.put("count", String.valueOf(count)); + if (state != null) { + auditMap.put("state", state.name()); + } + if (replicationType != null) { + auditMap.put("replicationType", replicationType.toString()); + } + if (repConfig != null) { + auditMap.put("replicationConfig", repConfig.toString()); + } + try { + final ContainerID containerId = ContainerID.valueOf(startContainerID); + if (state == null && replicationType == null && repConfig == null) { + // Not filters, so just return everything + return scm.getContainerManager().getContainers(containerId, count); + } + + List containerList; + if (state != null) { + containerList = scm.getContainerManager().getContainers(state); + } else { + containerList = scm.getContainerManager().getContainers(); + } + + Stream containerStream = containerList.stream() + .filter(info -> info.containerID().getId() >= startContainerID); + // If we have repConfig filter by it, as it includes repType too. + // Otherwise, we may have a filter just for repType, eg all EC containers + // without filtering on their replication scheme + if (repConfig != null) { + containerStream = containerStream + .filter(info -> info.getReplicationConfig().equals(repConfig)); + } else if (replicationType != null) { + containerStream = containerStream + .filter(info -> info.getReplicationType() == replicationType); + } + return containerStream.sorted() + .limit(count) + .collect(Collectors.toList()); + } catch (Exception ex) { + auditSuccess = false; + AUDIT.logReadFailure( + buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + AUDIT.logReadSuccess( + buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); + } + } + } + + @Override + public Pair, Long> listContainerWithCount(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 06fb0321c479..ded1ec5b4db5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -133,10 +133,10 @@ public void testScmListContainer() throws Exception { new SCMClientProtocolServer(new OzoneConfiguration(), mockStorageContainerManager(), mock(ReconfigurationHandler.class)); - assertEquals(10, scmServer.listContainer(1, 10, + assertEquals(10, scmServer.listContainerWithCount(1, 10, null, HddsProtos.ReplicationType.RATIS, null).getLeft().size()); // Test call from a legacy client, which uses a different method of listContainer - assertEquals(10, scmServer.listContainer(1, 10, null, + assertEquals(10, scmServer.listContainerWithCount(1, 10, null, HddsProtos.ReplicationFactor.THREE).getLeft().size()); } diff --git 
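Both the legacy list path and listContainerInternal share the same audit pattern in SCMClientProtocolServer: build an audit map of the request parameters, log a read failure on any exception, and log a read success in finally only if nothing was thrown. A minimal sketch of that pattern; doListContainers is a placeholder for the actual listing logic, while AUDIT, SCMAction.LIST_CONTAINER and the buildAuditMessage helpers come from the patch:

    boolean auditSuccess = true;
    Map<String, String> auditMap = new HashMap<>();
    auditMap.put("startContainerID", String.valueOf(startContainerID));
    auditMap.put("count", String.valueOf(count));
    try {
      return doListContainers();   // placeholder for the real work
    } catch (Exception ex) {
      auditSuccess = false;
      AUDIT.logReadFailure(
          buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
      throw ex;
    } finally {
      if (auditSuccess) {
        AUDIT.logReadSuccess(
            buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
      }
    }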
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 7b12094a5f41..c1a687f27224 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -342,7 +342,14 @@ public void deleteContainer(long containerID, boolean force) } @Override - public Pair, Long> listContainer(long startContainerID, + public List listContainer(long startContainerID, + int count) throws IOException { + return storageContainerLocationClient.listContainer( + startContainerID, count); + } + + @Override + public Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException { if (count > maxCountOfContainerList) { LOG.error("Attempting to list {} containers. However, this exceeds" + @@ -350,12 +357,22 @@ public Pair, Long> listContainer(long startContainerID, " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; } - return storageContainerLocationClient.listContainer( + return storageContainerLocationClient.listContainerWithCount( startContainerID, count); } @Override - public Pair, Long> listContainer(long startContainerID, + public List listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType repType, + ReplicationConfig replicationConfig) throws IOException { + return storageContainerLocationClient.listContainer( + startContainerID, count, state, repType, replicationConfig); + } + + + @Override + public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { @@ -365,7 +382,7 @@ public Pair, Long> listContainer(long startContainerID, " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; } - return storageContainerLocationClient.listContainer( + return storageContainerLocationClient.listContainerWithCount( startContainerID, count, state, repType, replicationConfig); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index cc5178b2c11e..5b68cb920adc 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -139,7 +139,7 @@ public void execute(ScmClient scmClient) throws IOException { } Pair, Long> containerListAndTotalCount = - scmClient.listContainer(startId, count, state, type, repConfig); + scmClient.listContainerWithCount(startId, count, state, type, repConfig); // Output data list for (ContainerInfo container : containerListAndTotalCount.getLeft()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 43a5ea0158d3..39bbd15dbd68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -103,7 +103,7 @@ public void 
testListContainerExceedMaxAllowedCountOperations() throws Exception .ONE, OzoneConsts.OZONE); } - assertEquals(1, storageClient.listContainer(0, 2) + assertEquals(1, storageClient.listContainerWithCount(0, 2) .getLeft().size()); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index ccfac3711a82..b792e5d3c001 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -101,7 +101,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000).getLeft(); + containerOperationClient.listContainerWithCount(0L, 1_000_000).getLeft(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); From 6b2f0f2e8e0541d7175093f0acd73fe7fdff7bab Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Wed, 11 Sep 2024 16:57:55 +0530 Subject: [PATCH 09/14] Modified javadoc and fixed indentation --- .../hadoop/hdds/scm/client/ScmClient.java | 27 ++++++- .../StorageContainerLocationProtocol.java | 80 ++++++++++++++++--- .../scm/server/SCMClientProtocolServer.java | 69 ++++++++++++---- 3 files changed, 148 insertions(+), 28 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 91234fa9641f..b04f190a0883 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -122,13 +122,22 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ List listContainer(long startContainerID, int count) throws IOException; + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException; @@ -139,8 +148,7 @@ Pair, Long> listContainerWithCount(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ List listContainer(long startContainerID, int count, @@ -149,6 +157,17 @@ List listContainer(long startContainerID, int count, ReplicationConfig replicationConfig) throws IOException; + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. 
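The test changes above exercise the cap end to end: TestSCMClientProtocolServer mocks a ContainerManager holding ten containers, and TestContainerOperations lowers hdds.container.list.max.count so that a request for two containers comes back with one. A hedged JUnit 5 sketch of the same kind of assertion (the cluster setup and the storageClient wiring are assumed, not shown in the hunks above):

    // Assumes a cluster configured with hdds.container.list.max.count = 1
    // and a storageClient connected to its SCM, as in TestContainerOperations.
    @Test
    void listIsCappedAtConfiguredMaximum() throws Exception {
      assertEquals(1,
          storageClient.listContainerWithCount(0, 2).getLeft().size());
    }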
+ * @param state Container of this state will be returned. + * @param replicationConfig container replication Config. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 505d7672dab2..5ea1b3e3502d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -146,13 +146,28 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of container. * @throws IOException */ List listContainer(long startContainerID, int count) throws IOException; + /** + * Ask SCM a list of containers with a range of container names + * and the limit of count. + * Search container names between start name(exclusive), and + * use prefix name to filter the result. the max size of the + * searching range cannot exceed the value of count. + * + * @param startContainerID start container ID. + * @param count count, if count {@literal <} 0, the max size is unlimited.( + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big) + * + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException; @@ -169,13 +184,29 @@ Pair, Long> listContainerWithCount(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of container. * @throws IOException */ List listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; + /** + * Ask SCM a list of containers with a range of container names + * and the limit of count. + * Search container names between start name(exclusive), and + * use prefix name to filter the result. the max size of the + * searching range cannot exceed the value of count. + * + * @param startContainerID start container ID. + * @param count count, if count {@literal <} 0, the max size is unlimited.( + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big) + * @param state Container with this state will be returned. + * + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
+ * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; @@ -192,19 +223,34 @@ Pair, Long> listContainerWithCount(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of container. * @throws IOException */ List listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; + /** + * Ask SCM a list of containers with a range of container names + * and the limit of count. + * Search container names between start name(exclusive), and + * use prefix name to filter the result. the max size of the + * searching range cannot exceed the value of count. + * + * @param startContainerID start container ID. + * @param count count, if count {@literal <} 0, the max size is unlimited.( + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big) + * @param state Container with this state will be returned. + * @param factor Container factor + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -218,8 +264,7 @@ Pair, Long> listContainerWithCount(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of container. * @throws IOException */ List listContainer(long startContainerID, @@ -227,6 +272,23 @@ List listContainer(long startContainerID, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; + /** + * Ask SCM for a list of containers with a range of container ID, state + * and replication config, and the limit of count. + * The containers are returned from startID (exclusive), and + * filtered by state and replication config. The returned list is limited to + * count entries. + * + * @param startContainerID start container ID. + * @param count count, if count {@literal <} 0, the max size is unlimited.( + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big) + * @param state Container with this state will be returned. + * @param replicationConfig Replication config for the containers + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
+ * @throws IOException + */ Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3868acc426de..fd9e02649f1c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -418,8 +418,7 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ @Override @@ -428,6 +427,16 @@ public List listContainer(long startContainerID, return listContainer(startContainerID, count, null, null, null); } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ @Override public Pair, Long> listContainerWithCount(long startContainerID, int count) throws IOException { @@ -441,8 +450,7 @@ public Pair, Long> listContainerWithCount(long startContaine * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ @Override @@ -451,6 +459,17 @@ public List listContainer(long startContainerID, return listContainer(startContainerID, count, state, null, null); } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. + * @throws IOException + */ @Override public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { @@ -464,8 +483,7 @@ public Pair, Long> listContainerWithCount(long startContaine * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ @Override @@ -525,6 +543,17 @@ public List listContainer(long startContainerID, } } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param factor Container factor. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
+ * @throws IOException + */ @Override @Deprecated public Pair, Long> listContainerWithCount(long startContainerID, @@ -619,8 +648,7 @@ private Map buildAuditMap(long startContainerID, int count, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param repConfig Replication Config for the container. - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * @return a list of pipeline. * @throws IOException */ @Override @@ -656,33 +684,44 @@ public List listContainer(long startContainerID, } Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); + .filter(info -> info.containerID().getId() >= startContainerID); // If we have repConfig filter by it, as it includes repType too. // Otherwise, we may have a filter just for repType, eg all EC containers // without filtering on their replication scheme if (repConfig != null) { containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); + .filter(info -> info.getReplicationConfig().equals(repConfig)); } else if (replicationType != null) { containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); + .filter(info -> info.getReplicationType() == replicationType); } return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); + .limit(count) + .collect(Collectors.toList()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); + buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); + buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); } } } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param repConfig Replication Config for the container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
+ * @throws IOException + */ @Override public Pair, Long> listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, From 66148f131e8e9e35ab5e8d113cd7918e9aebc1c3 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Mon, 16 Sep 2024 16:40:56 +0530 Subject: [PATCH 10/14] Added a wrapper class and removed the deprecated method --- .../hdds/scm/client/ContainerListResult.java | 59 +++++++++++++++++++ .../hadoop/hdds/scm/client/ScmClient.java | 4 +- .../StorageContainerLocationProtocol.java | 28 ++------- ...ocationProtocolClientSideTranslatorPB.java | 20 ++----- ...ocationProtocolServerSideTranslatorPB.java | 18 ++---- .../scm/server/SCMClientProtocolServer.java | 33 +++-------- .../server/TestSCMClientProtocolServer.java | 5 +- .../scm/cli/ContainerOperationClient.java | 5 +- .../scm/cli/container/ListSubcommand.java | 15 ++--- .../hadoop/ozone/TestContainerOperations.java | 2 +- .../freon/ClosedContainerReplicator.java | 2 +- 11 files changed, 100 insertions(+), 91 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java new file mode 100644 index 000000000000..506a624e785d --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.client; + +import org.apache.hadoop.hdds.scm.container.ContainerInfo; + +import java.util.List; + +/** + * Wrapper class for the result of listing containers with their total count. + */ +public class ContainerListResult { + private final List containerInfoList; + private final long totalCount; + + /** + * Constructs a new ContainerListResult. + * + * @param containerInfoList the list of containers + * @param totalCount the total number of containers + */ + public ContainerListResult(List containerInfoList, long totalCount) { + this.containerInfoList = containerInfoList; + this.totalCount = totalCount; + } + + /** + * Gets the list of containers. + * + * @return the list of containers + */ + public List getContainerInfoList() { + return containerInfoList; + } + + /** + * Gets the total count of containers. 
+   *
+   * @return the total count of containers
+   */
+  public long getTotalCount() {
+    return totalCount;
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index b04f190a0883..92f198744be6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -138,7 +138,7 @@ List<ContainerInfo> listContainer(long startContainerID,
    * in "hdds.container.list.max.count" and total number of containers.
    * @throws IOException
    */
-  Pair<List<ContainerInfo>, Long> listContainerWithCount(long startContainerID,
+  ContainerListResult listContainerWithCount(long startContainerID,
       int count) throws IOException;
 
   /**
@@ -168,7 +168,7 @@ List<ContainerInfo> listContainer(long startContainerID, int count,
    * in "hdds.container.list.max.count" and total number of containers.
    * @throws IOException
    */
-  Pair<List<ContainerInfo>, Long> listContainerWithCount(long startContainerID, int count,
+  ContainerListResult listContainerWithCount(long startContainerID, int count,
       HddsProtos.LifeCycleState state,
       HddsProtos.ReplicationType replicationType,
       ReplicationConfig replicationConfig)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 5ea1b3e3502d..26401155edd3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.client.ContainerListResult;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
@@ -168,7 +169,7 @@ List<ContainerInfo> listContainer(long startContainerID,
    * in "hdds.container.list.max.count" and total number of containers.
    * @throws IOException
    */
-  Pair<List<ContainerInfo>, Long> listContainerWithCount(long startContainerID,
+  ContainerListResult listContainerWithCount(long startContainerID,
       int count) throws IOException;
 
   /**
@@ -207,7 +208,7 @@ List<ContainerInfo> listContainer(long startContainerID,
    * in "hdds.container.list.max.count" and total number of containers.
    * @throws IOException
    */
-  Pair<List<ContainerInfo>, Long> listContainerWithCount(long startContainerID,
+  ContainerListResult listContainerWithCount(long startContainerID,
       int count, HddsProtos.LifeCycleState state) throws IOException;
 
   /**
@@ -230,27 +231,6 @@ List<ContainerInfo> listContainer(long startContainerID,
       int count, HddsProtos.LifeCycleState state,
       HddsProtos.ReplicationFactor factor) throws IOException;
 
-  /**
-   * Ask SCM a list of containers with a range of container names
-   * and the limit of count.
-   * Search container names between start name(exclusive), and
-   * use prefix name to filter the result. the max size of the
-   * searching range cannot exceed the value of count.
-   *
-   * @param startContainerID start container ID.
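With patch 10 the Pair is replaced by the dedicated ContainerListResult wrapper introduced above, which makes call sites read more naturally. A sketch of a caller after this change (the scmClient variable and the count are illustrative):

    ContainerListResult result = scmClient.listContainerWithCount(0L, 100);
    for (ContainerInfo container : result.getContainerInfoList()) {
      System.out.println(container.containerID());
    }
    System.out.println("Total matching containers: " + result.getTotalCount());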
- * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * @param state Container with this state will be returned. - * @param factor Container factor - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. - * @throws IOException - */ - Pair, Long> listContainerWithCount(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationFactor factor) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -289,7 +269,7 @@ List listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - Pair, Long> listContainerWithCount(long startContainerID, + ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index f5840a602208..dc17144c3834 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -108,6 +108,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoRequestProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.client.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -388,7 +389,7 @@ public List listContainer(long startContainerID, int count) } @Override - public Pair, Long> listContainerWithCount(long startContainerID, int count) + public ContainerListResult listContainerWithCount(long startContainerID, int count) throws IOException { return listContainerWithCount(startContainerID, count, null, null, null); } @@ -400,7 +401,7 @@ public List listContainer(long startContainerID, int count, } @Override - public Pair, Long> listContainerWithCount(long startContainerID, int count, + public ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainerWithCount(startContainerID, count, state, null, null); } @@ -452,7 +453,7 @@ public List listContainer(long startContainerID, int count, } @Override - public Pair, Long> listContainerWithCount(long startContainerID, int count, + public ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -494,7 +495,7 @@ public Pair, Long> listContainerWithCount(long startContaine .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - 
return Pair.of(containerList, response.getContainerCount()); + return new ContainerListResult(containerList, response.getContainerCount()); } @Deprecated @@ -506,15 +507,6 @@ public List listContainer(long startContainerID, int count, "the client side"); } - @Deprecated - @Override - public Pair, Long> listContainerWithCount(long startContainerID, int count, - HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) - throws IOException { - throw new UnsupportedOperationException("Should no longer be called from " + - "the client side"); - } - /** * Ask SCM to delete a container by name. SCM will remove * the container mapping in its database. @@ -1254,7 +1246,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainerWithCount(startContainerID, count, state).getLeft(); + return listContainerWithCount(startContainerID, count, state).getContainerInfoList(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 6a03d4372ba4..2d7c932a4904 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -115,6 +115,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ResetDeletedBlockRetryCountResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.client.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -857,22 +858,15 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - Pair, Long> containerListAndTotalCount; - if (factor != null) { - // Call from a legacy client - containerListAndTotalCount = - impl.listContainerWithCount(startContainerID, count, state, factor); - } else { - containerListAndTotalCount = - impl.listContainerWithCount(startContainerID, count, state, replicationType, - repConfig); - } + ContainerListResult containerListAndTotalCount = + impl.listContainerWithCount(startContainerID, count, state, replicationType, + repConfig); SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerListAndTotalCount.getLeft()) { + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { builder.addContainers(container.getProtobuf()); } - builder.setContainerCount(containerListAndTotalCount.getRight()); + builder.setContainerCount(containerListAndTotalCount.getTotalCount()); return builder.build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index fd9e02649f1c..57971321255e 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.client.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -438,7 +439,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public Pair, Long> listContainerWithCount(long startContainerID, + public ContainerListResult listContainerWithCount(long startContainerID, int count) throws IOException { return listContainerWithCount(startContainerID, count, null, null, null); } @@ -471,7 +472,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public Pair, Long> listContainerWithCount(long startContainerID, + public ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainerWithCount(startContainerID, count, state, null, null); } @@ -543,26 +544,7 @@ public List listContainer(long startContainerID, } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param factor Container factor. - * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. 
- * @throws IOException - */ - @Override - @Deprecated - public Pair, Long> listContainerWithCount(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationFactor factor) throws IOException { - return listContainerInternal(startContainerID, count, state, factor, null, null); - } - - private Pair, Long> listContainerInternal(long startContainerID, int count, + private ContainerListResult listContainerInternal(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType replicationType, @@ -576,7 +558,10 @@ private Pair, Long> listContainerInternal(long startContaine List containerInfos = containerStream.filter(info -> info.containerID().getId() >= startContainerID) .sorted().collect(Collectors.toList()); - return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), (long) containerInfos.size()); + List limitedContainers = + containerInfos.stream().limit(count).collect(Collectors.toList()); + long totalCount = (long) containerInfos.size(); + return new ContainerListResult(limitedContainers, totalCount); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -723,7 +708,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public Pair, Long> listContainerWithCount(long startContainerID, + public ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig repConfig) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index ded1ec5b4db5..ecf0856ce2c2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -134,10 +134,7 @@ public void testScmListContainer() throws Exception { mockStorageContainerManager(), mock(ReconfigurationHandler.class)); assertEquals(10, scmServer.listContainerWithCount(1, 10, - null, HddsProtos.ReplicationType.RATIS, null).getLeft().size()); - // Test call from a legacy client, which uses a different method of listContainer - assertEquals(10, scmServer.listContainerWithCount(1, 10, null, - HddsProtos.ReplicationFactor.THREE).getLeft().size()); + null, HddsProtos.ReplicationType.RATIS, null).getContainerInfoList().size()); } private StorageContainerManager mockStorageContainerManager() { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 4a3e865d773f..9de12f6fa82a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.client.ContainerListResult; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import 
org.apache.hadoop.hdds.scm.container.ContainerID; @@ -350,7 +351,7 @@ public List listContainer(long startContainerID, } @Override - public Pair, Long> listContainerWithCount(long startContainerID, + public ContainerListResult listContainerWithCount(long startContainerID, int count) throws IOException { if (count > maxCountOfContainerList) { LOG.error("Attempting to list {} containers. However, this exceeds" + @@ -373,7 +374,7 @@ public List listContainer(long startContainerID, @Override - public Pair, Long> listContainerWithCount(long startContainerID, + public ContainerListResult listContainerWithCount(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 5b68cb920adc..a80761de5477 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; +import org.apache.hadoop.hdds.scm.client.ContainerListResult; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -124,34 +125,34 @@ public void execute(ScmClient scmClient) throws IOException { .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); if (all) { - System.out.printf("Attempting to list all containers." + + System.err.printf("Attempting to list all containers." + " The total number of container might exceed" + " the cluster's current limit of %s. The results will be capped at the" + " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); count = maxCountAllowed; } else { if (count > maxCountAllowed) { - System.out.printf("Attempting to list the first %d records of containers." + + System.err.printf("Attempting to list the first %d records of containers." + " However it exceeds the cluster's current limit of %d. The results will be capped at the" + " maximum allowed count.%n", count, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); count = maxCountAllowed; } } - Pair, Long> containerListAndTotalCount = + ContainerListResult containerListAndTotalCount = scmClient.listContainerWithCount(startId, count, state, type, repConfig); // Output data list - for (ContainerInfo container : containerListAndTotalCount.getLeft()) { + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { outputContainerInfo(container); } - if (containerListAndTotalCount.getRight() > count) { - System.out.printf("Container List is truncated since it's too long. " + + if (containerListAndTotalCount.getTotalCount() > count) { + System.err.printf("Container List is truncated since it's too long. " + "List the first %d records of %d. " + "User won't be able to view the full list of containers until " + "pagination feature is supported. 
%n", - count, containerListAndTotalCount.getRight()); + count, containerListAndTotalCount.getTotalCount()); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 50ecfe7616b1..298173d7921e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -160,7 +160,7 @@ public void testListContainerExceedMaxAllowedCountOperations() throws Exception } assertEquals(1, storageClient.listContainerWithCount(0, 2) - .getLeft().size()); + .getContainerInfoList().size()); } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index b792e5d3c001..6803c52dfd12 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -101,7 +101,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainerWithCount(0L, 1_000_000).getLeft(); + containerOperationClient.listContainerWithCount(0L, 1_000_000).getContainerInfoList(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); From ba0e0e2344f16c5a02fb39c231973c24fc300eb2 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Mon, 16 Sep 2024 17:20:48 +0530 Subject: [PATCH 11/14] Checkstyle errors fixed --- ...rageContainerLocationProtocolServerSideTranslatorPB.java | 6 +----- .../hadoop/hdds/scm/cli/container/ListSubcommand.java | 2 -- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 2d7c932a4904..b609f81a34ba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -833,7 +833,6 @@ public SCMListContainerResponseProto listContainer( } count = request.getCount(); HddsProtos.LifeCycleState state = null; - HddsProtos.ReplicationFactor factor = null; HddsProtos.ReplicationType replicationType = null; ReplicationConfig repConfig = null; if (request.hasState()) { @@ -855,12 +854,9 @@ public SCMListContainerResponseProto listContainer( .fromProtoTypeAndFactor(request.getType(), request.getFactor()); } } - } else if (request.hasFactor()) { - factor = request.getFactor(); } ContainerListResult containerListAndTotalCount = - impl.listContainerWithCount(startContainerID, count, state, replicationType, - repConfig); + impl.listContainerWithCount(startContainerID, count, state, replicationType, repConfig); SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index a80761de5477..c7eb168d5e28 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hdds.scm.cli.container; import java.io.IOException; -import java.util.List; import com.google.common.base.Strings; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; From 54c8fe76e9a3dbcd7f7dc56a519a8e7758eb6478 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Tue, 1 Oct 2024 15:49:19 +0530 Subject: [PATCH 12/14] Added wrapper class and pagination impl when --all is passed --- .../hadoop/hdds/scm/client/ScmClient.java | 33 +--- .../ContainerListResult.java | 4 +- .../StorageContainerLocationProtocol.java | 71 +------- ...ocationProtocolClientSideTranslatorPB.java | 73 ++------ ...ocationProtocolServerSideTranslatorPB.java | 16 +- .../scm/server/SCMClientProtocolServer.java | 169 ++---------------- .../server/TestSCMClientProtocolServer.java | 5 +- .../scm/cli/ContainerOperationClient.java | 27 +-- .../scm/cli/container/ListSubcommand.java | 61 ++++--- .../main/smoketest/admincli/container.robot | 12 ++ .../hadoop/ozone/TestContainerOperations.java | 2 +- .../hadoop/ozone/shell/TestOzoneShellHA.java | 17 +- .../freon/ClosedContainerReplicator.java | 2 +- 13 files changed, 113 insertions(+), 379 deletions(-) rename hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/{client => container}/ContainerListResult.java (94%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 92f198744be6..202840a2c28a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -116,18 +117,6 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) */ void deleteContainer(long containerId, boolean force) throws IOException; - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * - * @return a list of pipeline. - * @throws IOException - */ - List listContainer(long startContainerID, - int count) throws IOException; - /** * Lists a range of containers and get their info. * @@ -138,25 +127,9 @@ List listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. 
* @throws IOException */ - ContainerListResult listContainerWithCount(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container of this state will be returned. - * @param replicationConfig container replication Config. - * @return a list of pipeline. - * @throws IOException - */ - List listContainer(long startContainerID, int count, - HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig replicationConfig) - throws IOException; - /** * Lists a range of containers and get their info. * @@ -168,7 +141,7 @@ List listContainer(long startContainerID, int count, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - ContainerListResult listContainerWithCount(long startContainerID, int count, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java similarity index 94% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java index 506a624e785d..9e8d5738db86 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerListResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java @@ -15,9 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdds.scm.client; - -import org.apache.hadoop.hdds.scm.container.ContainerInfo; +package org.apache.hadoop.hdds.scm.container; import java.util.List; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 26401155edd3..d44cc2cd0768 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -135,24 +135,6 @@ List getContainerWithPipelineBatch( List getExistContainerWithPipelinesInBatch( List containerIDs); - /** - * Ask SCM a list of containers with a range of container names - * and the limit of count. - * Search container names between start name(exclusive), and - * use prefix name to filter the result. the max size of the - * searching range cannot exceed the value of count. 
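For callers affected by the rename from listContainerWithCount to listContainer, a rough migration sketch under the assumption that an ScmClient instance is available (the start ID and count are arbitrary; ContainerListResult now lives in the container package per the move in this patch):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerListResult;

    final class ListContainerMigrationSketch {
      // Older overloads returned a bare List<ContainerInfo>; the renamed
      // listContainer returns the wrapper, so callers unwrap it explicitly.
      static List<ContainerInfo> firstHundred(ScmClient scmClient) throws IOException {
        ContainerListResult result = scmClient.listContainer(0L, 100);
        return result.getContainerInfoList();
      }
    }
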
- * - * @param startContainerID start container ID. - * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * - * @return a list of container. - * @throws IOException - */ - List listContainer(long startContainerID, - int count) throws IOException; - /** * Ask SCM a list of containers with a range of container names * and the limit of count. @@ -169,28 +151,9 @@ List listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - ContainerListResult listContainerWithCount(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; - /** - * Ask SCM a list of containers with a range of container names - * and the limit of count. - * Search container names between start name(exclusive), and - * use prefix name to filter the result. the max size of the - * searching range cannot exceed the value of count. - * - * @param startContainerID start container ID. - * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * @param state Container with this state will be returned. - * - * @return a list of container. - * @throws IOException - */ - List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state) throws IOException; - /** * Ask SCM a list of containers with a range of container names * and the limit of count. @@ -208,7 +171,7 @@ List listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - ContainerListResult listContainerWithCount(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -224,34 +187,14 @@ ContainerListResult listContainerWithCount(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; - /** - * Ask SCM for a list of containers with a range of container ID, state - * and replication config, and the limit of count. - * The containers are returned from startID (exclusive), and - * filtered by state and replication config. The returned list is limited to - * count entries. - * - * @param startContainerID start container ID. - * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * @param state Container with this state will be returned. - * @param replicationConfig Replication config for the containers - * @return a list of container. 
- * @throws IOException - */ - List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig replicationConfig) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -269,7 +212,7 @@ List listContainer(long startContainerID, * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ - ContainerListResult listContainerWithCount(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index dc17144c3834..30e08f5994b3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -108,7 +108,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoRequestProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -383,31 +383,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public ContainerListResult listContainerWithCount(long startContainerID, int count) - throws IOException { - return listContainerWithCount(startContainerID, count, null, null, null); - } - - @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public ContainerListResult listContainerWithCount(long startContainerID, int count, - HddsProtos.LifeCycleState state) throws IOException { - return listContainerWithCount(startContainerID, count, state, null, null); - } - - @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -449,58 +437,17 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; - } - - @Override - public ContainerListResult listContainerWithCount(long startContainerID, int count, - 
HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig replicationConfig) - throws IOException { - Preconditions.checkState(startContainerID >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(count > 0, - "Container count must be greater than 0."); - SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto - .newBuilder(); - builder.setStartContainerID(startContainerID); - builder.setCount(count); - builder.setTraceID(TracingUtil.exportCurrentSpan()); - if (state != null) { - builder.setState(state); - } - if (replicationConfig != null) { - if (replicationConfig.getReplicationType() == EC) { - builder.setType(EC); - builder.setEcReplicationConfig( - ((ECReplicationConfig)replicationConfig).toProto()); - } else { - builder.setType(replicationConfig.getReplicationType()); - builder.setFactor(((ReplicatedReplicationConfig)replicationConfig) - .getReplicationFactor()); - } - } else if (replicationType != null) { - builder.setType(replicationType); + if (response.hasContainerCount()) { + return new ContainerListResult(containerList, response.getContainerCount()); } - - SCMListContainerRequestProto request = builder.build(); - - SCMListContainerResponseProto response = - submitRequest(Type.ListContainer, - builder1 -> builder1.setScmListContainerRequest(request)) - .getScmListContainerResponse(); - List containerList = new ArrayList<>(); - for (HddsProtos.ContainerInfoProto containerInfoProto : response - .getContainersList()) { - containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); + else { + return new ContainerListResult(containerList, -1); } - return new ContainerListResult(containerList, response.getContainerCount()); } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -1246,7 +1193,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainerWithCount(startContainerID, count, state).getContainerInfoList(); + return listContainer(startContainerID, count, state).getContainerInfoList(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index b609f81a34ba..7277e414e529 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -115,7 +115,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ResetDeletedBlockRetryCountResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -833,6 +833,7 @@ public SCMListContainerResponseProto listContainer( } count = request.getCount(); HddsProtos.LifeCycleState state = null; + HddsProtos.ReplicationFactor factor = null; HddsProtos.ReplicationType replicationType = null; ReplicationConfig repConfig = null; if (request.hasState()) { @@ -854,9 +855,18 @@ public SCMListContainerResponseProto listContainer( .fromProtoTypeAndFactor(request.getType(), request.getFactor()); } } + } else if (request.hasFactor()) { + factor = request.getFactor(); + } + ContainerListResult containerListAndTotalCount; + if (factor != null) { + // Call from a legacy client + containerListAndTotalCount = + impl.listContainer(startContainerID, count, state, factor); + } else { + containerListAndTotalCount = + impl.listContainer(startContainerID, count, state, replicationType, repConfig); } - ContainerListResult containerListAndTotalCount = - impl.listContainerWithCount(startContainerID, count, state, replicationType, repConfig); SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 57971321255e..2da6e203403a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -413,21 +413,6 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * - * @return a list of pipeline. - * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - return listContainer(startContainerID, count, null, null, null); - } - /** * Lists a range of containers and get their info. * @@ -439,25 +424,9 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public ContainerListResult listContainerWithCount(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { - return listContainerWithCount(startContainerID, count, null, null, null); - } - - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * - * @return a list of pipeline. 
- * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state, null, null); + return listContainer(startContainerID, count, null, null, null); } /** @@ -472,9 +441,9 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public ContainerListResult listContainerWithCount(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainerWithCount(startContainerID, count, state, null, null); + return listContainer(startContainerID, count, state, null, null); } /** @@ -484,64 +453,16 @@ public ContainerListResult listContainerWithCount(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @Deprecated - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (factor != null) { - auditMap.put("factor", factor.name()); - } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state != null) { - if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); - } - } else { - if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. 
- .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(containerId, count); - } - } - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } + return listContainerInternal(startContainerID, count, state, factor, null, null); } private ContainerListResult listContainerInternal(long startContainerID, int count, @@ -626,76 +547,6 @@ private Map buildAuditMap(long startContainerID, int count, return auditMap; } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param repConfig Replication Config for the container. - * @return a list of pipeline. - * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (replicationType != null) { - auditMap.put("replicationType", replicationType.toString()); - } - if (repConfig != null) { - auditMap.put("replicationConfig", repConfig.toString()); - } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state == null && replicationType == null && repConfig == null) { - // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); - } - - List containerList; - if (state != null) { - containerList = scm.getContainerManager().getContainers(state); - } else { - containerList = scm.getContainerManager().getContainers(); - } - - Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); - // If we have repConfig filter by it, as it includes repType too. - // Otherwise, we may have a filter just for repType, eg all EC containers - // without filtering on their replication scheme - if (repConfig != null) { - containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); - } else if (replicationType != null) { - containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); - } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } - } - /** * Lists a range of containers and get their info. 
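The consolidated listContainerInternal path reduces to a filter-sort-cap pipeline over the containers SCM already tracks; the following standalone sketch mirrors that shape (the helper class and its in-memory input list are illustrative, not SCM code):

    import java.util.List;
    import java.util.stream.Collectors;

    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerListResult;

    final class FilterSortLimitSketch {
      // Keep IDs >= startContainerID, sort, report the full filtered size as the
      // total count, and cap the returned page at 'count' entries.
      static ContainerListResult page(List<ContainerInfo> all, long startContainerID, int count) {
        List<ContainerInfo> filtered = all.stream()
            .filter(info -> info.containerID().getId() >= startContainerID)
            .sorted()
            .collect(Collectors.toList());
        List<ContainerInfo> limited = filtered.stream()
            .limit(count)
            .collect(Collectors.toList());
        return new ContainerListResult(limited, filtered.size());
      }
    }
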
* @@ -708,7 +559,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public ContainerListResult listContainerWithCount(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig repConfig) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index ecf0856ce2c2..8e21eef930e3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -133,8 +133,11 @@ public void testScmListContainer() throws Exception { new SCMClientProtocolServer(new OzoneConfiguration(), mockStorageContainerManager(), mock(ReconfigurationHandler.class)); - assertEquals(10, scmServer.listContainerWithCount(1, 10, + assertEquals(10, scmServer.listContainer(1, 10, null, HddsProtos.ReplicationType.RATIS, null).getContainerInfoList().size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).getContainerInfoList().size()); } private StorageContainerManager mockStorageContainerManager() { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 9de12f6fa82a..1322054d71b3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -344,14 +344,7 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, - int count) throws IOException { - return storageContainerLocationClient.listContainer( - startContainerID, count); - } - - @Override - public ContainerListResult listContainerWithCount(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { if (count > maxCountOfContainerList) { LOG.error("Attempting to list {} containers. 
However, this exceeds" + @@ -359,22 +352,12 @@ public ContainerListResult listContainerWithCount(long startContainerID, " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; } - return storageContainerLocationClient.listContainerWithCount( - startContainerID, count); - } - - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, - HddsProtos.ReplicationType repType, - ReplicationConfig replicationConfig) throws IOException { return storageContainerLocationClient.listContainer( - startContainerID, count, state, repType, replicationConfig); + startContainerID, count); } - @Override - public ContainerListResult listContainerWithCount(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { @@ -384,7 +367,7 @@ public ContainerListResult listContainerWithCount(long startContainerID, " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; } - return storageContainerLocationClient.listContainerWithCount( + return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index c7eb168d5e28..eccee15453f2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; -import org.apache.hadoop.hdds.scm.client.ContainerListResult; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -62,10 +62,7 @@ public class ListSubcommand extends ScmSubcommand { private int count; @Option(names = {"-a", "--all"}, - description = "List all results. However the total number of containers might exceed " + - " the cluster's limit \"hdds.container.list.max.count\"." + - " The results will be capped at the " + - " maximum allowed count.", + description = "List all containers.", defaultValue = "false") private boolean all; @@ -122,35 +119,45 @@ public void execute(ScmClient scmClient) throws IOException { int maxCountAllowed = parent.getParent().getOzoneConf() .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); - if (all) { - System.err.printf("Attempting to list all containers." + - " The total number of container might exceed" + - " the cluster's current limit of %s. The results will be capped at the" + - " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); - count = maxCountAllowed; - } else { + + ContainerListResult containerListAndTotalCount; + + if (!all) { if (count > maxCountAllowed) { System.err.printf("Attempting to list the first %d records of containers." + " However it exceeds the cluster's current limit of %d. 
The results will be capped at the" + " maximum allowed count.%n", count, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); count = maxCountAllowed; } - } - - ContainerListResult containerListAndTotalCount = - scmClient.listContainerWithCount(startId, count, state, type, repConfig); - - // Output data list - for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { - outputContainerInfo(container); - } + containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } - if (containerListAndTotalCount.getTotalCount() > count) { - System.err.printf("Container List is truncated since it's too long. " + - "List the first %d records of %d. " + - "User won't be able to view the full list of containers until " + - "pagination feature is supported. %n", - count, containerListAndTotalCount.getTotalCount()); + if (containerListAndTotalCount.getTotalCount() > count) { + System.err.printf("Container list is truncated as it is too long. " + + "Displayed the first %d records of %d total containers.%n", + count, containerListAndTotalCount.getTotalCount()); + } + } else { + // Batch size is either count passed through cli or maxCountAllowed + int batchSize = (count > 0) ? count : maxCountAllowed; + long currentStartId = startId; + int fetchedCount; + + do { + // Fetch containers in batches of 'batchSize' + containerListAndTotalCount = scmClient.listContainer(currentStartId, batchSize, state, type, repConfig); + fetchedCount = containerListAndTotalCount.getContainerInfoList().size(); + + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (fetchedCount > 0) { + currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID(); + } + } while (fetchedCount == batchSize); } } } diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index c50daa724dad..83c0731ff76c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -84,6 +84,18 @@ Report containers as JSON Should contain ${output} stats Should contain ${output} samples +List all containers + ${output} = Execute ozone admin container list --all + Should contain ${output} OPEN + +List all containers according to count (batchSize) + ${output} = Execute ozone admin container list --all --count 10 + Should contain ${output} OPEN + +List all containers from a particular container ID + ${output} = Execute ozone admin container list --all --start 1 + Should contain ${output} OPEN + Close container ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 Execute ozone admin container close "${container}" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 298173d7921e..3906fa507b27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -159,7 +159,7 @@ public void 
testListContainerExceedMaxAllowedCountOperations() throws Exception .ONE, OzoneConsts.OZONE); } - assertEquals(1, storageClient.listContainerWithCount(0, 2) + assertEquals(1, storageClient.listContainer(0, 2) .getContainerInfoList().size()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index b7417bb3d838..a1986207555e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -966,16 +966,23 @@ private String getStdOut() throws UnsupportedEncodingException { @Test public void testOzoneAdminCmdListAllContainer() throws UnsupportedEncodingException { - String[] args1 = new String[] {"container", "create", "--scm", + String[] args = new String[] {"container", "create", "--scm", "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; for (int i = 0; i < 2; i++) { - execute(ozoneAdminShell, args1); + execute(ozoneAdminShell, args); } - String[] args = new String[] {"container", "list", "-a", "--scm", - "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; - execute(ozoneAdminShell, args); + String[] args1 = new String[] {"container", "list", "-c", "10", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args1); + //results will be capped at the maximum allowed count assertEquals(1, getNumOfContainers()); + + String[] args2 = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args2); + //Lists all containers + assertNotEquals(1, getNumOfContainers()); } private int getNumOfContainers() diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 6803c52dfd12..393c7e599c54 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -101,7 +101,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainerWithCount(0L, 1_000_000).getContainerInfoList(); + containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); From e8df20021e3e7b305553d9d786a4a118e818eed1 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Tue, 1 Oct 2024 16:18:00 +0530 Subject: [PATCH 13/14] Checkstyle errors fixed -2 --- ...torageContainerLocationProtocolClientSideTranslatorPB.java | 4 ++-- .../hadoop/hdds/scm/server/SCMClientProtocolServer.java | 1 - .../java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 30e08f5994b3..d72c3a0e69d4 100644 --- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -437,10 +437,10 @@ public ContainerListResult listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } + if (response.hasContainerCount()) { return new ContainerListResult(containerList, response.getContainerCount()); - } - else { + } else { return new ContainerListResult(containerList, -1); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 2da6e203403a..7ad1996848aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -566,7 +566,6 @@ public ContainerListResult listContainer(long startContainerID, return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } - @Override public void deleteContainer(long containerID) throws IOException { Map auditMap = Maps.newHashMap(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index a1986207555e..1f5cfd6464aa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -973,7 +973,7 @@ public void testOzoneAdminCmdListAllContainer() } String[] args1 = new String[] {"container", "list", "-c", "10", "--scm", - "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; execute(ozoneAdminShell, args1); //results will be capped at the maximum allowed count assertEquals(1, getNumOfContainers()); From d6e6b0dc52058342b4438cacd8b98ee215b2d518 Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Wed, 9 Oct 2024 13:31:20 +0530 Subject: [PATCH 14/14] Modified config name and pagination logic --- .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 6 +++--- .../apache/hadoop/hdds/scm/client/ScmClient.java | 4 ++-- .../protocol/StorageContainerLocationProtocol.java | 8 ++++---- .../common/src/main/resources/ozone-default.xml | 4 ++-- .../hdds/scm/container/ContainerManagerImpl.java | 4 ++-- .../hdds/scm/server/SCMClientProtocolServer.java | 8 ++++---- .../hdds/scm/cli/ContainerOperationClient.java | 6 +++--- .../hdds/scm/cli/container/ListSubcommand.java | 14 +++++++------- .../hadoop/ozone/TestContainerOperations.java | 2 +- .../hadoop/ozone/shell/TestOzoneShellHA.java | 2 +- 10 files changed, 29 insertions(+), 29 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 01a08399cbca..c4b42acec435 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,10 +131,10 @@ public final class 
ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; - public static final String HDDS_CONTAINER_LIST_MAX_COUNT = - "hdds.container.list.max.count"; + public static final String OZONE_SCM_CONTAINER_LIST_MAX_COUNT = + "ozone.scm.container.list.max.count"; - public static final int HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + public static final int OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 202840a2c28a..23ac05c087a3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -124,7 +124,7 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param count count must be {@literal >} 0. * * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, @@ -138,7 +138,7 @@ ContainerListResult listContainer(long startContainerID, * @param state Container of this state will be returned. * @param replicationConfig container replication Config. * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, int count, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index d44cc2cd0768..b1b83636902b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -148,7 +148,7 @@ List getExistContainerWithPipelinesInBatch( * value instead of being unlimited in case the db is very big) * * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, @@ -168,7 +168,7 @@ ContainerListResult listContainer(long startContainerID, * @param state Container with this state will be returned. * * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, @@ -188,7 +188,7 @@ ContainerListResult listContainer(long startContainerID, * @param state Container with this state will be returned. 
* @param factor Container factor * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, @@ -209,7 +209,7 @@ ContainerListResult listContainer(long startContainerID, * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ ContainerListResult listContainer(long startContainerID, diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 3ae16cd0d4db..be7574a5122f 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -161,9 +161,9 @@ - hdds.container.list.max.count + ozone.scm.container.list.max.count 4096 - OZONE, CONTAINER, MANAGEMENT + OZONE, SCM, CONTAINER The max number of containers info could be included in response of ListContainer request. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 827342e1bf18..d61f9ee366bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -118,8 +118,8 @@ public ContainerManagerImpl( ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); this.maxCountOfContainerList = conf - .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, - ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 7ad1996848aa..a647f9c22bbe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -420,7 +420,7 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param count count must be {@literal >} 0. * * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -437,7 +437,7 @@ public ContainerListResult listContainer(long startContainerID, * @param state Container with this state will be returned. * * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. 
* @throws IOException */ @Override @@ -454,7 +454,7 @@ public ContainerListResult listContainer(long startContainerID, * @param state Container with this state will be returned. * @param factor Container factor. * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -555,7 +555,7 @@ private Map buildAuditMap(long startContainerID, int count, * @param state Container with this state will be returned. * @param repConfig Replication Config for the container. * @return a list of containers capped by max count allowed - * in "hdds.container.list.max.count" and total number of containers. + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 1322054d71b3..e0566fbeea6e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -113,8 +113,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); maxCountOfContainerList = conf - .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, - ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) @@ -347,7 +347,7 @@ public void deleteContainer(long containerID, boolean force) public ContainerListResult listContainer(long startContainerID, int count) throws IOException { if (count > maxCountOfContainerList) { - LOG.error("Attempting to list {} containers. However, this exceeds" + + LOG.warn("Attempting to list {} containers. However, this exceeds" + " the cluster's current limit of {}. The results will be capped at the" + " maximum allowed count.", count, maxCountOfContainerList); count = maxCountOfContainerList; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index eccee15453f2..88ccef702b3e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -117,8 +117,8 @@ public void execute(ScmClient scmClient) throws IOException { } int maxCountAllowed = parent.getParent().getOzoneConf() - .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, - ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); ContainerListResult containerListAndTotalCount; @@ -126,7 +126,7 @@ public void execute(ScmClient scmClient) throws IOException { if (count > maxCountAllowed) { System.err.printf("Attempting to list the first %d records of containers." + " However it exceeds the cluster's current limit of %d. 
The results will be capped at the" + - " maximum allowed count.%n", count, ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + " maximum allowed count.%n", count, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); count = maxCountAllowed; } containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); @@ -135,8 +135,8 @@ public void execute(ScmClient scmClient) throws IOException { } if (containerListAndTotalCount.getTotalCount() > count) { - System.err.printf("Container list is truncated as it is too long. " + - "Displayed the first %d records of %d total containers.%n", + System.err.printf("Displaying %d out of %d containers. " + + "Container list has more containers.%n", count, containerListAndTotalCount.getTotalCount()); } } else { @@ -155,9 +155,9 @@ public void execute(ScmClient scmClient) throws IOException { } if (fetchedCount > 0) { - currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID(); + currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1; } - } while (fetchedCount == batchSize); + } while (fetchedCount > 0); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 3906fa507b27..798e8a159919 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -77,7 +77,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - ozoneConf.set(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, "1"); + ozoneConf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 1f5cfd6464aa..e62b8699a164 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -202,7 +202,7 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { getKeyProviderURI(miniKMS)); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 10); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - conf.setInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, 1); ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId)
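For reviewers, here is a minimal, self-contained sketch of the batched listing pattern that the final ListSubcommand revision implements for "--all". The names ListAllSketch, ContainerLister, Batch and the demo data source are hypothetical stand-ins for ScmClient, ContainerListResult and SCM, used only to illustrate the loop; this is not Ozone code.

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of the batched "--all" listing loop; ListAllSketch,
// ContainerLister and Batch are stand-ins, not Ozone APIs.
public class ListAllSketch {

  // One page of results returned by the (stand-in) listing call.
  static final class Batch {
    final List<Long> containerIds;
    Batch(List<Long> containerIds) {
      this.containerIds = containerIds;
    }
  }

  // Stand-in for the paged listing call, e.g. ScmClient#listContainer(startId, count, ...).
  interface ContainerLister {
    Batch list(long startId, int count);
  }

  // Fetches every container by advancing startId one past the last ID of each
  // batch and stopping when a batch comes back empty, mirroring the do/while
  // loop in the final ListSubcommand revision.
  static List<Long> listAll(ContainerLister lister, int batchSize) {
    List<Long> all = new ArrayList<>();
    long startId = 0;
    int fetched;
    do {
      Batch batch = lister.list(startId, batchSize);
      fetched = batch.containerIds.size();
      all.addAll(batch.containerIds);
      if (fetched > 0) {
        startId = batch.containerIds.get(fetched - 1) + 1;
      }
    } while (fetched > 0);
    return all;
  }

  public static void main(String[] args) {
    // Demo data source: container IDs 1..10, served in pages of 'count'.
    ContainerLister demo = (startId, count) -> {
      List<Long> page = new ArrayList<>();
      for (long id = Math.max(startId, 1); id <= 10 && page.size() < count; id++) {
        page.add(id);
      }
      return new Batch(page);
    };
    System.out.println(listAll(demo, 4)); // prints [1, 2, ..., 10]
  }
}

With this loop, "ozone admin container list --all" keeps each request within the server-side cap configured by ozone.scm.container.list.max.count (default 4096) while still printing every container; when --all is given, a user-supplied --count only controls the per-request batch size.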