diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index 2997e0dd7fa5..57053f76e08f 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -21,10 +21,13 @@
 import com.fasterxml.jackson.annotation.PropertyAccessor;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.databind.SequenceWriter;
 import com.fasterxml.jackson.databind.SerializationFeature;
 import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
 import com.google.common.base.Strings;
 import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -40,7 +43,13 @@
 import picocli.CommandLine.Option;
 
 /**
- * This is the handler that process container list command.
+ * The ListSubcommand class represents a command to list containers in a structured way.
+ * It provides options to control how the list is generated, including the
+ * starting container ID, the maximum number of containers to list, and other filtering criteria
+ * such as container state or replication type.
+ *
+ * This command connects to the SCM (Storage Container Manager) client to fetch the
+ * container details and outputs the result as a JSON array.
 */
 @Command(
     name = "list",
@@ -89,13 +98,6 @@ public class ListSubcommand extends ScmSubcommand {
     WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
-
-  private void outputContainerInfo(ContainerInfo containerInfo)
-      throws IOException {
-    // Print container report info.
-    System.out.println(WRITER.writeValueAsString(containerInfo));
-  }
-
   @Override
   public void execute(ScmClient scmClient) throws IOException {
     if (!Strings.isNullOrEmpty(replication) && type == null) {
@@ -114,44 +116,104 @@ public void execute(ScmClient scmClient) throws IOException {
         .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT,
             ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT);
 
-    ContainerListResult containerListAndTotalCount;
+    // Use SequenceWriter to output JSON array format for all cases
+    SequenceWriter sequenceWriter = WRITER.writeValues(new NonClosingOutputStream(System.out));
+    sequenceWriter.init(true); // Initialize as a JSON array
 
     if (!all) {
+      // Regular listing with count limit
       if (count > maxCountAllowed) {
         System.err.printf("Attempting to list the first %d records of containers." +
             " However it exceeds the cluster's current limit of %d. The results will be capped at the" +
-            " maximum allowed count.%n", count, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT);
+            " maximum allowed count.%n", count, maxCountAllowed);
         count = maxCountAllowed;
       }
-      containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig);
-      for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) {
-        outputContainerInfo(container);
-      }
-
-      if (containerListAndTotalCount.getTotalCount() > count) {
+
+      ContainerListResult containerListResult =
+          scmClient.listContainer(startId, count, state, type, repConfig);
+
+      writeContainers(sequenceWriter, containerListResult.getContainerInfoList());
+
+      closeStream(sequenceWriter);
+      if (containerListResult.getTotalCount() > count) {
         System.err.printf("Displaying %d out of %d containers. " +
-            "Container list has more containers.%n",
-            count, containerListAndTotalCount.getTotalCount());
+                "Container list has more containers.%n",
+            count, containerListResult.getTotalCount());
       }
     } else {
-      // Batch size is either count passed through cli or maxCountAllowed
+      // List all containers by fetching in batches
       int batchSize = (count > 0) ? count : maxCountAllowed;
-      long currentStartId = startId;
-      int fetchedCount;
-
-      do {
-        // Fetch containers in batches of 'batchSize'
-        containerListAndTotalCount = scmClient.listContainer(currentStartId, batchSize, state, type, repConfig);
-        fetchedCount = containerListAndTotalCount.getContainerInfoList().size();
-
-        for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) {
-          outputContainerInfo(container);
-        }
-
-        if (fetchedCount > 0) {
-          currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1;
-        }
-      } while (fetchedCount > 0);
+      listAllContainers(scmClient, sequenceWriter, batchSize, repConfig);
+      closeStream(sequenceWriter);
+    }
+  }
+
+  private void writeContainers(SequenceWriter writer, List<ContainerInfo> containers)
+      throws IOException {
+    for (ContainerInfo container : containers) {
+      writer.write(container);
+    }
+  }
+
+  private void closeStream(SequenceWriter writer) throws IOException {
+    writer.flush();
+    writer.close();
+    // Add the final newline
+    System.out.println();
+  }
+
+  private void listAllContainers(ScmClient scmClient, SequenceWriter writer,
+      int batchSize, ReplicationConfig repConfig)
+      throws IOException {
+    long currentStartId = startId;
+    int fetchedCount;
+
+    do {
+      ContainerListResult result =
+          scmClient.listContainer(currentStartId, batchSize, state, type, repConfig);
+      fetchedCount = result.getContainerInfoList().size();
+
+      writeContainers(writer, result.getContainerInfoList());
+
+      if (fetchedCount > 0) {
+        currentStartId =
+            result.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1;
+      }
+    } while (fetchedCount > 0);
+  }
+
+
+  private static class NonClosingOutputStream extends OutputStream {
+
+    private final OutputStream delegate;
+
+    NonClosingOutputStream(OutputStream delegate) {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      delegate.write(b);
+    }
+
+    @Override
+    public void write(byte[] b) throws IOException {
+      delegate.write(b);
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+      delegate.write(b, off, len);
+    }
+
+    @Override
+    public void flush() throws IOException {
+      delegate.flush();
+    }
+
+    @Override
+    public void close() {
+      // Ignore close to keep the underlying stream open
     }
   }
 }
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
index 564fd1f5d699..1f3279c6bdca 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
@@ -44,22 +44,32 @@ Create container
 List containers
     ${output} =         Execute          ozone admin container list
                         Should contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List containers with explicit host
     ${output} =         Execute          ozone admin container list --scm ${SCM}
                         Should contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List containers with container state
     ${output} =         Execute          ozone admin container list --state=CLOSED
                         Should Not contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List containers with replication factor ONE
     ${output} =         Execute          ozone admin container list -t RATIS -r ONE
                         Should Not contain   ${output}   THREE
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List containers with replication factor THREE
     ${output} =         Execute          ozone admin container list -t RATIS -r THREE
                         Should Not contain   ${output}   ONE
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 Container info
     ${output} =         Execute          ozone admin container info "${CONTAINER}"
@@ -87,17 +97,54 @@ Report containers as JSON
 List all containers
     ${output} =         Execute          ozone admin container list --all
                         Should contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List all containers according to count (batchSize)
     ${output} =         Execute          ozone admin container list --all --count 10
                         Should contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
 
 List all containers from a particular container ID
-    ${output} =         Execute          ozone admin container list --all --start 1
+    ${output} =         Execute          ozone admin container list --all --start 2
                         Should contain   ${output}   OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
+
+Check JSON array parsing
+    ${output} =         Execute          ozone admin container list
+                        Should Start With   ${output}   [
+                        Should Contain      ${output}   containerID
+                        Should End With     ${output}   ]
+    ${containerIDs} =   Execute          echo '${output}' | jq -r '.[].containerID'
+                        Should Not Be Empty   ${containerIDs}
+
+Check state filtering with JSON array format
+    ${output} =         Execute          ozone admin container list --state=OPEN
+                        Should Start With   ${output}   [
+                        Should End With     ${output}   ]
+    ${states} =         Execute          echo '${output}' | jq -r '.[].state'
+                        Should Contain      ${states}   OPEN
+                        Should Not Contain  ${states}   CLOSED
+
+Check count limit with JSON array format
+    ${output} =         Execute          ozone admin container create
+                        Should contain   ${output}   is created
+    ${output} =         Execute          ozone admin container create
+                        Should contain   ${output}   is created
+    ${output} =         Execute          ozone admin container create
+                        Should contain   ${output}   is created
+    ${output} =         Execute          ozone admin container create
+                        Should contain   ${output}   is created
+    ${output} =         Execute          ozone admin container create
+                        Should contain   ${output}   is created
+    ${output} =         Execute And Ignore Error    ozone admin container list --count 5 2> /dev/null    # This logs to error that the list is incomplete
+    ${count} =          Execute          echo '${output}' | jq -r 'length'
+                        Should Be True   ${count} == 5
 
 Close container
-    ${container} =      Execute          ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1
+    ${container} =      Execute          ozone admin container list --state OPEN | jq -r '.[] | select(.replicationConfig.replicationFactor == "ONE") | .containerID' | head -1
                         Execute          ozone admin container close "${container}"
     ${output} =         Execute          ozone admin container info "${container}"
                         Should contain   ${output}   CLOS
diff --git a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot
index 641bc1462bbc..21490e84d646 100644
--- a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot
@@ -134,7 +134,7 @@ Get Uuid
 
 Close All Containers
     FOR    ${INDEX}    IN RANGE    15
-        ${container} =      Execute          ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.data == 3) | .containerID' | head -1
+        ${container} =      Execute          ozone admin container list --state OPEN | jq -r '.[] | select(.replicationConfig.data == 3) | .containerID' | head -1
         EXIT FOR LOOP IF    "${container}" == "${EMPTY}"
         ${message} =        Execute And Ignore Error    ozone admin container close "${container}"
         Run Keyword If    '${message}' != '${EMPTY}'    Should Contain   ${message}   is in closing state
@@ -145,7 +145,7 @@ Close All Containers
 
 All container is closed
     ${output} =         Execute          ozone admin container list --state OPEN
-                        Should Be Empty    ${output}
+                        Should Be Equal    ${output}    [ ]
 
 Get Datanode Ozone Used Bytes Info
     [arguments]    ${uuid}
@@ -186,4 +186,4 @@ Verify Container Balancer for RATIS/EC containers
     #We need to ensure that after balancing, the amount of data recorded on each datanode falls within the following ranges:
     #{SIZE}*3 < used < {SIZE}*3.5 for RATIS containers, and {SIZE}*0.7 < used < {SIZE}*1.5 for EC containers.
     Should Be True    ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * ${UPPER_LIMIT}
-    Should Be True    ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * ${LOWER_LIMIT}
\ No newline at end of file
+    Should Be True    ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * ${LOWER_LIMIT}
diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/echoRPCLoad.robot b/hadoop-ozone/dist/src/main/smoketest/freon/echoRPCLoad.robot
index c6ea4e63468e..abcd9417b30b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/freon/echoRPCLoad.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/freon/echoRPCLoad.robot
@@ -25,7 +25,7 @@ ${n}    1
 *** Test Cases ***
 Get Container ID
     ${result} =         Execute          ozone admin container create
-    ${containerID} =    Execute          ozone admin container list --count 1 --state=OPEN | grep -o '"containerID" *: *[^,}]*' | awk -F'[:,]' '{print $2}' | tr -d '" '
+    ${containerID} =    Execute          ozone admin container list --count 1 --state=OPEN | jq -r '.[0].containerID'
     Set Suite Variable    ${containerID}
 
 [Read] Ozone DataNode Echo RPC Load Generator with request payload and response payload
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index 19acdbd275fb..d1aa84c5808a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -961,7 +961,8 @@ public void testOzoneAdminCmdListAllContainer()
     execute(ozoneAdminShell, args1);
     //results will be capped at the maximum allowed count
     assertEquals(1, getNumOfContainers());
-
+    out.reset();
+    err.reset();
     String[] args2 = new String[] {"container", "list", "-a", "--scm",
         "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()};
     execute(ozoneAdminShell, args2);