diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b250771f815d..d6177cf77e4f 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1962,6 +1962,15 @@
+
+ ozone.s3g.list.max.keys.limit
+ 1000
+
+ Upper limit on the number of keys returned by the S3 ListObjects/ListObjectsV2 API;
+ requests with a larger max-keys are capped to this value. AWS default is 1000. Can be overridden per deployment in ozone-site.xml.
+
+
+
ozone.s3g.secret.http.enabled
false
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectlist.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectlist.robot
new file mode 100644
index 000000000000..12416e3cf889
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectlist.robot
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation S3 max-keys validation tests: non-positive values are rejected and the configured limit caps larger requests
+Library OperatingSystem
+Library String
+Resource ../commonlib.robot
+Resource commonawslib.robot
+Test Timeout 3 minutes
+Suite Setup Setup s3 tests
+
+*** Variables ***
+${ENDPOINT_URL} http://s3g:9878
+${BUCKET} generated
+
+*** Keywords ***
+Prepare Many Objects In Bucket
+ [Arguments] ${count}=1100
+ Execute mkdir -p /tmp/manyfiles
+ FOR ${i} IN RANGE ${count}
+ Execute echo "test-${i}" > /tmp/manyfiles/obj-${i}
+ END
+ Execute aws s3 cp /tmp/manyfiles s3://${BUCKET}/ --recursive --endpoint-url=${ENDPOINT_URL}
+
+*** Test Cases ***
+
+List objects with negative max-keys should fail
+ ${result} = Execute AWSS3APICli and checkrc list-objects-v2 --bucket ${BUCKET} --max-keys -1 255
+ Should Contain ${result} InvalidArgument
+
+List objects with zero max-keys should fail
+ ${result} = Execute AWSS3APICli and checkrc list-objects-v2 --bucket ${BUCKET} --max-keys 0 255
+ Should Contain ${result} InvalidArgument
+
+List objects with max-keys exceeding config limit should not return more than limit
+ Prepare Many Objects In Bucket 1100
+ ${result}= Execute AWSS3APICli and checkrc list-objects-v2 --bucket ${BUCKET} --max-keys 9999 --endpoint-url=${ENDPOINT_URL} --output json 0
+ ${tmpfile}= Generate Random String 8
+ ${tmpfile}= Set Variable /tmp/result_${tmpfile}.json
+ Create File ${tmpfile} ${result}
+ ${count}= Execute and checkrc jq -r '.Contents | length' ${tmpfile} 0
+ Should Be True ${count} <= 1000
+ Remove File ${tmpfile}
+
+List objects with max-keys less than config limit should return correct count
+ Prepare Many Objects In Bucket 1100
+ ${result}= Execute AWSS3APICli and checkrc list-objects-v2 --bucket ${BUCKET} --max-keys 500 --endpoint-url=${ENDPOINT_URL} --output json 0
+ ${tmpfile}= Generate Random String 8
+ ${tmpfile}= Set Variable /tmp/result_${tmpfile}.json
+ Create File ${tmpfile} ${result}
+ ${count}= Execute and checkrc jq -r '.Contents | length' ${tmpfile} 0
+ Should Be True ${count} == 500
+ Remove File ${tmpfile}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh
index f147de908528..8b6a4f5a55af 100755
--- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh
@@ -85,5 +85,6 @@ run_robot_test objectmultidelete
run_robot_test objecthead
run_robot_test MultipartUpload
run_robot_test objecttagging
+run_robot_test objectlist
rebot --outputdir results/ results/*.xml
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
index 3fca5528aca2..cc674f4b0d68 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
@@ -98,6 +98,10 @@ public final class S3GatewayConfigKeys {
public static final String OZONE_S3G_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY
= "ozone.s3g.metrics.percentiles.intervals.seconds";
+ // Cap applied to the S3 ListObjects/ListObjectsV2 max-keys request parameter (AWS default: 1000)
+ public static final String OZONE_S3G_LIST_MAX_KEYS_LIMIT = "ozone.s3g.list.max.keys.limit";
+ public static final int OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT = 1000;
+
/**
* Never constructed.
*/
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 118900aa53d0..9307891fb9e7 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -23,10 +23,13 @@
import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT;
+import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
+import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
@@ -37,6 +40,7 @@
import java.util.Map;
import java.util.Objects;
import java.util.Set;
+import javax.annotation.PostConstruct;
import javax.inject.Inject;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
@@ -91,6 +95,7 @@ public class BucketEndpoint extends EndpointBase {
LoggerFactory.getLogger(BucketEndpoint.class);
private boolean listKeysShallowEnabled;
+ private int maxKeysLimit = 1000;
@Inject
private OzoneConfiguration ozoneConfiguration;
@@ -142,6 +147,8 @@ public Response get(
return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads);
}
+ maxKeys = validateMaxKeys(maxKeys);
+
if (prefix == null) {
prefix = "";
}
@@ -292,6 +299,14 @@ public Response get(
return Response.ok(response).build();
}
+ private int validateMaxKeys(int maxKeys) throws OS3Exception {
+ if (maxKeys <= 0) {
+ throw newError(S3ErrorTable.INVALID_ARGUMENT, "maxKeys must be > 0");
+ }
+
+ return Math.min(maxKeys, maxKeysLimit);
+ }
+
@PUT
public Response put(@PathParam("bucket") String bucketName,
@QueryParam("acl") String aclMarker,
@@ -752,10 +767,24 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
response.addKey(keyMetadata);
}
+ @VisibleForTesting
+ public void setOzoneConfiguration(OzoneConfiguration config) {
+ this.ozoneConfiguration = config;
+ }
+
+ @VisibleForTesting
+ public OzoneConfiguration getOzoneConfiguration() {
+ return this.ozoneConfiguration;
+ }
+
@Override
+ @PostConstruct
public void init() {
listKeysShallowEnabled = ozoneConfiguration.getBoolean(
OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED,
OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT);
+ maxKeysLimit = ozoneConfiguration.getInt(
+ OZONE_S3G_LIST_MAX_KEYS_LIMIT,
+ OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT);
}
}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointBuilder.java
new file mode 100644
index 000000000000..1a3c4c492aa4
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointBuilder.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+/**
+ * Builder for BucketEndpoint in tests.
+ */
+public class BucketEndpointBuilder extends
+ EndpointBuilder {
+
+ public BucketEndpointBuilder() {
+ super(BucketEndpoint::new);
+ }
+
+ @Override
+ public BucketEndpoint build() {
+ BucketEndpoint endpoint = super.build();
+ endpoint.setOzoneConfiguration(getConfig());
+
+ return endpoint;
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
index 25c9d9a567ad..8ae4f941ac94 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
@@ -111,7 +111,7 @@ public static EndpointBuilder newRootEndpointBuilder() {
}
public static EndpointBuilder newBucketEndpointBuilder() {
- return new EndpointBuilder<>(BucketEndpoint::new);
+ return new BucketEndpointBuilder();
}
public static EndpointBuilder newObjectEndpointBuilder() {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
index ad8b47754559..3f0810dedee2 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.s3.endpoint;
+import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -26,6 +27,8 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
+import java.util.stream.IntStream;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
@@ -519,6 +522,61 @@ public void testEncodingTypeException() throws IOException {
assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e.getCode());
}
+ @Test
+ public void testListObjectsWithInvalidMaxKeys() throws Exception {
+ OzoneClient client = createClientWithKeys("file1");
+ BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder()
+ .setClient(client)
+ .build();
+
+ // maxKeys < 0
+ OS3Exception e1 = assertThrows(OS3Exception.class, () ->
+ bucketEndpoint.get("bucket", null, null, null, -1, null,
+ null, null, null, null, null, null, 1000, null)
+ );
+ assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e1.getCode());
+
+ // maxKeys == 0
+ OS3Exception e2 = assertThrows(OS3Exception.class, () ->
+ bucketEndpoint.get("bucket", null, null, null, 0, null,
+ null, null, null, null, null, null, 1000, null)
+ );
+ assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e2.getCode());
+ }
+
+ @Test
+ public void testListObjectsRespectsConfiguredMaxKeysLimit() throws Exception {
+ // Arrange: Create a bucket with 1001 keys
+ String[] keys = IntStream.range(0, 1001).mapToObj(i -> "file" + i).toArray(String[]::new);
+ OzoneClient client = createClientWithKeys(keys);
+
+ // Arrange: Set the max-keys limit in the configuration
+ OzoneConfiguration config = new OzoneConfiguration();
+ final String configuredMaxKeysLimit = "900";
+ config.set(OZONE_S3G_LIST_MAX_KEYS_LIMIT, configuredMaxKeysLimit);
+
+ // Arrange: Build and initialize the BucketEndpoint with the config
+ BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder()
+ .setClient(client)
+ .setConfig(config)
+ .build();
+ bucketEndpoint.init();
+
+ // Assert: Ensure the config value is correctly set in the endpoint
+ assertEquals(configuredMaxKeysLimit,
+ bucketEndpoint.getOzoneConfiguration().get(OZONE_S3G_LIST_MAX_KEYS_LIMIT));
+
+ // Act: Request more keys than the configured max-keys limit
+ final int requestedMaxKeys = Integer.parseInt(configuredMaxKeysLimit) + 1;
+ ListObjectResponse response = (ListObjectResponse)
+ bucketEndpoint.get("b1", null, null, null, requestedMaxKeys,
+ null, null, null, null, null, null, null,
+ 1000, null).getEntity();
+
+ // Assert: The number of returned keys should be capped at the configured limit
+ assertEquals(Integer.parseInt(configuredMaxKeysLimit), response.getContents().size());
+ }
+
private void assertEncodingTypeObject(
String exceptName, String exceptEncodingType, EncodingTypeObject object) {
assertEquals(exceptName, object.getName());