diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index a29f6d36043b..41cf8ab28560 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -52,6 +52,9 @@ public enum OzoneManagerVersion implements ComponentVersion { S3_PART_AWARE_GET(10, "OzoneManager version that supports S3 get for a specific multipart " + "upload part number"), + S3_LIST_MULTIPART_UPLOADS_PAGINATION(11, + "OzoneManager version that supports S3 list multipart uploads API with pagination"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 3c9252671f48..ba62bad2ac30 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -1029,9 +1029,10 @@ public List listStatus(String keyName, boolean recursive, * * @param prefix Optional string to filter for the selected keys. 
*/ - public OzoneMultipartUploadList listMultipartUploads(String prefix) + public OzoneMultipartUploadList listMultipartUploads(String prefix, + String keyMarker, String uploadIdMarker, int maxUploads) throws IOException { - return proxy.listMultipartUploads(volumeName, getName(), prefix); + return proxy.listMultipartUploads(volumeName, getName(), prefix, keyMarker, uploadIdMarker, maxUploads); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java index 7371e65d0dc1..1a71182fc135 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java @@ -26,11 +26,20 @@ public class OzoneMultipartUploadList { private List uploads; + private String nextKeyMarker; + private String nextUploadIdMarker; + private boolean isTruncated; public OzoneMultipartUploadList( - List uploads) { + List uploads, + String nextKeyMarker, + String nextUploadIdMarker, + boolean isTruncated) { Preconditions.checkNotNull(uploads); this.uploads = uploads; + this.nextKeyMarker = nextKeyMarker; + this.nextUploadIdMarker = nextUploadIdMarker; + this.isTruncated = isTruncated; } public List getUploads() { @@ -41,4 +50,16 @@ public void setUploads( List uploads) { this.uploads = uploads; } + + public String getNextKeyMarker() { + return nextKeyMarker; + } + + public String getNextUploadIdMarker() { + return nextUploadIdMarker; + } + + public boolean isTruncated() { + return isTruncated; + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 29237081d23d..7ef2c38eb32b 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -666,7 +666,7 @@ OzoneMultipartUploadPartListParts listParts(String volumeName, * Return with the inflight multipart uploads. */ OzoneMultipartUploadList listMultipartUploads(String volumename, - String bucketName, String prefix) throws IOException; + String bucketName, String prefix, String keyMarker, String uploadIdMarker, int maxUploads) throws IOException; /** * Get a valid Delegation Token. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index ebd5c6a1b1fc..90afe10fe6bc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2124,10 +2124,16 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, @Override public OzoneMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException { + String bucketName, String prefix, String keyMarker, String uploadIdMarker, int maxUploads) throws IOException { - OmMultipartUploadList omMultipartUploadList = - ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix); + OmMultipartUploadList omMultipartUploadList; + if (omVersion.compareTo(OzoneManagerVersion.S3_LIST_MULTIPART_UPLOADS_PAGINATION) >= 0) { + omMultipartUploadList = ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix, keyMarker, + uploadIdMarker, maxUploads, true); + } else { + omMultipartUploadList = ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix, keyMarker, + uploadIdMarker, maxUploads, false); + } List uploads = omMultipartUploadList.getUploads() .stream() .map(upload -> new 
OzoneMultipartUpload(upload.getVolumeName(), @@ -2137,7 +2143,10 @@ public OzoneMultipartUploadList listMultipartUploads(String volumeName, upload.getCreationTime(), upload.getReplicationConfig())) .collect(Collectors.toList()); - OzoneMultipartUploadList result = new OzoneMultipartUploadList(uploads); + OzoneMultipartUploadList result = new OzoneMultipartUploadList(uploads, + omMultipartUploadList.getNextKeyMarker(), + omMultipartUploadList.getNextUploadIdMarker(), + omMultipartUploadList.isTruncated()); return result; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/MultipartUploadKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/MultipartUploadKeys.java new file mode 100644 index 000000000000..d74d9cb0c4c6 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/MultipartUploadKeys.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import java.util.Set; + +/** + * This class is used to store the result of OmMetadataManager#getMultipartUploadKeys. 
+ */ +public class MultipartUploadKeys { + private final Set keys; + private final String nextKeyMarker; + private final String nextUploadIdMarker; + private final boolean isTruncated; + + public MultipartUploadKeys(Set keys, String nextKeyMarker, String nextUploadIdMarker, boolean isTruncated) { + this.keys = keys; + this.nextKeyMarker = nextKeyMarker; + this.nextUploadIdMarker = nextUploadIdMarker; + this.isTruncated = isTruncated; + } + + public Set getKeys() { + return keys; + } + + public String getNextKeyMarker() { + return nextKeyMarker; + } + + public String getNextUploadIdMarker() { + return nextUploadIdMarker; + } + + public boolean isTruncated() { + return isTruncated; + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder class for MultipartUploadKeys. + */ + public static class Builder { + private Set keys; + private String nextKeyMarker = ""; + private String nextUploadIdMarker = ""; + private boolean isTruncated; + + public Builder setKeys(Set keys) { + this.keys = keys; + return this; + } + + public Builder setNextKeyMarker(String nextKeyMarker) { + this.nextKeyMarker = nextKeyMarker; + return this; + } + + public Builder setNextUploadIdMarker(String nextUploadIdMarker) { + this.nextUploadIdMarker = nextUploadIdMarker; + return this; + } + + public Builder setIsTruncated(boolean isTruncated) { + this.isTruncated = isTruncated; + return this; + } + + public MultipartUploadKeys build() { + return new MultipartUploadKeys(keys, nextKeyMarker, nextUploadIdMarker, isTruncated); + } + + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java index d7c7d0132102..022196758be0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java @@ -25,10 +25,19 @@ public class OmMultipartUploadList { private List uploads; + private String nextKeyMarker; + private String nextUploadIdMarker; + private boolean isTruncated; public OmMultipartUploadList( - List uploads) { + List uploads, + String nextKeyMarker, + String nextUploadIdMarker, + boolean isTruncated) { this.uploads = uploads; + this.nextKeyMarker = nextKeyMarker; + this.nextUploadIdMarker = nextUploadIdMarker; + this.isTruncated = isTruncated; } public List getUploads() { @@ -40,4 +49,16 @@ public void setUploads( this.uploads = uploads; } + public String getNextKeyMarker() { + return nextKeyMarker; + } + + public String getNextUploadIdMarker() { + return nextUploadIdMarker; + } + + public boolean isTruncated() { + return isTruncated; + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 77ab5746dfe6..2bbff617ca7f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -606,9 +606,12 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, /** * List in-flight uploads. + * withPagination is for backward compatibility, as the older listMultipartUploads does + * not support pagination. */ OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException; + String bucketName, String prefix, + String keyMarker, String uploadIdMarker, int maxUploads, boolean withPagination) throws IOException; /** * Gets s3Secret for given kerberos user. 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 6e6c1f31b91b..41bfdcdea434 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1808,12 +1808,17 @@ public OmMultipartUploadListParts listParts(String volumeName, @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, - String prefix) throws IOException { + String prefix, + String keyMarker, String uploadIdMarker, int maxUploads, boolean withPagination) throws IOException { ListMultipartUploadsRequest request = ListMultipartUploadsRequest .newBuilder() .setVolume(volumeName) .setBucket(bucketName) .setPrefix(prefix == null ? "" : prefix) + .setKeyMarker(keyMarker == null ? "" : keyMarker) + .setUploadIdMarker(uploadIdMarker == null ? 
"" : uploadIdMarker) + .setMaxUploads(maxUploads) + .setWithPagination(withPagination) .build(); OMRequest omRequest = createOMRequest(Type.ListMultipartUploads) @@ -1837,7 +1842,10 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, )) .collect(Collectors.toList()); - OmMultipartUploadList response = new OmMultipartUploadList(uploadList); + OmMultipartUploadList response = new OmMultipartUploadList(uploadList, + listMultipartUploadsResponse.getNextKeyMarker(), + listMultipartUploadsResponse.getNextUploadIdMarker(), + listMultipartUploadsResponse.getIsTruncated()); return response; } diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index c12f8e335817..77d01889ed44 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -318,12 +318,58 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince Compare files /tmp/10mb /tmp/part-result Test Multipart Upload list - ${uploadID1} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key1 - ${uploadID2} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key2 - - ${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest - Should contain ${result} ${uploadID1} - Should contain ${result} ${uploadID2} - - ${count} = Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0 - Should Be Equal ${count} 2 + # Create 25 multipart uploads to test pagination + ${uploadIDs}= Create List + FOR ${index} IN RANGE 25 + ${key}= Set Variable ${PREFIX}/listtest/key-${index} + ${uploadID}= Initiate MPU ${BUCKET} ${key} + Append To List ${uploadIDs} ${uploadID} + END + + # Test listing with max-items=10 (should get 3 pages: 10, 10, 5) + ${result}= Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest --max-items 10 + + # Verify first page + ${count}= Execute and 
checkrc echo '${result}' | jq -r '.Uploads | length' 0 + Should Be Equal ${count} 10 + + ${hasNext}= Execute and checkrc echo '${result}' | jq -r 'has("NextToken")' 0 + Should Be Equal ${hasNext} true + + ${nextToken}= Execute and checkrc echo '${result}' | jq -r '.NextToken' 0 + Should Not Be Empty ${nextToken} + + # Get second page + ${result}= Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest --max-items 10 --starting-token ${nextToken} + + # Verify second page + ${count}= Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0 + Should Be Equal ${count} 10 + + ${hasNext}= Execute and checkrc echo '${result}' | jq -r 'has("NextToken")' 0 + Should Be Equal ${hasNext} true + + ${nextToken}= Execute and checkrc echo '${result}' | jq -r '.NextToken' 0 + Should Not Be Empty ${nextToken} + + # Get last page + ${result}= Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest --max-items 10 --starting-token ${nextToken} + + # Verify last page + ${count}= Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0 + Should Be Equal ${count} 5 + + ${hasNext}= Execute and checkrc echo '${result}' | jq -r 'has("NextToken")' 0 + Should Be Equal ${hasNext} false + + # Test prefix filtering + ${result}= Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest/key-1 + ${count}= Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0 + Should Be Equal ${count} 11 # Should match key-1, key-10 through key-19 + + # Cleanup + FOR ${index} IN RANGE 25 + ${key}= Set Variable ${PREFIX}/listtest/key-${index} + ${uploadID}= Get From List ${uploadIDs} ${index} + Abort MPU ${BUCKET} ${key} ${uploadID} 0 + END diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 48438158bb09..c953a521c78f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -39,11 +39,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.UUID; +import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; @@ -842,7 +845,7 @@ public void testListMultipartUpload() throws Exception { uploadPart(bucket, key2, uploadID2, 1, "data".getBytes(UTF_8)); uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8)); - OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1"); + OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1", "", "", 1000); assertEquals(3, listMPUs.getUploads().size()); List expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { @@ -850,7 +853,7 @@ public void testListMultipartUpload() throws Exception { } assertEquals(0, expectedList.size()); - listMPUs = bucket.listMultipartUploads("dir1/dir2"); + listMPUs = bucket.listMultipartUploads("dir1/dir2", "", "", 1000); assertEquals(2, listMPUs.getUploads().size()); expectedList = new ArrayList<>(); expectedList.add(key2); @@ -860,7 +863,7 @@ public void testListMultipartUpload() throws Exception { } assertEquals(0, expectedList.size()); - listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3"); + listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3", "", "", 1000); assertEquals(1, 
listMPUs.getUploads().size()); expectedList = new ArrayList<>(); expectedList.add(key3); @@ -870,7 +873,7 @@ public void testListMultipartUpload() throws Exception { assertEquals(0, expectedList.size()); // partial key - listMPUs = bucket.listMultipartUploads("d"); + listMPUs = bucket.listMultipartUploads("d", "", "", 1000); assertEquals(3, listMPUs.getUploads().size()); expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { @@ -879,7 +882,7 @@ public void testListMultipartUpload() throws Exception { assertEquals(0, expectedList.size()); // partial key - listMPUs = bucket.listMultipartUploads(""); + listMPUs = bucket.listMultipartUploads("", "", "", 1000); assertEquals(3, listMPUs.getUploads().size()); expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { @@ -888,6 +891,72 @@ public void testListMultipartUpload() throws Exception { assertEquals(0, expectedList.size()); } + @Test + public void testListMultipartUploadsPagination() throws Exception { + int numOfKeys = 25; + List keys = new ArrayList<>(); + Map keyToUploadId = new HashMap<>(); + + // Generate keys + for (int i = 0; i < numOfKeys; i++) { + StringBuilder key = new StringBuilder(); + int depth = 1 + i % 3; // Creates varying depth (1-3 levels) + for (int j = 0; j < depth; j++) { + key.append("dir").append(j + 1).append("/"); + } + key.append("file").append(i); + keys.add(key.toString()); + } + + for (String key : keys) { + String uploadId = initiateMultipartUploadWithAsserts(bucket, key, RATIS, ONE); + keyToUploadId.put(key, uploadId); + uploadPart(bucket, key, uploadId, 1, "data".getBytes(UTF_8)); + } + + // Test full pagination process + final int maxUploads = 10; + final int expectedPages = 3; + int pageCount = 0; + String keyMarker = ""; + String uploadIdMarker = ""; + Set retrievedKeys = new HashSet<>(); + boolean isTruncated = true; + + do { + OzoneMultipartUploadList result = bucket.listMultipartUploads( + "dir", 
keyMarker, uploadIdMarker, maxUploads); + + if (pageCount < 2) { + assertEquals(maxUploads, result.getUploads().size()); + assertTrue(result.isTruncated()); + } else { + assertEquals(numOfKeys - pageCount * maxUploads, result.getUploads().size()); + assertFalse(result.isTruncated()); + } + + for (OzoneMultipartUpload upload : result.getUploads()) { + String key = upload.getKeyName(); + retrievedKeys.add(key); + + assertEquals(keyToUploadId.get(key), upload.getUploadId()); + } + + // Update markers for next iteration + keyMarker = result.getNextKeyMarker(); + uploadIdMarker = result.getNextUploadIdMarker(); + isTruncated = result.isTruncated(); + + pageCount++; + } while (isTruncated); + + assertEquals(keys.size(), retrievedKeys.size()); + assertEquals(expectedPages, pageCount); + assertThat(retrievedKeys.stream().sorted().collect(Collectors.toList())) + .as("Retrieved keys should match expected keys in order") + .isEqualTo(keys.stream().sorted().collect(Collectors.toList())); + } + @Test void testGetAllPartsWhenZeroPartNumber() throws Exception { String parentDir = "a/b/c/d/e/f/"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java index 9d9bc72eb235..f088e2063812 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -82,10 +82,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; @@ -691,6 +693,98 @@ public void 
testListMultipartUploads() { assertEquals(uploadIds, listUploadIds); } + @Test + public void testListMultipartUploadsPagination() { + final String bucketName = getBucketName(); + final String multipartKeyPrefix = getKeyName("multipart"); + + s3Client.createBucket(bucketName); + + // Create 25 multipart uploads to test pagination + List allKeys = new ArrayList<>(); + Map keyToUploadId = new HashMap<>(); + + for (int i = 0; i < 25; i++) { + String key = String.format("%s-%03d", multipartKeyPrefix, i); + allKeys.add(key); + String uploadId = initiateMultipartUpload(bucketName, key, null, null, null); + keyToUploadId.put(key, uploadId); + } + Collections.sort(allKeys); + + // Test pagination with maxUploads=10 + Set retrievedKeys = new HashSet<>(); + String keyMarker = null; + String uploadIdMarker = null; + boolean truncated = true; + int pageCount = 0; + + do { + ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(bucketName) + .withMaxUploads(10) + .withKeyMarker(keyMarker) + .withUploadIdMarker(uploadIdMarker); + + MultipartUploadListing result = s3Client.listMultipartUploads(request); + + // Verify page size + if (pageCount < 2) { + assertEquals(10, result.getMultipartUploads().size()); + assertTrue(result.isTruncated()); + } else { + assertEquals(5, result.getMultipartUploads().size()); + assertFalse(result.isTruncated()); + } + + // Collect keys and verify uploadIds + for (MultipartUpload upload : result.getMultipartUploads()) { + String key = upload.getKey(); + retrievedKeys.add(key); + assertEquals(keyToUploadId.get(key), upload.getUploadId()); + } + + // Verify response + assertNull(result.getPrefix()); + assertEquals(result.getUploadIdMarker(), uploadIdMarker); + assertEquals(result.getKeyMarker(), keyMarker); + assertEquals(result.getMaxUploads(), 10); + + // Update markers for next page + keyMarker = result.getNextKeyMarker(); + uploadIdMarker = result.getNextUploadIdMarker(); + + truncated = result.isTruncated(); + pageCount++; + + } 
while (truncated); + + // Verify pagination results + assertEquals(3, pageCount, "Should have exactly 3 pages"); + assertEquals(25, retrievedKeys.size(), "Should retrieve all uploads"); + assertEquals( + allKeys, + retrievedKeys.stream().sorted().collect(Collectors.toList()), + "Retrieved keys should match expected keys in order"); + + // Test with prefix + String prefix = multipartKeyPrefix + "-01"; + ListMultipartUploadsRequest prefixRequest = new ListMultipartUploadsRequest(bucketName) + .withPrefix(prefix); + + MultipartUploadListing prefixResult = s3Client.listMultipartUploads(prefixRequest); + + assertEquals(prefix, prefixResult.getPrefix()); + assertEquals( + Arrays.asList(multipartKeyPrefix + "-010", multipartKeyPrefix + "-011", + multipartKeyPrefix + "-012", multipartKeyPrefix + "-013", + multipartKeyPrefix + "-014", multipartKeyPrefix + "-015", + multipartKeyPrefix + "-016", multipartKeyPrefix + "-017", + multipartKeyPrefix + "-018", multipartKeyPrefix + "-019"), + prefixResult.getMultipartUploads().stream() + .map(MultipartUpload::getKey) + .collect(Collectors.toList())); + } + @Test public void testListParts(@TempDir Path tempDir) throws Exception { final String bucketName = getBucketName(); diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 92c2b6b4cc5a..df97028a0f31 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1759,11 +1759,17 @@ message ListMultipartUploadsRequest { required string volume = 1; required string bucket = 2; required string prefix = 3; + optional string keyMarker = 4; + optional string uploadIdMarker = 5; + optional int32 maxUploads = 6; + optional bool withPagination = 7; // for backward compatibility } message ListMultipartUploadsResponse { optional bool isTruncated = 1; repeated MultipartUploadInfo uploadsList = 2; + 
optional string nextKeyMarker = 3; + optional string nextUploadIdMarker = 4; } message MultipartUploadInfo { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 0c165c2b53e7..3818b4ede56f 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; +import org.apache.hadoop.ozone.om.helpers.MultipartUploadKeys; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; @@ -493,8 +494,9 @@ long countEstimatedRowsInTable(Table table) * Return the existing upload keys which includes volumeName, bucketName, * keyName. */ - Set getMultipartUploadKeys(String volumeName, - String bucketName, String prefix) throws IOException; + MultipartUploadKeys getMultipartUploadKeys(String volumeName, + String bucketName, String prefix, String keyMarker, String uploadIdMarker, int maxUploads, + boolean noPagination) throws IOException; /** * Gets the DirectoryTable. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 157398530385..d25535b151d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -203,7 +203,8 @@ List getExpiredMultipartUploads( OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws OMException; + String bucketName, String prefix, + String keyMarker, String uploadIdMarker, int maxUploads, boolean withPagination) throws OMException; /** * Returns list of parts of a multipart upload key. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 54b4608e64f2..8aa9b579faa2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -132,6 +132,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.MultipartUploadKeys; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -815,7 +816,9 @@ public boolean isSstFilteringSvcEnabled() { @Override public OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws OMException { + String bucketName, + String prefix, String keyMarker, String uploadIdMarker, int maxUploads, boolean withPagination) + throws OMException { 
Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -823,11 +826,11 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, bucketName); try { - Set multipartUploadKeys = - metadataManager - .getMultipartUploadKeys(volumeName, bucketName, prefix); + MultipartUploadKeys multipartUploadKeys = metadataManager + .getMultipartUploadKeys(volumeName, bucketName, prefix, keyMarker, uploadIdMarker, maxUploads, + !withPagination); - List collect = multipartUploadKeys.stream() + List collect = multipartUploadKeys.getKeys().stream() .map(OmMultipartUpload::from) .peek(upload -> { try { @@ -843,14 +846,17 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, multipartKeyInfo.getReplicationConfig()); } catch (IOException e) { LOG.warn( - "Open key entry for multipart upload record can be read {}", + "Open key entry for multipart upload record can't be read {}", metadataManager.getOzoneKey(upload.getVolumeName(), upload.getBucketName(), upload.getKeyName())); } }) .collect(Collectors.toList()); - return new OmMultipartUploadList(collect); + return new OmMultipartUploadList(collect, + multipartUploadKeys.getNextKeyMarker(), + multipartUploadKeys.getNextUploadIdMarker(), + multipartUploadKeys.isTruncated()); } catch (IOException ex) { LOG.error("List Multipart Uploads Failed: volume: " + volumeName + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index a7793fdc9abe..77a0fa24e9ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -48,6 +48,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; 
import java.util.Map; @@ -55,6 +56,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.TimeUnit; @@ -87,6 +89,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; +import org.apache.hadoop.ozone.om.helpers.MultipartUploadKeys; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; @@ -1933,46 +1936,83 @@ public long countEstimatedRowsInTable(Table table) } @Override - public Set getMultipartUploadKeys( - String volumeName, String bucketName, String prefix) throws IOException { + public MultipartUploadKeys getMultipartUploadKeys( + String volumeName, String bucketName, String prefix, String keyMarker, + String uploadIdMarker, int maxUploads, boolean noPagination) throws IOException { - Set response = new TreeSet<>(); - Set aborted = new TreeSet<>(); - - Iterator, CacheValue>> - cacheIterator = getMultipartInfoTable().cacheIterator(); + MultipartUploadKeys.Builder resultBuilder = MultipartUploadKeys.newBuilder(); + SortedSet responseKeys = new TreeSet<>(); + Set aborted = new HashSet<>(); String prefixKey = OmMultipartUpload.getDbKey(volumeName, bucketName, prefix); + if (StringUtil.isNotBlank(keyMarker)) { + prefix = keyMarker; + if (StringUtil.isNotBlank(uploadIdMarker)) { + prefix = prefix + OM_KEY_PREFIX + uploadIdMarker; + } + } + String seekKey = OmMultipartUpload.getDbKey(volumeName, bucketName, prefix); + + Iterator, CacheValue>> + cacheIterator = getMultipartInfoTable().cacheIterator(); // First iterate all the entries in cache. 
while (cacheIterator.hasNext()) { Map.Entry, CacheValue> cacheEntry = cacheIterator.next(); - if (cacheEntry.getKey().getCacheKey().startsWith(prefixKey)) { + String cacheKey = cacheEntry.getKey().getCacheKey(); + if (cacheKey.startsWith(prefixKey)) { // Check if it is marked for delete, due to abort mpu - if (cacheEntry.getValue().getCacheValue() != null) { - response.add(cacheEntry.getKey().getCacheKey()); + if (cacheEntry.getValue().getCacheValue() != null && + cacheKey.compareTo(seekKey) >= 0) { + responseKeys.add(cacheKey); } else { - aborted.add(cacheEntry.getKey().getCacheKey()); + aborted.add(cacheKey); } } } - // prefixed iterator will only iterate until keys match the prefix + int dbKeysCount = 0; + // the prefix iterator will only iterate keys that match the given prefix + // so we don't need to check if the key is started with prefixKey again try (TableIterator> iterator = getMultipartInfoTable().iterator(prefixKey)) { + iterator.seek(seekKey); - while (iterator.hasNext()) { + while (iterator.hasNext() && (noPagination || dbKeysCount < maxUploads + 1)) { KeyValue entry = iterator.next(); // If it is marked for abort, skip it. 
if (!aborted.contains(entry.getKey())) { - response.add(entry.getKey()); + responseKeys.add(entry.getKey()); + dbKeysCount++; } } } - return response; + if (noPagination) { + resultBuilder.setKeys(responseKeys); + return resultBuilder.build(); + } + + String lastKey = responseKeys.stream() + .skip(maxUploads) + .findFirst() + .orElse(null); + if (lastKey != null) { + // implies the keyset size is greater than maxUploads + OmMultipartUpload lastKeyMultipartUpload = OmMultipartUpload.from(lastKey); + resultBuilder.setNextKeyMarker(lastKeyMultipartUpload.getKeyName()) + .setNextUploadIdMarker(lastKeyMultipartUpload.getUploadId()) + .setIsTruncated(true); + + // keep the [0, maxUploads] keys + responseKeys = responseKeys.subSet(responseKeys.first(), lastKey); + } + + return resultBuilder + .setKeys(responseKeys) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 73043a89b39a..a72ce02d1ddb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -3710,7 +3710,9 @@ public OmMultipartUploadListParts listParts(final String volumeName, @Override public OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException { + String bucketName, + String prefix, String keyMarker, String uploadIdMarker, int maxUploads, boolean withPagination) + throws IOException { ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); @@ -3719,17 +3721,14 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, metrics.incNumListMultipartUploads(); try { - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(bucket.realVolume(), - bucket.realBucket(), prefix); - 
AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction - .LIST_MULTIPART_UPLOADS, auditMap)); + OmMultipartUploadList omMultipartUploadList = keyManager.listMultipartUploads(bucket.realVolume(), + bucket.realBucket(), prefix, keyMarker, uploadIdMarker, maxUploads, withPagination); + AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_MULTIPART_UPLOADS, auditMap)); return omMultipartUploadList; } catch (IOException ex) { metrics.incNumListMultipartUploadFails(); - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction - .LIST_MULTIPART_UPLOADS, auditMap, ex)); + AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_MULTIPART_UPLOADS, auditMap, ex)); throw ex; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index aa0ff41a9b78..422da953a4cf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -1015,9 +1015,9 @@ private ListMultipartUploadsResponse listMultipartUploads( ListMultipartUploadsRequest request) throws IOException { - OmMultipartUploadList omMultipartUploadList = - impl.listMultipartUploads(request.getVolume(), request.getBucket(), - request.getPrefix()); + OmMultipartUploadList omMultipartUploadList = impl.listMultipartUploads(request.getVolume(), request.getBucket(), + request.getPrefix(), + request.getKeyMarker(), request.getUploadIdMarker(), request.getMaxUploads(), request.getWithPagination()); List info = omMultipartUploadList .getUploads() @@ -1048,6 +1048,9 @@ private ListMultipartUploadsResponse listMultipartUploads( ListMultipartUploadsResponse response = ListMultipartUploadsResponse.newBuilder() .addAllUploadsList(info) + 
.setIsTruncated(omMultipartUploadList.isTruncated()) + .setNextKeyMarker(omMultipartUploadList.getNextKeyMarker()) + .setNextUploadIdMarker(omMultipartUploadList.getNextUploadIdMarker()) .build(); return response; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 3cb1af0d8766..b4c886147690 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -23,6 +23,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; @@ -231,7 +232,7 @@ public void listMultipartUploads() throws IOException { //WHEN OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volume, "bucket1", ""); + keyManager.listMultipartUploads(volume, "bucket1", "", "", "", 10, false); //THEN List uploads = omMultipartUploadList.getUploads(); @@ -252,11 +253,10 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { String volume = volumeName(); String bucket = "bucket"; - //GIVEN + // GIVEN createBucket(metadataManager, volume, bucket); createBucket(metadataManager, volume, bucket); - // Add few to cache and few to DB. 
addinitMultipartUploadToCache(volume, bucket, "dir/key1"); @@ -266,11 +266,11 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { initMultipartUpload(writeClient, volume, bucket, "dir/key4"); - //WHEN - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volume, bucket, ""); + // WHEN + OmMultipartUploadList omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "", "", "", 10, + false); - //THEN + // THEN List uploads = omMultipartUploadList.getUploads(); assertEquals(4, uploads.size()); assertEquals("dir/key1", uploads.get(0).getKeyName()); @@ -291,10 +291,9 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { OmMultipartInfo omMultipartInfo4 = initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey4"); - omMultipartUploadList = - keyManager.listMultipartUploads(volume, bucket, "dir/ozone"); + omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone", "", "", 10, false); - //THEN + // THEN uploads = omMultipartUploadList.getUploads(); assertEquals(4, uploads.size()); assertEquals("dir/ozonekey1", uploads.get(0).getKeyName()); @@ -307,10 +306,9 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { omMultipartInfo4.getUploadID()); // Now list. - omMultipartUploadList = - keyManager.listMultipartUploads(volume, bucket, "dir/ozone"); + omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone", "", "", 10, false); - //THEN + // THEN uploads = omMultipartUploadList.getUploads(); assertEquals(3, uploads.size()); assertEquals("dir/ozonekey1", uploads.get(0).getKeyName()); @@ -322,10 +320,9 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { omMultipartInfo3.getUploadID()); // Now list. 
- omMultipartUploadList = - keyManager.listMultipartUploads(volume, bucket, "dir/ozone"); + omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone", "", "", 10, false); - //THEN + // THEN uploads = omMultipartUploadList.getUploads(); assertEquals(2, uploads.size()); assertEquals("dir/ozonekey1", uploads.get(0).getKeyName()); @@ -336,7 +333,7 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { @Test public void listMultipartUploadsWithPrefix() throws IOException { - //GIVEN + // GIVEN final String volumeName = volumeName(); createBucket(metadataManager, volumeName, "bucket1"); createBucket(metadataManager, volumeName, "bucket2"); @@ -349,17 +346,90 @@ public void listMultipartUploadsWithPrefix() throws IOException { initMultipartUpload(writeClient, volumeName, "bucket2", "dir/key1"); - //WHEN - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volumeName, "bucket1", "dir"); + // WHEN + OmMultipartUploadList omMultipartUploadList = keyManager.listMultipartUploads(volumeName, "bucket1", "dir", "", + "", 10, false); - //THEN + // THEN List uploads = omMultipartUploadList.getUploads(); assertEquals(2, uploads.size()); assertEquals("dir/key1", uploads.get(0).getKeyName()); assertEquals("dir/key2", uploads.get(1).getKeyName()); } + @Test + public void testListMultipartUploadsWithPagination() throws IOException { + // GIVEN + final String volumeName = volumeName(); + final String bucketName = "bucket1"; + createBucket(metadataManager, volumeName, bucketName); + + // Create 25 multipart uploads to test pagination + for (int i = 0; i < 25; i++) { + String key = String.format("key-%03d", i); // pad with zeros for proper sorting + initMultipartUpload(writeClient, volumeName, bucketName, key); + } + + // WHEN - First page (10 entries) + OmMultipartUploadList firstPage = keyManager.listMultipartUploads( + volumeName, bucketName, "", "", "", 10, true); + + // THEN + assertEquals(10, 
firstPage.getUploads().size()); + assertTrue(firstPage.isTruncated()); + assertNotNull(firstPage.getNextKeyMarker()); + assertNotNull(firstPage.getNextUploadIdMarker()); + + // Verify first page content + for (int i = 0; i < 10; i++) { + assertEquals(String.format("key-%03d", i), + firstPage.getUploads().get(i).getKeyName()); + } + + // WHEN - Second page using markers from first page + OmMultipartUploadList secondPage = keyManager.listMultipartUploads( + volumeName, bucketName, "", + firstPage.getNextKeyMarker(), + firstPage.getNextUploadIdMarker(), + 10, true); + + // THEN + assertEquals(10, secondPage.getUploads().size()); + assertTrue(secondPage.isTruncated()); + + // Verify second page content + for (int i = 0; i < 10; i++) { + assertEquals(String.format("key-%03d", i + 10), + secondPage.getUploads().get(i).getKeyName()); + } + + // WHEN - Last page + OmMultipartUploadList lastPage = keyManager.listMultipartUploads( + volumeName, bucketName, "", + secondPage.getNextKeyMarker(), + secondPage.getNextUploadIdMarker(), + 10, true); + + // THEN + assertEquals(5, lastPage.getUploads().size()); + assertFalse(lastPage.isTruncated()); + assertEquals("", lastPage.getNextKeyMarker()); + assertEquals("", lastPage.getNextUploadIdMarker()); + + // Verify last page content + for (int i = 0; i < 5; i++) { + assertEquals(String.format("key-%03d", i + 20), + lastPage.getUploads().get(i).getKeyName()); + } + + // Test with no pagination + OmMultipartUploadList noPagination = keyManager.listMultipartUploads( + volumeName, bucketName, "", "", "", 10, false); + + assertEquals(25, noPagination.getUploads().size()); + assertFalse(noPagination.isTruncated()); + } + private void createBucket(OMMetadataManager omMetadataManager, String volume, String bucket) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 
b995ecdc2d26..47305fddfd9a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -28,7 +28,9 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -39,6 +41,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -59,10 +62,12 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; +import org.apache.hadoop.ozone.om.helpers.MultipartUploadKeys; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -1106,4 +1111,94 @@ public void testListSnapshotDoesNotListOtherBucketSnapshots() assertTrue(snapshotInfo.getName().startsWith(snapshotName2)); } } + + @Test + public void testGetMultipartUploadKeys() throws Exception 
{ + String volumeName = "vol1"; + String bucketName = "bucket1"; + String prefix = "dir/"; + int maxUploads = 10; + + // Create volume and bucket + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); + addBucketsToCache(volumeName, bucketName); + + List expectedKeys = new ArrayList<>(); + for (int i = 0; i < 25; i++) { + String key = prefix + "key" + i; + String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); + + // Create multipart key info + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, key, + RatisReplicationConfig.getInstance(ONE)) + .build(); + + OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils + .createOmMultipartKeyInfo(uploadId, Time.now(), + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, 0L); + + if (i % 2 == 0) { + OMRequestTestUtils.addMultipartInfoToTable(false, keyInfo, + multipartKeyInfo, 0L, omMetadataManager); + } else { + OMRequestTestUtils.addMultipartInfoToTableCache(keyInfo, + multipartKeyInfo, 0L, omMetadataManager); + } + + expectedKeys.add(key); + } + Collections.sort(expectedKeys); + + // List first page without markers + MultipartUploadKeys result = omMetadataManager.getMultipartUploadKeys( + volumeName, bucketName, prefix, null, null, maxUploads, false); + + assertEquals(maxUploads, result.getKeys().size()); + assertTrue(result.isTruncated()); + assertNotNull(result.getNextKeyMarker()); + assertNotNull(result.getNextUploadIdMarker()); + + // List next page using markers from first page + MultipartUploadKeys nextPage = omMetadataManager.getMultipartUploadKeys( + volumeName, bucketName, prefix, + result.getNextKeyMarker(), + result.getNextUploadIdMarker(), + maxUploads, false); + + assertEquals(maxUploads, nextPage.getKeys().size()); + assertTrue(nextPage.isTruncated()); + + // List with different prefix + MultipartUploadKeys differentPrefix = omMetadataManager.getMultipartUploadKeys( + volumeName, bucketName, "different/", null, null, maxUploads, 
false); + + assertEquals(0, differentPrefix.getKeys().size()); + assertFalse(differentPrefix.isTruncated()); + + // List all entries with large maxUploads + MultipartUploadKeys allEntries = omMetadataManager.getMultipartUploadKeys( + volumeName, bucketName, prefix, null, null, 100, false); + + assertEquals(25, allEntries.getKeys().size()); + assertFalse(allEntries.isTruncated()); + + // Verify all keys are present + List actualKeys = new ArrayList<>(); + for (String dbKey : allEntries.getKeys()) { + OmMultipartUpload mpu = OmMultipartUpload.from(dbKey); + actualKeys.add(mpu.getKeyName()); + } + Collections.sort(actualKeys); + assertEquals(expectedKeys, actualKeys); + + // Test with no pagination + MultipartUploadKeys noPagination = omMetadataManager.getMultipartUploadKeys( + volumeName, bucketName, prefix, null, null, 10, true); + + assertEquals(25, noPagination.getKeys().size()); + assertFalse(noPagination.isTruncated()); + + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 394ddf029c29..1ce42bacd9e6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -397,6 +397,25 @@ public static String addMultipartInfoToTable(boolean addToCache, return ozoneDBKey; } + /** + * Add multipart info entry to the multipartInfoTable. 
+ * + * @throws Exception + */ + public static String addMultipartInfoToTableCache( + OmKeyInfo omKeyInfo, OmMultipartKeyInfo omMultipartKeyInfo, + long trxnLogIndex, OMMetadataManager omMetadataManager) throws IOException { + String ozoneDBKey = omMetadataManager.getMultipartKey( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), + omKeyInfo.getKeyName(), omMultipartKeyInfo.getUploadID()); + + omMetadataManager.getMultipartInfoTable() + .addCacheEntry(new CacheKey<>(ozoneDBKey), + CacheValue.get(trxnLogIndex, omMultipartKeyInfo)); + + return ozoneDBKey; + } + public static PartKeyInfo createPartKeyInfo(String volumeName, String bucketName, String keyName, String uploadId, int partNumber) { return PartKeyInfo.newBuilder() diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 274a85d5aaef..466293eeacad 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -114,6 +114,9 @@ public Response get( @QueryParam("start-after") String startAfter, @QueryParam("uploads") String uploads, @QueryParam("acl") String aclMarker, + @QueryParam("key-marker") String keyMarker, + @QueryParam("upload-id-marker") String uploadIdMarker, + @DefaultValue("1000") @QueryParam("max-uploads") int maxUploads, @Context HttpHeaders hh) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_BUCKET; @@ -136,7 +139,7 @@ public Response get( if (uploads != null) { s3GAction = S3GAction.LIST_MULTIPART_UPLOAD; - return listMultipartUploads(bucketName, prefix); + return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads); } if (prefix == null) { @@ -326,9 +329,18 @@ public Response put(@PathParam("bucket") String 
bucketName, } public Response listMultipartUploads( - @PathParam("bucket") String bucketName, - @QueryParam("prefix") String prefix) + String bucketName, + String prefix, + String keyMarker, + String uploadIdMarker, + int maxUploads) throws OS3Exception, IOException { + + if (maxUploads < 1 || maxUploads > 1000) { + throw newError(S3ErrorTable.INVALID_ARGUMENT, "max-uploads", + new Exception("max-uploads must be between 1 and 1000")); + } + long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.LIST_MULTIPART_UPLOAD; @@ -336,10 +348,17 @@ public Response listMultipartUploads( try { OzoneMultipartUploadList ozoneMultipartUploadList = - bucket.listMultipartUploads(prefix); + bucket.listMultipartUploads(prefix, keyMarker, uploadIdMarker, maxUploads); ListMultipartUploadsResult result = new ListMultipartUploadsResult(); result.setBucket(bucketName); + result.setKeyMarker(keyMarker); + result.setUploadIdMarker(uploadIdMarker); + result.setNextKeyMarker(ozoneMultipartUploadList.getNextKeyMarker()); + result.setPrefix(prefix); + result.setNextUploadIdMarker(ozoneMultipartUploadList.getNextUploadIdMarker()); + result.setMaxUploads(maxUploads); + result.setTruncated(ozoneMultipartUploadList.isTruncated()); ozoneMultipartUploadList.getUploads().forEach(upload -> result.addUpload( new ListMultipartUploadsResult.Upload( diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java index e7052fbf090b..98e9f98608b4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java @@ -48,6 +48,9 @@ public class ListMultipartUploadsResult { @XmlElement(name = "NextKeyMarker") private String nextKeyMarker; + @XmlElement(name = "Prefix") + 
private String prefix; + @XmlElement(name = "NextUploadIdMarker") private String nextUploadIdMarker; @@ -92,6 +95,14 @@ public void setNextKeyMarker(String nextKeyMarker) { this.nextKeyMarker = nextKeyMarker; } + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } + public String getNextUploadIdMarker() { return nextUploadIdMarker; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 622c1121f244..ff3331b2ae3f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -382,7 +382,10 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, @Override public OzoneMultipartUploadList listMultipartUploads(String volumename, String bucketName, - String prefix) + String prefix, + String keyMarker, + String uploadIdMarker, + int maxUploads) throws IOException { return null; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index bbc380a20de7..1099fbea98cd 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -82,7 +82,7 @@ public void testGetAcl() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); Response response = bucketEndpoint.get(BUCKET_NAME, null, null, null, 0, null, - null, null, null, ACL_MARKER, headers); + null, null, null, ACL_MARKER, null, null, 0, headers); assertEquals(HTTP_OK, response.getStatus()); System.out.println(response.getEntity()); } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index 81960b9d5276..b3589afbfd46 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -54,7 +54,7 @@ public void listRoot() throws OS3Exception, IOException { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "", - null, null, null, null, null) + null, null, null, null, null, null, 0, null) .getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -79,7 +79,7 @@ public void listDir() throws OS3Exception, IOException { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, - "dir1", null, null, null, null, null).getEntity(); + "dir1", null, null, null, null, null, null, 0, null).getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("dir1/", @@ -103,7 +103,7 @@ public void listSubDir() throws OS3Exception, IOException { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket .get("b1", "/", null, null, 100, "dir1/", null, - null, null, null, null) + null, null, null, null, null, 0, null) .getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -139,7 +139,7 @@ public void listObjectOwner() throws OS3Exception, IOException { getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, - "key", null, null, null, null, null).getEntity(); + "key", null, null, null, null, null, null, 0, null).getEntity(); assertEquals(2, getBucketResponse.getContents().size()); assertEquals(user1.getShortUserName(), @@ -163,7 +163,7 @@ public void 
listWithPrefixAndDelimiter() throws OS3Exception, IOException { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, - "dir1", null, null, null, null, null).getEntity(); + "dir1", null, null, null, null, null, null, 0, null).getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); @@ -183,7 +183,7 @@ public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, - "", null, null, null, null, null).getEntity(); + "", null, null, null, null, null, null, 0, null).getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); assertEquals("file2", getBucketResponse.getContents().get(0) @@ -204,7 +204,7 @@ public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "dir1bh", - null, "dir1/dir2/file2", null, null, null).getEntity(); + null, "dir1/dir2/file2", null, null, null, null, 0, null).getEntity(); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -224,7 +224,7 @@ public void listWithPrefixAndEmptyStrDelimiter() // Should behave the same if delimiter is null ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "", null, null, 100, "dir1/", - null, null, null, null, null).getEntity(); + null, null, null, null, null, null, 0, null).getEntity(); assertEquals(0, getBucketResponse.getCommonPrefixes().size()); assertEquals(4, getBucketResponse.getContents().size()); @@ -256,7 +256,7 @@ public void listWithContinuationToken() throws OS3Exception, IOException { // First time ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, - "", null, null, null, null, null).getEntity(); + "", null, null, null, null, null, 
null, 0, null).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); @@ -265,7 +265,7 @@ public void listWithContinuationToken() throws OS3Exception, IOException { String continueToken = getBucketResponse.getNextToken(); getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, - "", continueToken, null, null, null, null).getEntity(); + "", continueToken, null, null, null, null, null, 0, null).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); @@ -275,7 +275,7 @@ public void listWithContinuationToken() throws OS3Exception, IOException { //3rd time getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, - "", continueToken, null, null, null, null).getEntity(); + "", continueToken, null, null, null, null, null, 0, null).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getContents().size()); @@ -308,7 +308,7 @@ public void listWithContinuationTokenDirBreak() getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, - "test/", null, null, null, null, null).getEntity(); + "test/", null, null, null, null, null, null, 0, null).getEntity(); assertEquals(0, getBucketResponse.getContents().size()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -320,7 +320,7 @@ public void listWithContinuationTokenDirBreak() getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, "test/", getBucketResponse.getNextToken(), null, null, null, - null).getEntity(); + null, null, 0, null).getEntity(); assertEquals(1, getBucketResponse.getContents().size()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("test/dir3/", @@ -352,7 +352,7 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { // First time ListObjectResponse getBucketResponse = 
(ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, - "dir", null, null, null, null, null).getEntity(); + "dir", null, null, null, null, null, null, 0, null).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -361,7 +361,7 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { String continueToken = getBucketResponse.getNextToken(); getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, - "dir", continueToken, null, null, null, null).getEntity(); + "dir", continueToken, null, null, null, null, null, 0, null).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -369,7 +369,7 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { continueToken = getBucketResponse.getNextToken(); getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, - "dir", continueToken, null, null, null, null).getEntity(); + "dir", continueToken, null, null, null, null, null, 0, null).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -389,7 +389,7 @@ public void listWithContinuationTokenFail() throws IOException { getBucket.setRequestIdentifier(new RequestIdentifier()); OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get("b1", - "/", null, null, 2, "dir", "random", null, null, null, null) + "/", null, null, 2, "dir", "random", null, null, null, null, null, 1000, null) .getEntity(), "listWithContinuationTokenFail"); assertEquals("random", e.getResource()); assertEquals("Invalid Argument", e.getErrorMessage()); @@ -409,7 +409,7 @@ public void testStartAfter() throws IOException, OS3Exception { ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, 1000, - null, null, null, null, null, 
null).getEntity(); + null, null, null, null, null, null, null, 0, null).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(5, getBucketResponse.getContents().size()); @@ -420,14 +420,14 @@ public void testStartAfter() throws IOException, OS3Exception { getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, - 1000, null, null, startAfter, null, null, null).getEntity(); + 1000, null, null, startAfter, null, null, null, null, 0, null).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(4, getBucketResponse.getContents().size()); getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, - 1000, null, null, "random", null, null, null).getEntity(); + 1000, null, null, "random", null, null, null, null, 0, null).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(0, getBucketResponse.getContents().size()); @@ -474,7 +474,7 @@ public void testEncodingType() throws IOException, OS3Exception { ListObjectResponse response = (ListObjectResponse) getBucket.get( "b1", delimiter, encodingType, null, 1000, prefix, - null, startAfter, null, null, null).getEntity(); + null, startAfter, null, null, null, null, 0, null).getEntity(); // Assert encodingType == url. // The Object name will be encoded by ObjectKeyNameAdapter @@ -492,7 +492,7 @@ public void testEncodingType() throws IOException, OS3Exception { response = (ListObjectResponse) getBucket.get( "b1", delimiter, null, null, 1000, prefix, - null, startAfter, null, null, null).getEntity(); + null, startAfter, null, null, null, null, 0, null).getEntity(); // Assert encodingType == null. 
// The Object name will not be encoded by ObjectKeyNameAdapter @@ -517,7 +517,7 @@ public void testEncodingTypeException() throws IOException { getBucket.setRequestIdentifier(new RequestIdentifier()); OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get( "b1", null, "unSupportType", null, 1000, null, - null, null, null, null, null).getEntity()); + null, null, null, null, null, null, 0, null).getEntity()); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e.getCode()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index e9b334a0e8bd..3804e412fe59 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.anyString; @@ -142,12 +143,12 @@ public void testDeleteBucket() throws IOException { @Test public void testListMultiUpload() throws IOException { when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); - doThrow(exception).when(bucket).listMultipartUploads(anyString()); + doThrow(exception).when(bucket).listMultipartUploads(anyString(), anyString(), anyString(), anyInt()); BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - bucketEndpoint.listMultipartUploads("bucketName", "prefix")); + bucketEndpoint.listMultipartUploads("bucketName", "prefix", "", "", 10)); assertEquals(HTTP_FORBIDDEN, 
e.getHttpCode()); } @@ -162,7 +163,7 @@ public void testListKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( "bucketName", null, null, null, 1000, - null, null, null, null, null, null)); + null, null, null, null, null, null, null, 0, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -206,8 +207,8 @@ public void testGetAcl() throws Exception { .setClient(client) .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "bucketName", null, null, null, 1000, null, null, null, null, "acl", - null), "Expected OS3Exception with FORBIDDEN http code."); + "bucketName", null, null, null, 1000, null, null, null, null, "acl", + null, null, 0, null), "Expected OS3Exception with FORBIDDEN http code."); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 7c6afc509be5..0081d0d5abc0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -142,7 +142,7 @@ public void testGetBucketSuccess() throws Exception { bucketEndpoint.get(bucketName, null, null, null, 1000, null, null, "random", null, - null, null).getEntity(); + null, null, null, 0, null).getEntity(); long curMetric = metrics.getGetBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -155,7 +155,7 @@ public void testGetBucketFailure() throws Exception { // Searching for a bucket that does not exist OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( "newBucket", null, null, null, 1000, null, null, "random", null, - null, null)); + null, null, null, 0, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); 
assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -219,7 +219,7 @@ public void testGetAclSuccess() throws Exception { Response response = bucketEndpoint.get(bucketName, null, null, null, 0, null, null, - null, null, "acl", null); + null, null, "acl", null, null, 0, null); long curMetric = metrics.getGetAclSuccess(); assertEquals(HTTP_OK, response.getStatus()); assertEquals(1L, curMetric - oriMetric); @@ -232,7 +232,7 @@ public void testGetAclFailure() throws Exception { // Failing the getACL endpoint by applying ACL on a non-Existent Bucket OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( "random_bucket", null, null, null, 0, null, - null, null, null, "acl", null)); + null, null, null, "acl", null, null, 0, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage());