Merged

32 commits
2743e82  Add keyMarker, uploadIdMarker, maxUploads into ListMultipartUploads s… (peterxcli, Feb 5, 2025)
fd44b3b  Add keyMarker, uploadIdMarker, and maxUploads to related interface (peterxcli, Feb 5, 2025)
552590c  Add new arguments into data transform process (peterxcli, Feb 5, 2025)
274dfcb  Implement pagination logic for getMultipartUploadKeys in om metadata … (peterxcli, Feb 5, 2025)
659c363  Include max-uploads, key-marker and upload-id-marker as part of listM… (peterxcli, Feb 5, 2025)
0c13a3f  Updates tests to use new client interface (peterxcli, Feb 5, 2025)
aab4c1c  Fix checkstyle (peterxcli, Feb 5, 2025)
0318f15  Fix findbugs (peterxcli, Feb 5, 2025)
448f066  Fix the logic of prefix key building with keyMarker and uploadIdMarker (peterxcli, Feb 6, 2025)
edae70b  Add maxUploads to OzoneMultipartUploadList and related methods (peterxcli, Feb 8, 2025)
faecf3e  Add @Min validation for max-uploads parameter in listMultipartUploads (peterxcli, Feb 8, 2025)
17fbd65  Refactor listMultipartUploads pagination logic in OmMetadataManagerImpl (peterxcli, Feb 8, 2025)
76e4999  Add test for multipart upload list pagination in FSO (peterxcli, Feb 8, 2025)
155ccac  Reorder parameters in BucketEndpoint#listMultipartUploads method sign… (peterxcli, Feb 9, 2025)
c7c8b82  fix typo (peterxcli, Feb 9, 2025)
abf7e88  List keys only from DB and check tombstone from table partial cache (peterxcli, Feb 9, 2025)
bfd925a  Merge remote-tracking branch 'upstream/master' into hdds11530-support… (peterxcli, Feb 16, 2025)
7a9731a  create a new file for MultipartUploadKeys (peterxcli, Feb 16, 2025)
6fcdf5f  list multipartinfo should get all entries from cache, too (peterxcli, Feb 16, 2025)
6b8843a  Merge remote-tracking branch 'origin/master' into hdds11530-support-l… (adoroszlai, Feb 17, 2025)
1b6bdf7  Move listMultipartUploads params to GET method handler (peterxcli, Feb 17, 2025)
a7a47e3  Add pagination test for keyManagerImpl and metadataManagerImpl (peterxcli, Feb 17, 2025)
1067b2e  Remove maxUploads from OzoneMultipartUploadList (peterxcli, Feb 17, 2025)
88ba8a0  Return listMultiPartUpload req params as response (peterxcli, Feb 17, 2025)
024ac94  AbstractS3SDKV1Tests to test the listMultipartUploads using AWS SDK (peterxcli, Feb 17, 2025)
bb7fd8d  Add listmultipartUpload robot test and some minor fix (peterxcli, Feb 18, 2025)
33b2ee6  fix findbug (peterxcli, Feb 18, 2025)
59ab087  Merge remote-tracking branch 'origin/master' into hdds11530-support-l… (adoroszlai, Feb 18, 2025)
8f4a113  Add backward compatibility (peterxcli, Feb 18, 2025)
1420d92  Rename `noPagination` to `withPagination` since the old S3G will not … (peterxcli, Feb 19, 2025)
d72d0f8  Seperate the base case and pagination for ListMultipartUpload s3 sdk … (peterxcli, Feb 19, 2025)
62f06b3  Fix TestKeyManagerUnit#testListMultipartUploadsWithPagination (peterxcli, Feb 19, 2025)
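For context while reading the diffs below, this is roughly the client-side flow the PR enables through the S3 Gateway: request a page of in-progress multipart uploads with max-uploads, then resume from the returned next-key-marker and next-upload-id-marker until the listing is no longer truncated. The sketch is illustrative only and is not code from this PR; it assumes an AWS SDK v1 AmazonS3 client already configured against the gateway, and the class and method names here (ListUploadsExample, listAllUploads) are made up for the example.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;
import com.amazonaws.services.s3.model.MultipartUploadListing;

public final class ListUploadsExample {

  // Pages through all in-progress multipart uploads of a bucket, 10 per request.
  // Assumes `s3` points at an Ozone S3 Gateway with this PR applied.
  public static void listAllUploads(AmazonS3 s3, String bucket) {
    String keyMarker = null;
    String uploadIdMarker = null;
    MultipartUploadListing listing;
    do {
      listing = s3.listMultipartUploads(new ListMultipartUploadsRequest(bucket)
          .withMaxUploads(10)                    // max-uploads: page size
          .withKeyMarker(keyMarker)              // key-marker: resume after this key
          .withUploadIdMarker(uploadIdMarker));  // upload-id-marker: resume within that key
      for (MultipartUpload upload : listing.getMultipartUploads()) {
        System.out.println(upload.getKey() + " " + upload.getUploadId());
      }
      // The response carries the markers for the next page; loop until no longer truncated.
      keyMarker = listing.getNextKeyMarker();
      uploadIdMarker = listing.getNextUploadIdMarker();
    } while (listing.isTruncated());
  }
}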
@@ -920,22 +920,24 @@ public void testListMultipartUploadsPagination() throws Exception {

     // Test full pagination process
     final int maxUploads = 10;
-    final int expectedTruncated = 2;
-    int truncatedCount = 0;
+    final int expectedPages = 3;
+    int pageCount = 0;
     String keyMarker = "";
     String uploadIdMarker = "";
     Set<String> retrievedKeys = new HashSet<>();
-    boolean hasMore = true;
+    boolean isTruncated = true;

-    while (hasMore) {
+    do {
       OzoneMultipartUploadList result = bucket.listMultipartUploads(
           "dir", keyMarker, uploadIdMarker, maxUploads);

-      assertThat(result.getUploads())
-          .as("Number of uploads should not exceed maxUploads")
-          .hasSizeLessThanOrEqualTo(maxUploads);
-
-      assertEquals(result.getUploads().size(), result.getMaxUploads());
+      if (pageCount < 2) {
+        assertEquals(maxUploads, result.getUploads().size());
+        assertTrue(result.isTruncated());
+      } else {
+        assertEquals(numOfKeys - pageCount * maxUploads, result.getUploads().size());
+        assertFalse(result.isTruncated());
+      }

       for (OzoneMultipartUpload upload : result.getUploads()) {
         String key = upload.getKeyName();
@@ -947,13 +949,13 @@ public void testListMultipartUploadsPagination() throws Exception {
       // Update markers for next iteration
       keyMarker = result.getNextKeyMarker();
       uploadIdMarker = result.getNextUploadIdMarker();
-      hasMore = result.isTruncated();
+      isTruncated = result.isTruncated();

-      truncatedCount += result.isTruncated() ? 1 : 0;
-    }
+      pageCount++;
+    } while (isTruncated);

     assertEquals(keys.size(), retrievedKeys.size());
-    assertEquals(expectedTruncated, truncatedCount);
+    assertEquals(expectedPages, pageCount);
     assertThat(retrievedKeys.stream().sorted().collect(Collectors.toList()))
         .as("Retrieved keys should match expected keys in order")
         .isEqualTo(keys.stream().sorted().collect(Collectors.toList()));
@@ -74,6 +74,7 @@
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.utils.InputSubstream;
 import org.apache.ozone.test.OzoneTestBase;
+import org.eclipse.jetty.util.StringUtil;
 import org.junit.jupiter.api.MethodOrderer;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.TestMethodOrder;
@@ -95,10 +96,12 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.stream.Collectors;

 import static org.apache.hadoop.ozone.OzoneConsts.MB;
@@ -663,33 +666,93 @@ public void testLowLevelMultipartUpload(@TempDir Path tempDir) throws Exception
   @Test
   public void testListMultipartUploads() {
     final String bucketName = getBucketName();
-    final String multipartKey1 = getKeyName("multipart1");
-    final String multipartKey2 = getKeyName("multipart2");
+    final String multipartKeyPrefix = getKeyName("multipart");

     s3Client.createBucket(bucketName);

-    List<String> uploadIds = new ArrayList<>();
+    // Create 25 multipart uploads to test pagination
+    List<String> allKeys = new ArrayList<>();
+    Map<String, String> keyToUploadId = new HashMap<>();

-    String uploadId1 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null);
-    uploadIds.add(uploadId1);
-    String uploadId2 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null);
-    uploadIds.add(uploadId2);
-    // TODO: Currently, Ozone sorts based on uploadId instead of MPU init time within the same key.
-    // Remove this sorting step once HDDS-11532 has been implemented
-    Collections.sort(uploadIds);
-    String uploadId3 = initiateMultipartUpload(bucketName, multipartKey2, null, null, null);
-    uploadIds.add(uploadId3);
+    for (int i = 0; i < 25; i++) {
+      String key = String.format("%s-%03d", multipartKeyPrefix, i);
+      allKeys.add(key);
+      String uploadId = initiateMultipartUpload(bucketName, key, null, null, null);
+      keyToUploadId.put(key, uploadId);
+    }
+    Collections.sort(allKeys);
+
+    // Test pagination with maxUploads=10
+    Set<String> retrievedKeys = new HashSet<>();
+    String keyMarker = null;
+    String uploadIdMarker = null;
+    boolean truncated = true;
+    int pageCount = 0;

-    // TODO: Add test for max uploads threshold and marker once HDDS-11530 has been implemented
-    ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName);
+    do {
+      ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(bucketName)
+          .withMaxUploads(10)
+          .withKeyMarker(keyMarker)
+          .withUploadIdMarker(uploadIdMarker);

-    MultipartUploadListing result = s3Client.listMultipartUploads(listMultipartUploadsRequest);
+      MultipartUploadListing result = s3Client.listMultipartUploads(request);

-    List<String> listUploadIds = result.getMultipartUploads().stream()
-        .map(MultipartUpload::getUploadId)
-        .collect(Collectors.toList());
+      // Verify page size
+      if (pageCount < 2) {
+        assertEquals(10, result.getMultipartUploads().size());
+        assertTrue(result.isTruncated());
+      } else {
+        assertEquals(5, result.getMultipartUploads().size());
+        assertFalse(result.isTruncated());
+      }
+
+      // Collect keys and verify uploadIds
+      for (MultipartUpload upload : result.getMultipartUploads()) {
+        String key = upload.getKey();
+        retrievedKeys.add(key);
+        assertEquals(keyToUploadId.get(key), upload.getUploadId());
+      }

-    assertEquals(uploadIds, listUploadIds);
+      // Verify response
+      assertNull(result.getPrefix());
+      assertEquals(result.getUploadIdMarker(), uploadIdMarker);
+      assertEquals(result.getKeyMarker(), keyMarker);
+      assertEquals(result.getMaxUploads(), 10);
+
+      // Update markers for next page
+      keyMarker = result.getNextKeyMarker();
+      uploadIdMarker = result.getNextUploadIdMarker();
+
+      truncated = result.isTruncated();
+      pageCount++;
+
+    } while (truncated);
+
+    // Verify pagination results
+    assertEquals(3, pageCount, "Should have exactly 3 pages");
+    assertEquals(25, retrievedKeys.size(), "Should retrieve all uploads");
+    assertEquals(
+        allKeys,
+        retrievedKeys.stream().sorted().collect(Collectors.toList()),
+        "Retrieved keys should match expected keys in order");
+
+    // Test with prefix
+    String prefix = multipartKeyPrefix + "-01";
+    ListMultipartUploadsRequest prefixRequest = new ListMultipartUploadsRequest(bucketName)
+        .withPrefix(prefix);
+
+    MultipartUploadListing prefixResult = s3Client.listMultipartUploads(prefixRequest);
+
+    assertEquals(prefix, prefixResult.getPrefix());
+    assertEquals(
+        Arrays.asList(multipartKeyPrefix + "-010", multipartKeyPrefix + "-011",
+            multipartKeyPrefix + "-012", multipartKeyPrefix + "-013",
+            multipartKeyPrefix + "-014", multipartKeyPrefix + "-015",
+            multipartKeyPrefix + "-016", multipartKeyPrefix + "-017",
+            multipartKeyPrefix + "-018", multipartKeyPrefix + "-019"),
+        prefixResult.getMultipartUploads().stream()
+            .map(MultipartUpload::getKey)
+            .collect(Collectors.toList()));
   }

   @Test