From 66cee2b7c3146907811f94704ab83ed6f8a5648f Mon Sep 17 00:00:00 2001
From: Slava Tutrinov
Date: Tue, 14 Nov 2023 09:30:20 +0300
Subject: [PATCH 01/11] HDDS-9680. provide a loaded key part's ETag as an md5 hash (S3G multipart upload)

---
 .../OzoneMultipartUploadPartListParts.java | 9 +-
 .../hadoop/ozone/client/rpc/RpcClient.java | 3 +-
 .../OmMultipartCommitUploadPartInfo.java | 11 +-
 .../OmMultipartUploadCompleteList.java | 4 +-
 .../helpers/OmMultipartUploadListParts.java | 3 +-
 .../hadoop/ozone/om/helpers/OmPartInfo.java | 11 +-
 ...ManagerProtocolClientSideTranslatorPB.java | 3 +-
 .../main/smoketest/s3/MultipartUpload.robot | 33 ++--
 .../TestOzoneFSWithObjectStoreCreate.java | 7 +-
 .../client/rpc/TestOzoneAtRestEncryption.java | 16 +-
 ...TestOzoneClientMultipartUploadWithFSO.java | 149 +++++++++++-------
 .../rpc/TestOzoneRpcClientAbstract.java | 118 ++++++++------
 .../rpc/TestOzoneRpcClientWithRatis.java | 9 +-
 .../ozone/om/TestObjectStoreWithLegacyFS.java | 11 +-
 .../TestOzoneManagerHAWithStoppedNodes.java | 9 +-
 .../src/main/proto/OmClientProtocol.proto | 7 +-
 .../src/main/resources/proto.lock | 12 ++
 .../hadoop/ozone/om/KeyManagerImpl.java | 5 +-
 .../S3MultipartUploadCommitPartRequest.java | 3 +-
 .../S3MultipartUploadCompleteRequest.java | 14 +-
 .../ozone/om/request/OMRequestTestUtils.java | 38 ++++-
 .../TestS3MultipartUploadCompleteRequest.java | 15 +-
 .../s3/multipart/TestS3MultipartResponse.java | 2 +-
 .../TestMultipartUploadCleanupService.java | 3 +
 .../om/service/TestOpenKeyCleanupService.java | 3 +
 .../CompleteMultipartUploadRequest.java | 6 +-
 .../ozone/s3/endpoint/ObjectEndpoint.java | 10 +-
 .../s3/endpoint/ObjectEndpointStreaming.java | 14 --
 .../hadoop/ozone/client/OzoneBucketStub.java | 30 +++-
 .../client/OzoneDataStreamOutputStub.java | 3 +-
 .../ozone/client/OzoneOutputStreamStub.java | 4 +-
 ...eteMultipartUploadRequestUnmarshaller.java | 4 +-
 .../endpoint/TestMultipartUploadComplete.java | 4 +-
 .../endpoint/TestMultipartUploadWithCopy.java | 11 +-
 34 files changed, 384 insertions(+), 200 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
index c1902cdb60d2..67f8edf31408 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
@@ -104,12 +104,15 @@ public static class PartInfo {
     private String partName;
     private long modificationTime;
     private long size;
+    private String eTag;
 
-    public PartInfo(int number, String name, long time, long size) {
+    public PartInfo(int number, String name, long time, long size,
+        String eTag) {
       this.partNumber = number;
       this.partName = name;
       this.modificationTime = time;
       this.size = size;
+      this.eTag = eTag;
     }
 
     public int getPartNumber() {
@@ -127,5 +130,9 @@ public long getModificationTime() {
     public long getSize() {
       return size;
     }
+
+    public String getETag() {
+      return eTag;
+    }
   }
 }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index c85e2ed38900..6442c934c9d9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1944,7 +1944,8 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName,
       ozoneMultipartUploadPartListParts.addPart(
           new OzoneMultipartUploadPartListParts.PartInfo(
               omPartInfo.getPartNumber(), omPartInfo.getPartName(),
-              omPartInfo.getModificationTime(), omPartInfo.getSize()));
+              omPartInfo.getModificationTime(), omPartInfo.getSize(),
+              omPartInfo.geteTag()));
     }
     return ozoneMultipartUploadPartListParts;
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
index 646cb421e434..bbf1a1bdae53 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
@@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo {
 
   private final String partName;
 
-  public OmMultipartCommitUploadPartInfo(String name) {
-    this.partName = name;
+  private final String eTag;
+
+  public OmMultipartCommitUploadPartInfo(String partName, String eTag) {
+    this.partName = partName;
+    this.eTag = eTag;
+  }
+
+  public String getETag() {
+    return eTag;
   }
 
   public String getPartName() {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
index 63e6353c1850..897bae07415b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
@@ -56,8 +56,8 @@ public Map getMultipartMap() {
    */
   public List getPartsList() {
     List partList = new ArrayList<>();
-    multipartMap.forEach((partNumber, partName) -> partList.add(Part
-        .newBuilder().setPartName(partName).setPartNumber(partNumber).build()));
+    multipartMap.forEach((partNumber, eTag) -> partList.add(Part
+        .newBuilder().setETag(eTag).setPartNumber(partNumber).build()));
     return partList;
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
index fbf519c22682..0ba0e26acda2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
@@ -79,6 +79,7 @@ public void addPartList(List partInfos) {
   public void addProtoPartList(List partInfos) {
     partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo(
         partInfo.getPartNumber(), partInfo.getPartName(),
-        partInfo.getModificationTime(), partInfo.getSize())));
+        partInfo.getModificationTime(), partInfo.getSize(),
+        partInfo.getETag())));
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
index 2d753a5caa5a..d0f6b0c7198e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
@@ -28,12 +28,15 @@ public class OmPartInfo {
   private String partName;
   private long modificationTime;
   private long size;
+  private String eTag;
 
-  public OmPartInfo(int number, String name, long time, long size) {
+  public OmPartInfo(int number, String name, long time, long size,
+      String eTag) {
    this.partNumber = number;
    this.partName = name;
    this.modificationTime = time;
    this.size = size;
+   this.eTag = eTag;
  }

  public int getPartNumber() {
@@ -52,9 +55,13 @@ public long getSize() {
     return size;
   }
 
+  public String geteTag() {
+    return eTag;
+  }
+
   public PartInfo getProto() {
     return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName)
         .setModificationTime(modificationTime)
-        .setSize(size).build();
+        .setSize(size).setETag(eTag).build();
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index a179ca5c4084..158ec5b761de 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1627,7 +1627,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
         .getCommitMultiPartUploadResponse();
 
     OmMultipartCommitUploadPartInfo info = new
-        OmMultipartCommitUploadPartInfo(response.getPartName());
+        OmMultipartCommitUploadPartInfo(response.getPartName(),
+        response.getETag());
 
     return info;
   }
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 04cce8fefcd4..3a6ae0e45d45 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -94,21 +94,28 @@ Test Multipart Upload Complete
     Should contain ${result} UploadId
 
 #upload parts
-    Run Keyword Create Random file 5
-    ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
-    ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0
-    Should contain ${result} ETag
-
-    Execute echo "Part2" > /tmp/part2
-    ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
-    ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0
-    Should contain ${result} ETag
+    Run Keyword Create Random file 5
+    ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID}
+    ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0
+    Should contain ${result} ETag
+    ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}'
+    Should Be Equal As Strings ${eTag1} ${part1Md5Sum}
+
+    Execute echo "Part2" > /tmp/part2
+    ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID}
+    ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0
+    Should contain ${result} ETag
+    ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}'
+    Should Be Equal As Strings ${eTag2} ${part2Md5Sum}
 
 #complete multipart upload
-    ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
-    Should contain ${result} ${BUCKET}
-    Should contain ${result} ${PREFIX}/multipartKey1
-    Should contain ${result} ETag
+    ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
+    Should contain ${result} ${BUCKET}
+    Should contain ${result} ${PREFIX}/multipartKey1
+    ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0
+    ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}'
+    Should contain ${result} ETag
+    Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2"
 
 #read file and check the key
     ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index 8ff1659ad6c9..d79a679cc24f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.ozone;
 
+import javax.xml.bind.DatatypeConverter;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -53,6 +54,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -304,10 +306,13 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete()
 
     // This should succeed, as we check during creation of part or during
     // complete MPU.
+    ozoneOutputStream.getMetadata().put("ETag",
+        DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5")
+            .digest(b)).toLowerCase());
     ozoneOutputStream.close();
 
     Map partsMap = new HashMap<>();
-    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
+    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag());
 
     // Should fail, as we have directory with same name.
try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index c40b46a79d77..fbdaa066d92d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -22,6 +22,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; @@ -34,6 +35,7 @@ import java.util.UUID; import com.google.common.cache.Cache; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; @@ -629,14 +631,17 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); + multipartStreamKey.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = multipartStreamKey.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private String uploadPart(OzoneBucket bucket, String keyName, @@ -644,14 +649,17 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private void completeMultipartUpload(OzoneBucket bucket, String keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index cc7864a3b531..101529920bb7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -17,8 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; + +import javax.xml.bind.DatatypeConverter; +import 
org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -87,6 +93,7 @@ import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -222,13 +229,14 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); Assert.assertNotNull(commitUploadPartInfo); - Assert.assertNotNull(commitUploadPartInfo.getPartName()); + Assert.assertNotNull(commitUploadPartInfo.getETag()); } @Test @@ -238,12 +246,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { ReplicationType.RATIS, THREE); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - sampleData.getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, sampleData.getBytes(UTF_8)); //Overwrite the part by creating part key with same part number. - String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber, - "name".getBytes(UTF_8)); + Pair partNameAndETagNew = uploadPart(bucket, keyName, + uploadID, partNumber, "name".getBytes(UTF_8)); // PartName should be same from old part Name. // AWS S3 for same content generates same partName during upload part. @@ -253,8 +261,11 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. 
- Assert.assertEquals("Part names should be same", partName, - partNameNew); + Assert.assertEquals("Part names should be same", partNameAndETag.getKey(), + partNameAndETagNew.getKey()); + + // ETags are not equal due to content differences + assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue()); // old part bytes written needs discard and have only // new part bytes in quota for this bucket @@ -264,7 +275,8 @@ public void testUploadPartOverrideWithRatis() throws Exception { } @Test - public void testUploadTwiceWithEC() throws IOException { + public void testUploadTwiceWithEC() + throws IOException, NoSuchAlgorithmException { bucketName = UUID.randomUUID().toString(); bucket = getOzoneECBucket(bucketName); @@ -275,12 +287,12 @@ public void testUploadTwiceWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - data); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, data); - Map partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + Map eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); @@ -291,12 +303,12 @@ public void testUploadTwiceWithEC() throws IOException { multipartInfo = bucket.initiateMultipartUpload(keyName); uploadID = multipartInfo.getUploadID(); - partName = uploadPart(bucket, keyName, uploadID, partNumber, + partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber, data); - partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); // used sized should remain same, overwrite previous upload Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -304,7 +316,8 @@ public void testUploadTwiceWithEC() throws IOException { } @Test - public void testUploadAbortWithEC() throws IOException { + public void testUploadAbortWithEC() + throws IOException, NoSuchAlgorithmException { byte[] data = generateData(81920, (byte) 97); bucketName = UUID.randomUUID().toString(); @@ -347,19 +360,19 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { ONE); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); + eTagsMap.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMap.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -370,22 +383,24 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() byte[] data = generateData(10000000, 
(byte) 97); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); - // Upload part 1 and add it to the partsMap for completing the upload. - String partName1 = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName1); + // Upload part 1 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, data); + eTagsMap.put(1, partNameAndETag1.getValue()); - // Upload part 2 and add it to the partsMap for completing the upload. - String partName2 = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName2); + // Upload part 2 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, data); + eTagsMap.put(2, partNameAndETag2.getValue()); - // Upload part 3 but do not add it to the partsMap. + // Upload part 3 but do not add it to the eTagsMap. uploadPart(bucket, keyName, uploadID, 3, data); - completeMultipartUpload(bucket, keyName, uploadID, partsMap); + completeMultipartUpload(bucket, keyName, uploadID, eTagsMap); - // Check the bucket size. Since part number 3 was not added to the partsMap, + // Check the bucket size. Since part number 3 was not added to the eTagsMap, // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); @@ -472,6 +487,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -480,10 +498,13 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -569,12 +590,13 @@ public void testAbortUploadSuccessWithParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager(); - String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr); + String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(), + metadataMgr); bucket.abortMultipartUpload(keyName, uploadID); @@ -601,17 +623,17 @@ public void testListMultipartUploadParts() throws Exception { Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -692,17 +714,17 @@ public void testListMultipartUploadPartsWithContinuation() Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + 
uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -950,22 +972,29 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, return uploadID; } - private String uploadPart(OzoneBucket oBucket, String kName, String - uploadID, int partNumber, byte[] data) throws IOException { + private Pair uploadPart(OzoneBucket oBucket, String kName, + String uploadID, int partNumber, + byte[] data) + throws IOException, NoSuchAlgorithmException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); Assert.assertNotNull(omMultipartCommitUploadPartInfo); + Assert.assertNotNull(omMultipartCommitUploadPartInfo.getETag()); Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 1e5dc26d8419..f10cda63cbf1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.security.MessageDigest; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; @@ -38,6 +39,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -1512,6 +1516,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); assertEquals(valueLength, store.getVolume(volumeName) @@ -2677,13 +2682,14 @@ void testUploadPartWithNoOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - 
assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @ParameterizedTest @@ -2714,6 +2720,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2721,7 +2728,7 @@ void testUploadPartOverride(ReplicationConfig replication) assertNotNull(commitUploadPartInfo); String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // Overwrite the part by creating part key with same part number // and different content. @@ -2729,13 +2736,14 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // AWS S3 for same content generates same partName during upload part. // In AWS S3 ETag is generated from md5sum. In Ozone right now we @@ -2865,12 +2873,13 @@ public void testMultipartUploadWithACL() throws Exception { // Upload part byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1); - String partName = uploadPart(bucket, keyName2, uploadId, 1, data); - Map partsMap = new TreeMap<>(); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName2, + uploadId, 1, data); + Map eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, partNameAndETag.getValue()); // Complete multipart upload request - completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); + completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps); // User without permission cannot read multi-uploaded object try (OzoneInputStream ignored = bucket2.readKey(keyName)) { @@ -2921,21 +2930,21 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { anyReplication()); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMaps = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, + uploadID, 1, "data".getBytes(UTF_8)); + eTagsMaps.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMaps.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @Test @@ -2982,11 +2991,11 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any 
parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); + TreeMap eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString())); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @@ -3008,11 +3017,11 @@ public void testMultipartUploadWithMissingParts() throws Exception { uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); + TreeMap eTagsMap = new TreeMap<>(); + eTagsMap.put(3, DigestUtils.md5Hex("random")); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -3118,6 +3127,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3126,10 +3138,13 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -3206,17 +3221,17 @@ void testListMultipartUploadParts(ReplicationConfig replication) Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -3257,17 
+3272,17 @@ void testListMultipartUploadPartsWithContinuation( Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -3740,19 +3755,20 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, // than 5mb int length = 0; byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); + Pair partNameAndEtag = uploadPart(bucket, keyName, uploadID, + 1, data); + partsMap.put(1, partNameAndEtag.getValue()); length += data.length; - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data); + partsMap.put(2, partNameAndEtag.getValue()); length += data.length; String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( UTF_8)); - partsMap.put(3, partName); + partsMap.put(3, partNameAndEtag.getValue()); length += part3.getBytes(UTF_8).length; // Complete multipart upload request @@ -3809,20 +3825,26 @@ private String initiateMultipartUpload(OzoneBucket bucket, String keyName, return uploadID; } - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { + private Pair uploadPart(OzoneBucket bucket, String keyName, + String uploadID, int partNumber, + byte[] data) throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index c84f6f314191..a2877ed45893 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,11 +24,14 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.HashMap; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; @@ -173,7 +176,8 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { } @Test - public void testMultiPartUploadWithStream() throws IOException { + public void testMultiPartUploadWithStream() + throws IOException, NoSuchAlgorithmException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -206,6 +210,9 @@ public void testMultiPartUploadWithStream() throws IOException { keyName, valueLength, 1, uploadID); ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); + ozoneStreamOutput.getMetadata().put("ETag", + DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index ad1e5ee43df9..585309579900 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -56,6 +57,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -217,7 +220,8 @@ public void testMultiPartCompleteUpload() throws Exception { } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( - OzoneBucket bucket, String keyName) throws IOException { + OzoneBucket bucket, String keyName) + throws IOException, NoSuchAlgorithmException { OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); @@ -230,6 +234,9 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put("ETag", + 
DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + .digest(data)).toLowerCase()); ozoneOutputStream.close(); if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { @@ -249,7 +256,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( ozoneOutputStream.getCommitUploadPartInfo(); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 1a65d5d0653f..c6252de8098d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -185,11 +186,12 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(value)); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -361,7 +363,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, for (int i = 0; i < partsMap.size(); i++) { assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), - partInfoList.get(i).getPartName()); + partInfoList.get(i).getETag()); } @@ -378,9 +380,10 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(value)); ozoneOutputStream.close(); - return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); + return ozoneOutputStream.getCommitUploadPartInfo().getETag(); } @Test diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 54cafbc0ad6d..3d72e8a2ab20 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1580,8 +1580,9 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. optional string partName = 1; + // This one is returned as Etag for S3. 
+ optional string eTag = 2; } message MultipartUploadCompleteRequest { @@ -1598,7 +1599,8 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; - required string partName = 2; + optional string partName = 2; + required string eTag = 3; } message MultipartUploadAbortRequest { @@ -1671,6 +1673,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; + required string eTag = 5; } /** diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index ffe53f04cbfd..13f11372e5f3 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -4852,6 +4852,12 @@ "name": "partName", "type": "string", "optional": true + }, + { + "id": 2, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -4915,6 +4921,12 @@ "name": "partName", "type": "string", "required": true + }, + { + "id": 3, + "name": "eTag", + "type": "string", + "required": true } ] }, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 63602aa28763..7352db1eecae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -788,7 +788,10 @@ public OmMultipartUploadListParts listParts(String volumeName, OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); + partKeyInfo.getPartKeyInfo().getDataSize(), + partKeyInfo.getPartKeyInfo().getMetadataList().stream() + .filter(keyValue -> keyValue.getKey().equals("ETag")) + .findFirst().get().getValue()); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index e1aacde7e8a1..e652fecf1ed8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -242,7 +242,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setCommitMultiPartUploadResponse( MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName)); + .setPartName(partName) + .setETag(omKeyInfo.getMetadata().get("ETag"))); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 26842dbf1ee4..012bf0a3b2b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -502,20 +502,20 @@ private long getMultipartDataSize(String requestedVolume, for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String partName = part.getPartName(); + String eTag = part.getETag(); PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - String dbPartName = null; + String dbPartETag = null; if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); + dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); } - if (!StringUtils.equals(partName, dbPartName)) { - String omPartName = partKeyInfo == null ? null : dbPartName; + if (!StringUtils.equals(eTag, dbPartETag)) { + String omPartName = partKeyInfo == null ? null : dbPartETag; throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + partName + ", " + partNumber + - "}, whereas OM has partName " + omPartName, + ". Provided Part info is { " + eTag + ", " + partNumber + + "}, whereas OM has eTag " + omPartName, OMException.ResultCodes.INVALID_PART); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 45209258f7fe..42bcd280ea3b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,19 @@ package org.apache.hadoop.ozone.om.request; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.UUID; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -1054,14 +1061,31 @@ public static OMRequest createCommitPartMPURequest(String volumeName, String bucketName, String keyName, long clientID, long size, String multipartUploadID, int partNumber) { + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance("Md5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + // Just set dummy size. 
- KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); + KeyArgs.Builder keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .setDataSize(size) + .setMultipartNumber(partNumber) + .setMultipartUploadID(multipartUploadID) + .addAllKeyLocations(new ArrayList<>()) + .addMetadata(HddsProtos.KeyValue.newBuilder() + .setKey("ETag") + .setValue(DatatypeConverter.printHexBinary( + new DigestInputStream( + new ByteArrayInputStream( + RandomStringUtils.randomAlphanumeric((int) size) + .getBytes(StandardCharsets.UTF_8)), + eTagProvider) + .getMessageDigest().digest())) + .build()); // Just adding dummy list. As this is for UT only. MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 933c4c952734..afcfd998a316 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -130,9 +130,14 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, List partList = new ArrayList<>(); - String partName = getPartName(volumeName, bucketName, keyName, - multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + String eTag = s3MultipartUploadCommitPartRequest.getOmRequest() + .getCommitMultiPartUploadRequest() + .getKeyArgs() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals("ETag")) + .findFirst().get().getValue(); + partList.add(Part.newBuilder().setETag(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -224,12 +229,12 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23) + partList.add(Part.newBuilder().setETag(partName).setPartNumber(23) .build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + partList.add(Part.newBuilder().setETag(partName).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 23b543b6ec12..36e51a4d62c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) 
.setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setPartName(volumeName)).build(); + .newBuilder().setETag(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index b65cfd048448..c2af4e1109e0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -248,6 +249,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) + .addMetadata("ETag", + DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(Collections.emptyList()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index fad99837e2b9..2517d60b9267 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -479,6 +480,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) + .addMetadata("ETag", DigestUtils.md5Hex(UUID.randomUUID() + .toString())) .build(); writeClient.commitMultipartUploadPart(commitPartKeyArgs, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 72289470c2ca..56c3cb2c0a9e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -66,12 +66,12 @@ public void setPartNumber(int partNumber) { this.partNumber = partNumber; } - public String geteTag() { + public String getETag() { return eTag; } - public void seteTag(String eTag) { - this.eTag = eTag; + public void setETag(String eTagHash) { + this.eTag = eTagHash; } } diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index b607b1c5cff0..b6c4ec220c8a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -807,7 +807,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); + partsMap.put(part.getPartNumber(), part.getETag()); } if (LOG.isDebugEnabled()) { LOG.debug("Parts map {}", partsMap); @@ -955,6 +955,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( sourceObject, ozoneOutputStream, 0, length); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } else { @@ -964,6 +966,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } @@ -993,7 +997,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, assert keyOutputStream != null; OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); + String eTag = omMultipartCommitUploadPartInfo.getETag(); if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1064,7 +1068,7 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); + part.setETag(partInfo.getETag()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index dbc7f374a9a0..2f4383c9ddc5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -22,11 +22,9 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import 
org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -163,11 +161,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, long startNanos = Time.monotonicNowNanos(); String eTag; S3GatewayMetrics metrics = S3GatewayMetrics.create(); - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. - KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { @@ -180,7 +173,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, metrics.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); - keyDataStreamOutput = streamOutput.getKeyDataStreamOutput(); } } catch (OMException ex) { if (ex.getResult() == @@ -192,12 +184,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, ozoneBucket.getName() + "/" + key); } throw ex; - } finally { - if (keyDataStreamOutput != null) { - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - keyDataStreamOutput.getCommitUploadPartInfo(); - eTag = commitUploadPartInfo.getPartName(); - } } return Response.ok().header("ETag", eTag).build(); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fad3386c61c4..fbf571e52ad4 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -23,6 +23,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -32,6 +34,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -267,7 +270,8 @@ public void close() throws IOException { byte[] bytes = new byte[position]; buffer.get(bytes); - Part part = new Part(key + size, bytes); + Part part = new Part(key + size, bytes, + getMetadata().get("ETag")); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -425,7 +429,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray()); + toByteArray(), getMetadata().get("ETag")); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -463,7 +467,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, for (Map.Entry part: partsMap.entrySet()) { Part recordedPart = partsList.get(part.getKey()); if (recordedPart == null || - !recordedPart.getPartName().equals(part.getValue())) { + !recordedPart.getETag().equals(part.getValue())) { throw new OMException(ResultCodes.INVALID_PART); } else { output.write(recordedPart.getContent()); @@ -506,13 +510,21 @@ 
public OzoneMultipartUploadPartListParts listParts(String key, int count = 0; int nextPartNumberMarker = 0; boolean truncated = false; + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance("Md5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } while (count < maxParts && partIterator.hasNext()) { Map.Entry partEntry = partIterator.next(); nextPartNumberMarker = partEntry.getKey(); if (partEntry.getKey() > partNumberMarker) { PartInfo partInfo = new PartInfo(partEntry.getKey(), partEntry.getValue().getPartName(), - Time.now(), partEntry.getValue().getContent().length); + Time.now(), partEntry.getValue().getContent().length, + DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry + .getValue().getContent())).toLowerCase()); partInfoList.add(partInfo); count++; } @@ -563,9 +575,12 @@ public static class Part { private String partName; private byte[] content; - public Part(String name, byte[] data) { + private String eTag; + + public Part(String name, byte[] data, String eTag) { this.partName = name; this.content = data.clone(); + this.eTag = eTag; } public String getPartName() { @@ -575,6 +590,11 @@ public String getPartName() { public byte[] getContent() { return content.clone(); } + + public String getETag() { + return eTag; + } + } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index 7bb35682d8da..ff688a8e8ae3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -65,6 +65,7 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? new OmMultipartCommitUploadPartInfo(partName, + getMetadata().get("ETag")) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 00a7ba557490..41446a55e173 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -86,7 +87,8 @@ public KeyOutputStream getKeyOutputStream() { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? 
new OmMultipartCommitUploadPartInfo(partName, + ((KeyMetadataAware)getOutputStream()).getMetadata().get("ETag")) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java index ab87f9c98e11..cd0fbfed4e65 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java @@ -80,8 +80,8 @@ private void checkContent(CompleteMultipartUploadRequest request) { List parts = request.getPartList(); - assertEquals(part1, parts.get(0).geteTag()); - assertEquals(part2, parts.get(1).geteTag()); + assertEquals(part1, parts.get(0).getETag()); + assertEquals(part2, parts.get(1).getETag()); } private CompleteMultipartUploadRequest unmarshall( diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 5bbbe9f2bcc2..3e981ddc970f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -95,7 +95,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString("ETag")); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString("ETag")); part.setPartNumber(partNumber); return part; @@ -205,7 +205,7 @@ public void testMultipartInvalidPartError() throws Exception { Part part1 = uploadPart(key, uploadID, partNumber, content); // Change part name. 
- part1.seteTag("random"); + part1.setETag("random"); partsList.add(part1); content = "Multipart Upload 2"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 7b32fa421fad..60a260498e6d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Scanner; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -91,7 +92,11 @@ public static void setUp() throws Exception { try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>())) { + ReplicationFactor.THREE), + new HashMap() {{ + put("ETag", DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + }} + )) { stream.write(keyContent); } @@ -329,7 +334,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString("ETag")); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString("ETag")); part.setPartNumber(partNumber); return part; @@ -377,7 +382,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, assertNotNull(result.getETag()); assertNotNull(result.getLastModified()); Part part = new Part(); - part.seteTag(result.getETag()); + part.setETag(result.getETag()); part.setPartNumber(partNumber); return part; From fd3d762036db6a3161729615d97ba420cce2ecdd Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 27 Nov 2023 14:06:58 +0300 Subject: [PATCH 02/11] HDDS-9680. 
tiny refactoring - move eTag computer to static variable in tests --- .../hadoop/ozone/TestMultipartObjectGet.java | 2 +- .../client/rpc/TestOzoneAtRestEncryption.java | 16 +++++---- ...TestOzoneClientMultipartUploadWithFSO.java | 24 +++++++------ .../rpc/TestOzoneRpcClientAbstract.java | 36 ++++++++++++------- 4 files changed, 48 insertions(+), 30 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 37cc7a3411f0..8c8a7abac4fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -150,7 +150,7 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString("ETag")); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index fbdaa066d92d..c899eaebc1a8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -98,6 +98,8 @@ class TestOzoneAtRestEncryption { + private static final String ETAG = "ETag"; + private static MiniOzoneCluster cluster = null; private static MiniKMS miniKMS; private static OzoneClient ozClient = null; @@ -119,6 +121,7 @@ class TestOzoneAtRestEncryption { private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB // (this is the default Crypto Buffer size as determined by the config // hadoop.security.crypto.buffer.size) + private static MessageDigest eTagProvider; @BeforeAll static void init() throws Exception { @@ -168,6 +171,7 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); + eTagProvider = MessageDigest.getInstance("Md5"); } @AfterAll @@ -631,9 +635,9 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); - multipartStreamKey.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + multipartStreamKey.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -649,9 +653,9 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo 
omMultipartCommitUploadPartInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 101529920bb7..ecb5d8ba99da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -102,11 +102,14 @@ */ public class TestOzoneClientMultipartUploadWithFSO { + private static final String ETAG = "ETag"; + private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static String scmId = UUID.randomUUID().toString(); + private static MessageDigest eTagProvider; /** * Set a timeout for each test. @@ -131,6 +134,7 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); + eTagProvider = MessageDigest.getInstance("Md5"); } /** @@ -229,7 +233,7 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -487,9 +491,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -498,9 +502,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map<Integer, String> partsMap = new LinkedHashMap<>(); @@ -981,9 +985,9 @@ private Pair<String, String> uploadPart(OzoneBucket oBucket, String kName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index f10cda63cbf1..ee3364b3e634 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InputStream; import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; @@ -161,6 +162,7 @@ import static org.slf4j.event.Level.DEBUG; import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @@ -178,6 +180,8 @@ @TestMethodOrder(MethodOrderer.MethodName.class) public abstract class TestOzoneRpcClientAbstract { + private static final String ETAG = "ETag"; + private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; @@ -197,6 +201,12 @@ public abstract class TestOzoneRpcClientAbstract { private static String scmId = UUID.randomUUID().toString(); private static String clusterId; + private static MessageDigest eTagProvider; + + @BeforeAll + public static void initialize() throws NoSuchAlgorithmException { + eTagProvider = MessageDigest.getInstance("Md5"); + } /** @@ -1516,7 +1526,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close();
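The hunks above switch the tests from inline MessageDigest.getInstance("Md5") calls to the shared eTagProvider, while the string-based cases keep DigestUtils.md5Hex. Both idioms yield the same lowercase hex MD5 string, which is the value the tests put under the ETag metadata key before closing the part stream and which the S3 gateway later reports as the part's ETag header. A minimal, self-contained sketch of that equivalence (the class and method names below are illustrative only and are not part of this change):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import javax.xml.bind.DatatypeConverter;
    import org.apache.commons.codec.digest.DigestUtils;

    public final class ETagSketch {
      private ETagSketch() { }

      // Returns the lowercase hex MD5 of the part content, computed two ways.
      public static String md5ETag(byte[] partContent) throws Exception {
        String viaCommonsCodec = DigestUtils.md5Hex(partContent);
        String viaJdk = DatatypeConverter.printHexBinary(
            MessageDigest.getInstance("MD5").digest(partContent)).toLowerCase();
        assert viaCommonsCodec.equals(viaJdk);  // both idioms agree
        return viaCommonsCodec;
      }

      public static void main(String[] args) throws Exception {
        byte[] data = "sample multipart part".getBytes(StandardCharsets.UTF_8);
        System.out.println(md5ETag(data));      // 32-character hex string
      }
    }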
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2720,7 +2730,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2736,7 +2746,7 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(sampleData)); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream @@ -3127,9 +3137,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3138,9 +3148,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); @@ -3832,9 +3842,9 @@ private Pair uploadPart(OzoneBucket bucket, String keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") - .digest(data)).toLowerCase()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = From 7e0ef010292e0dc8d5afb2fc1bf350f45e022770 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Thu, 30 Nov 2023 09:44:22 +0300 Subject: [PATCH 03/11] Update OmClientProtocol.proto Make an 'eTag' field optional for Part and PartInfo messages --- .../interface-client/src/main/proto/OmClientProtocol.proto | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 3d72e8a2ab20..fea92d1eceb5 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1600,7 +1600,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; optional string partName = 2; - required string eTag = 3; + optional string eTag = 3; } message MultipartUploadAbortRequest { @@ -1673,7 +1673,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; - required string eTag = 5; + optional string eTag = 5; } /** From 060a9f870b8a8cfc5c0d808b41d32bc912056cf2 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Wed, 17 Jan 2024 18:44:04 +0100 Subject: [PATCH 04/11] fix compile error --- .../hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index da187cd870f9..895923c64e69 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -159,11 +159,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String eTag; - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. 
- KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { From 5eea2be4dc4978a78e3898a68ce6c35b6b4909c0 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 28 Jan 2024 14:15:53 +0300 Subject: [PATCH 05/11] HDDS-9680. support ETag as an md5-hash for multipart upload parts requests --- .../ozone/client/protocol/ClientProtocol.java | 1 + .../hadoop/ozone/client/rpc/RpcClient.java | 2 +- .../OmMultipartUploadCompleteList.java | 4 +- .../hadoop/ozone/om/helpers/OmPartInfo.java | 2 +- .../src/main/proto/OmClientProtocol.proto | 4 +- .../src/main/resources/proto.lock | 2 +- .../S3MultipartUploadCompleteRequest.java | 68 ++++++++++++++++--- .../ozone/client/ClientProtocolStub.java | 1 + .../hadoop/ozone/client/OzoneBucketStub.java | 1 + 9 files changed, 68 insertions(+), 17 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 5316f7a99e9f..8eb354fbce13 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.annotation.Nonnull; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 47578514a937..870b0e381933 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1938,7 +1938,7 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, new OzoneMultipartUploadPartListParts.PartInfo( omPartInfo.getPartNumber(), omPartInfo.getPartName(), omPartInfo.getModificationTime(), omPartInfo.getSize(), - omPartInfo.geteTag())); + omPartInfo.getETag())); } return ozoneMultipartUploadPartListParts; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index 897bae07415b..d3460c9856ba 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; import java.util.ArrayList; @@ -57,7 +58,8 @@ public Map getMultipartMap() { public List getPartsList() { List partList = new ArrayList<>(); multipartMap.forEach((partNumber, eTag) -> partList.add(Part - .newBuilder().setETag(eTag).setPartNumber(partNumber).build())); + // set partName equal to eTag for back compatibility (partName is a required property) + .newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build())); return partList; } } diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index d0f6b0c7198e..e908c5a025f1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -55,7 +55,7 @@ public long getSize() { return size; } - public String geteTag() { + public String getETag() { return eTag; } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 36d3e6e080b2..b5d7b1872e84 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1583,7 +1583,7 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - optional string partName = 1; + required string partName = 1; // This one is returned as Etag for S3. optional string eTag = 2; } @@ -1602,7 +1602,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; - optional string partName = 2; + required string partName = 2; optional string eTag = 3; } diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index 13f11372e5f3..e6964dbf38fb 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -4926,7 +4926,7 @@ "id": 3, "name": "eTag", "type": "string", - "required": true + "optional": true } ] }, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 8ae5118d64ba..160ac3e8526d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -27,6 +27,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; @@ -80,6 +82,29 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); + private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> eTagBasedValidator = + (part, partKeyInfo) -> { + String eTag = part.getETag(); + String dbPartETag = null; + String dbPartName = null; + if (partKeyInfo != null) { + dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag, StringUtils.equals(eTag, dbPartETag) || StringUtils.equals(eTag, dbPartName)); + }; + private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> partNameBasedValidator = + (part, partKeyInfo) -> { + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ?
null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; + public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { super(omRequest, bucketLayout); @@ -491,24 +516,19 @@ private long getMultipartDataSize(String requestedVolume, OzoneManager ozoneManager) throws OMException { long dataSize = 0; int currentPartCount = 0; + boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag); // Now do actual logic, and check for any Invalid part during this. for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String eTag = part.getETag(); - PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - - String dbPartETag = null; - if (partKeyInfo != null) { - dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); - } - if (!StringUtils.equals(eTag, dbPartETag)) { - String omPartName = partKeyInfo == null ? null : dbPartETag; + MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ? + eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo); + if (!requestPart.isValid()) { throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + eTag + ", " + partNumber + - "}, whereas OM has eTag " + omPartName, + ". Provided Part info is { " + requestPart.getRequestPartId() + ", " + partNumber + + "}, whereas OM has eTag " + requestPart.getOmPartId(), OMException.ResultCodes.INVALID_PART); } @@ -648,4 +668,30 @@ private String multipartUploadedKeyHash( + partsList.size(); } + private static class MultipartCommitRequestPart { + private String requestPartId; + + private String omPartId; + + private boolean isValid; + + public MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + this.requestPartId = requestPartId; + this.omPartId = omPartId; + this.isValid = isValid; + } + + public String getRequestPartId() { + return requestPartId; + } + + public String getOmPartId() { + return omPartId; + } + + public boolean isValid() { + return isValid; + } + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 174af69e255d..5484debb6b12 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.client; import javax.annotation.Nonnull; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fbf571e52ad4..ca2bf193e8e4 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -36,6 +36,7 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import 
org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; From 2647df0ecd9c9518b7df77e329b8a6d0cd2a241a Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 28 Jan 2024 15:28:43 +0300 Subject: [PATCH 06/11] HDDS-9680. provide OzoneConst.ETAG constant and replace "ETag" occurrences with the ref to it * replace "Md5" occurrences with the ref to OzoneConst.MD5_HASH const --- .../java/org/apache/hadoop/ozone/OzoneConsts.java | 10 ++++++++++ .../docs/content/feature/S3-Tenant-Commands.md | 2 +- .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 6 ++++-- .../apache/hadoop/ozone/TestMultipartObjectGet.java | 4 ++-- .../ozone/client/rpc/TestOzoneAtRestEncryption.java | 8 +++----- .../rpc/TestOzoneClientMultipartUploadWithFSO.java | 12 +++++------- .../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 6 +++--- .../client/rpc/TestOzoneRpcClientWithRatis.java | 5 +++-- .../hadoop/ozone/om/TestObjectStoreWithLegacyFS.java | 4 ++-- .../ozone/om/TestOzoneManagerHAWithStoppedNodes.java | 5 +++-- .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 3 ++- .../S3MultipartUploadCommitPartRequest.java | 2 +- .../multipart/S3MultipartUploadCompleteRequest.java | 8 ++++---- .../hadoop/ozone/om/request/OMRequestTestUtils.java | 4 ++-- .../TestS3MultipartUploadCompleteRequest.java | 3 ++- .../service/TestMultipartUploadCleanupService.java | 3 ++- .../ozone/om/service/TestOpenKeyCleanupService.java | 3 ++- .../hadoop/ozone/s3/commontypes/KeyMetadata.java | 4 +++- .../s3/endpoint/CompleteMultipartUploadRequest.java | 4 +++- .../s3/endpoint/CompleteMultipartUploadResponse.java | 3 ++- .../hadoop/ozone/s3/endpoint/CopyObjectResponse.java | 3 ++- .../hadoop/ozone/s3/endpoint/CopyPartResult.java | 3 ++- .../hadoop/ozone/s3/endpoint/EndpointBase.java | 3 +-- .../hadoop/ozone/s3/endpoint/ListPartsResponse.java | 3 ++- .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 3 ++- .../ozone/s3/endpoint/ObjectEndpointStreaming.java | 7 ++++--- .../apache/hadoop/ozone/client/OzoneBucketStub.java | 8 +++++--- .../ozone/client/OzoneDataStreamOutputStub.java | 3 ++- .../hadoop/ozone/client/OzoneOutputStreamStub.java | 3 ++- .../hadoop/ozone/s3/endpoint/TestListParts.java | 6 +++--- .../s3/endpoint/TestMultipartUploadComplete.java | 4 ++-- .../s3/endpoint/TestMultipartUploadWithCopy.java | 6 +++--- .../hadoop/ozone/s3/endpoint/TestPartUpload.java | 10 +++++----- .../ozone/s3/endpoint/TestPartUploadWithStream.java | 11 ++++++----- 34 files changed, 100 insertions(+), 72 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 4f0f800dfdd9..c026d2789803 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -598,4 +598,14 @@ private OzoneConsts() { */ public static final String COMPACTION_LOG_TABLE = "compactionLogTable"; + + /** + * Algo to compute message digest (s3g multipart upload request's part ETag) + */ + public static final String MD5 = "Md5"; + + /** + * S3G multipart upload request's ETag header key + */ + public static final String ETAG = "ETag"; } diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md index f9ea5f608461..23c015515035 100644 --- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md +++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md 
@@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 list-objects --bucket bucket- { "Key": "file1", "LastModified": "2022-02-16T00:10:00.000Z", - "ETag": "2022-02-16T00:10:00.000Z", + "ETag": "e99f93dedfe22e9a133dc3c634f14634", "Size": 3811, "StorageClass": "STANDARD" } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 80d1a486cd54..50e361ef1801 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -60,6 +60,8 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -304,8 +306,8 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() // This should succeed, as we check during creation of part or during // complete MPU. - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH) .digest(b)).toLowerCase()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 417c6a619a81..80ebc8a6d7da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -148,11 +148,11 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, Response response = REST.put(BUCKET, KEY, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.setETag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 118fdfcb2237..51dae57650de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -102,8 +102,6 @@ class TestOzoneAtRestEncryption { - private static final String ETAG = "ETag"; - private static MiniOzoneCluster cluster = null; private static MiniKMS miniKMS; private static OzoneClient ozClient = null; @@ -175,7 +173,7 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, 
cluster.getOzoneManager().getKmsProvider(), conf); - eTagProvider = MessageDigest.getInstance("Md5"); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } @AfterAll @@ -638,7 +636,7 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); - multipartStreamKey.getMetadata().put(ETAG, + multipartStreamKey.getMetadata().put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(eTagProvider.digest(data)) .toLowerCase()); multipartStreamKey.close(); @@ -656,7 +654,7 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put(ETAG, + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(eTagProvider.digest(data)) .toLowerCase()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 1ed5e4518b28..9b09f10ad107 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -103,8 +103,6 @@ @Timeout(300) public class TestOzoneClientMultipartUploadWithFSO { - private static final String ETAG = "ETag"; - private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; @@ -130,7 +128,7 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); - eTagProvider = MessageDigest.getInstance("Md5"); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } /** @@ -229,7 +227,7 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); - ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -486,7 +484,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put(ETAG, + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(eTagProvider.digest(data)) .toLowerCase()); ozoneOutputStream.close(); @@ -497,7 +495,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); - ozoneOutputStream.getMetadata().put(ETAG, + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(eTagProvider.digest(data)) .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); @@ -962,7 +960,7 @@ private Pair uploadPart(OzoneBucket oBucket, String kName, OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put(ETAG, + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(eTagProvider.digest(data)) .toLowerCase()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 1bd83dcb612d..c1313d717d8c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -140,7 +140,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -183,8 +185,6 @@ @TestMethodOrder(MethodOrderer.MethodName.class) public abstract class TestOzoneRpcClientAbstract { - private static final String ETAG = "ETag"; - private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; @@ -208,7 +208,7 @@ public abstract class TestOzoneRpcClientAbstract { @BeforeAll public static void initialize() throws NoSuchAlgorithmException { - eTagProvider = MessageDigest.getInstance("Md5"); + eTagProvider = MessageDigest.getInstance(MD5_HASH); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 8cf850b82415..d8cb7976ae37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -209,8 +210,8 @@ public void testMultiPartUploadWithStream() keyName, valueLength, 1, uploadID); 
ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); - ozoneStreamOutput.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index 5afa19b50862..1d80ae6d83d8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -232,8 +232,8 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); - ozoneOutputStream.getMetadata().put("ETag", - DatatypeConverter.printHexBinary(MessageDigest.getInstance("Md5") + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) .digest(data)).toLowerCase()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 2f3c716db2ee..63202805ec57 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -188,7 +189,7 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(value)); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); @@ -381,7 +382,7 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); - ozoneOutputStream.getMetadata().put("ETag", DigestUtils.md5Hex(value)); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); return ozoneOutputStream.getCommitUploadPartInfo().getETag(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e56b431f13d4..62189b65adb5 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -121,6 +121,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -820,7 +821,7 @@ public OmMultipartUploadListParts listParts(String volumeName, partKeyInfo.getPartKeyInfo().getModificationTime(), partKeyInfo.getPartKeyInfo().getDataSize(), partKeyInfo.getPartKeyInfo().getMetadataList().stream() - .filter(keyValue -> keyValue.getKey().equals("ETag")) + .filter(keyValue -> keyValue.getKey().equals(ETAG)) .findFirst().get().getValue()); omPartInfoList.add(omPartInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 565e3eccbfd8..682749eae444 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -241,7 +241,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omResponse.setCommitMultiPartUploadResponse( MultipartCommitUploadPartResponse.newBuilder() .setPartName(partName) - .setETag(omKeyInfo.getMetadata().get("ETag"))); + .setETag(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 160ac3e8526d..c9778c1b835a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -274,7 +274,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setVolume(requestedVolume) .setBucket(requestedBucket) .setKey(keyName) - .setHash(omKeyInfo.getMetadata().get("ETag"))); + .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); long volumeId = omMetadataManager.getVolumeId(volumeName); long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); @@ -414,7 +414,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) .setAcls(dbOpenKeyInfo.getAcls()) - .addMetadata("ETag", + .addMetadata(OzoneConsts.ETAG, 
multipartUploadedKeyHash(partKeyInfoMap)); // Check if db entry has ObjectID. This check is required because // it is possible that between multipart key uploads and complete, @@ -444,7 +444,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, omKeyInfo.setModificationTime(keyArgs.getModificationTime()); omKeyInfo.setDataSize(dataSize); omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig()); - omKeyInfo.getMetadata().put("ETag", + omKeyInfo.getMetadata().put(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); } omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -662,7 +662,7 @@ private String multipartUploadedKeyHash( StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get("ETag")); + .getPartKeyInfo().getMetadataList()).get(OzoneConsts.ETAG)); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 55ce35f28255..951c1a7c7383 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -1065,7 +1065,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, MessageDigest eTagProvider; try { - eTagProvider = MessageDigest.getInstance("Md5"); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } @@ -1079,7 +1079,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, .setMultipartUploadID(multipartUploadID) .addAllKeyLocations(new ArrayList<>()) .addMetadata(HddsProtos.KeyValue.newBuilder() - .setKey("ETag") + .setKey(OzoneConsts.ETAG) .setValue(DatatypeConverter.printHexBinary( new DigestInputStream( new ByteArrayInputStream( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index baa05849090f..abdf63574260 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -137,7 +138,7 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, .getKeyArgs() .getMetadataList() .stream() - .filter(keyValue -> keyValue.getKey().equals("ETag")) + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) .findFirst().get().getValue(); partList.add(Part.newBuilder().setETag(eTag).setPartNumber(1) .build()); diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 5c38e31f312a..e633bae80789 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -250,7 +251,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) - .addMetadata("ETag", + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index cb9ed93b07e2..848ffb9a423e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -479,7 +480,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) - .addMetadata("ETag", DigestUtils.md5Hex(UUID.randomUUID() + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID() .toString())) .build(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java index 47b59cfcc0e8..8ae48ca4f83e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java @@ -21,6 +21,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.apache.hadoop.ozone.OzoneConsts; + import java.time.Instant; /** @@ -37,7 +39,7 @@ public class KeyMetadata { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; @XmlElement(name = "Size") diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 56c3cb2c0a9e..af5eafc9f438 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; + import java.util.ArrayList; import java.util.List; @@ -55,7 +57,7 @@ public static class Part { @XmlElement(name = "PartNumber") private int partNumber; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public int getPartNumber() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java index c636f36b175b..2aa30d6b839b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java @@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; /** * Complete Multipart Upload request response. @@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse { @XmlElement(name = "Key") private String key; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public String getLocation() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java index 6e114c2e0c64..d1136fe9ed78 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -39,7 +40,7 @@ public class CopyObjectResponse { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java index c4e65aa38ff7..ab30c1f0e7c9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; import java.time.Instant; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; /** @@ -39,7 +40,7 @@ public class CopyPartResult { 
@XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public CopyPartResult() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 5694d6f9f41b..5810c4ec2a2f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; @@ -74,8 +75,6 @@ */ public abstract class EndpointBase implements Auditor { - protected static final String ETAG = "ETag"; - protected static final String ETAG_CUSTOM = "etag-custom"; @Inject diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java index fc9da14133c8..8f3fad735441 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -154,7 +155,7 @@ public static class Part { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1f72b2b13ecd..4a36ad9e62a8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -101,6 +101,7 @@ import java.util.OptionalLong; import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; +import static javax.ws.rs.core.HttpHeaders.ETAG; import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; @@ -150,7 +151,7 @@ public class ObjectEndpoint extends EndpointBase { static { E_TAG_PROVIDER = ThreadLocal.withInitial(() -> { try { - return MessageDigest.getInstance("Md5"); + return MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index 895923c64e69..bbb743ee3597 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -21,6 +21,7 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; @@ -108,7 +109,7 @@ public static Pair putKeyWithStream( eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) .toLowerCase(); perf.appendMetaLatencyNanos(metadataLatencyNs); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return Pair.of(eTag, writeLen); } @@ -167,7 +168,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, writeToStreamOutput(streamOutput, body, chunkSize, length); eTag = DatatypeConverter.printHexBinary( body.getMessageDigest().digest()).toLowerCase(); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); METRICS.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); @@ -183,6 +184,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, } throw ex; } - return Response.ok().header("ETag", eTag).build(); + return Response.ok().header(OzoneConsts.ETAG, eTag).build(); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index ca2bf193e8e4..70b88fb6a749 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -57,6 +57,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** @@ -272,7 +274,7 @@ public void close() throws IOException { buffer.get(bytes); Part part = new Part(key + size, bytes, - getMetadata().get("ETag")); + getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -430,7 +432,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray(), getMetadata().get("ETag")); + toByteArray(), getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -513,7 +515,7 @@ public OzoneMultipartUploadPartListParts listParts(String key, boolean truncated = false; MessageDigest eTagProvider; try { - eTagProvider = MessageDigest.getInstance("Md5"); + eTagProvider = MessageDigest.getInstance(MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index ff688a8e8ae3..b472320b7fe7 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -66,6 +67,6 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { return closed ? new OmMultipartCommitUploadPartInfo(partName, - getMetadata().get("ETag")) : null; + getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 8decad561583..da2fb26ec8f5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.StreamBufferArgs; @@ -95,7 +96,7 @@ public KeyOutputStream getKeyOutputStream() { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { return closed ? new OmMultipartCommitUploadPartInfo(partName, - ((KeyMetadataAware)getOutputStream()).getMetadata().get("ETag")) : null; + ((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 3e8beb2c3a1e..677367e6d812 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -79,17 +79,17 @@ public static void setUp() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 2, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 3, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 3bb5222465e0..f754a0862b8d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -93,9 +93,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.setETag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index a500ed8751e9..d9595aeff796 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -94,7 +94,7 @@ public static void setUp() throws Exception { ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), new HashMap() {{ - put("ETag", DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); }} )) { stream.write(keyContent); @@ -332,9 +332,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.setETag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index c79b085fd1ad..5a9906590977 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -90,7 +90,7 @@ public void testPartUpload() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -112,16 +112,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. 
content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 210b5ffb4880..0f13a3fe2a5b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -95,7 +96,7 @@ public void testPartUpload() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -116,16 +117,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } From 2790d2a8bd690afc33c689714367695c6eb493b7 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 28 Jan 2024 19:30:52 +0300 Subject: [PATCH 07/11] HDDS-9680. fix checkstyle issues --- .../org/apache/hadoop/ozone/OzoneConsts.java | 7 +--- .../ozone/client/protocol/ClientProtocol.java | 1 - .../OmMultipartUploadCompleteList.java | 1 - .../S3MultipartUploadCompleteRequest.java | 39 +++++++++---------- .../ozone/client/ClientProtocolStub.java | 1 - .../hadoop/ozone/client/OzoneBucketStub.java | 1 - 6 files changed, 20 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index c026d2789803..70566767eaba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -600,12 +600,7 @@ private OzoneConsts() { "compactionLogTable"; /** - * Algo to compute message digest (s3g multipart upload request's part ETag) - */ - public static final String MD5 = "Md5"; - - /** - * S3G multipart upload request's ETag header key + * S3G multipart upload request's ETag header key. 
*/ public static final String ETAG = "ETag"; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 8eb354fbce13..5316f7a99e9f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.annotation.Nonnull; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index d3460c9856ba..ff39661d01b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.helpers; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; import java.util.ArrayList; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index c9778c1b835a..ac55258c5869 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -28,7 +28,6 @@ import java.util.List; import java.util.Map; import java.util.function.BiFunction; -import java.util.function.Function; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; @@ -84,26 +83,26 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private BiFunction eTagBasedValidator = (part, partKeyInfo) -> { - String eTag = part.getETag(); - String dbPartETag = null; - String dbPartName = null; - if (partKeyInfo != null) { - dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); - dbPartName = partKeyInfo.getPartName(); - } - return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : - dbPartETag, StringUtils.equals(eTag, dbPartETag) || StringUtils.equals(eTag, dbPartName)); - }; + String eTag = part.getETag(); + String dbPartETag = null; + String dbPartName = null; + if (partKeyInfo != null) { + dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag, StringUtils.equals(eTag, dbPartETag) || StringUtils.equals(eTag, dbPartName)); + }; private BiFunction partNameBasedValidator = (part, partKeyInfo) -> { - String partName = part.getPartName(); - String dbPartName = null; - if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); - } - return new MultipartCommitRequestPart(partName, partKeyInfo == null ? 
null : - dbPartName, StringUtils.equals(partName, dbPartName)); - }; + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ? null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { @@ -675,7 +674,7 @@ private static class MultipartCommitRequestPart { private boolean isValid; - public MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { this.requestPartId = requestPartId; this.omPartId = omPartId; this.isValid = isValid; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 5484debb6b12..174af69e255d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.client; import javax.annotation.Nonnull; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 70b88fb6a749..39ae9cc4af17 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -36,7 +36,6 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; From 1b47c00b4554d58576a2bdbca7996d7686bfc756 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 28 Jan 2024 19:47:49 +0300 Subject: [PATCH 08/11] HDDS-9680. revert ozone client interface proto.lock changes --- .../interface-client/src/main/resources/proto.lock | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index e6964dbf38fb..ffe53f04cbfd 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -4852,12 +4852,6 @@ "name": "partName", "type": "string", "optional": true - }, - { - "id": 2, - "name": "eTag", - "type": "string", - "optional": true } ] }, @@ -4921,12 +4915,6 @@ "name": "partName", "type": "string", "required": true - }, - { - "id": 3, - "name": "eTag", - "type": "string", - "optional": true } ] }, From fd71ce1449f82b6a471a68a7e9496a205ae28e5f Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 29 Jan 2024 09:26:49 +0300 Subject: [PATCH 09/11] HDDS-9680. 
fix unit tests issues --- .../s3/multipart/TestS3MultipartUploadCompleteRequest.java | 6 +++--- .../om/response/s3/multipart/TestS3MultipartResponse.java | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index abdf63574260..86992976af9c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -140,7 +140,7 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, .stream() .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) .findFirst().get().getValue(); - partList.add(Part.newBuilder().setETag(eTag).setPartNumber(1) + partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -228,10 +228,10 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setETag(partName).setPartNumber(23).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setETag(partName).setPartNumber(1).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 36e51a4d62c8..51963a00a1cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) .setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setETag(volumeName)).build(); + .newBuilder().setETag(volumeName).setPartName(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, From 72ecefe4fa5b05e1636ea9def979bbc3496a4764 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 29 Jan 2024 10:22:09 +0300 Subject: [PATCH 10/11] HDDS-9680. 
make partName of MultipartCommitUploadPartResponse an optional field (revert change) --- .../interface-client/src/main/proto/OmClientProtocol.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index b5d7b1872e84..0fa8082e35b6 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1583,7 +1583,7 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - required string partName = 1; + optional string partName = 1; // This one is returned as Etag for S3. optional string eTag = 2; } From 255d7aba1097d4a377bd9dcb21a2c526832efffe Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 29 Jan 2024 20:19:17 +0300 Subject: [PATCH 11/11] HDDS-9680. improve eTag property reading from the key metadata, fix the MPU commit request's eTag hash computation for old clients --- .../S3MultipartUploadCommitPartRequest.java | 11 +++++++---- .../S3MultipartUploadCompleteRequest.java | 18 +++++++++++++----- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 682749eae444..a3e7840ccce5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -238,10 +238,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); omBucketInfo.incrUsedBytes(correctedSpace); - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName) - .setETag(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); + MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder() + .setPartName(partName); + String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG); + if (eTag != null) { + commitResponseBuilder.setETag(eTag); + } + omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index ac55258c5869..2545e877bf5b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -27,6 +27,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -84,14 +85,17 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest
{ private BiFunction eTagBasedValidator = (part, partKeyInfo) -> { String eTag = part.getETag(); - String dbPartETag = null; + AtomicReference dbPartETag = new AtomicReference<>(); String dbPartName = null; if (partKeyInfo != null) { - dbPartETag = partKeyInfo.getPartKeyInfo().getMetadata(0).getValue(); + partKeyInfo.getPartKeyInfo().getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue())); dbPartName = partKeyInfo.getPartName(); } return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : - dbPartETag, StringUtils.equals(eTag, dbPartETag) || StringUtils.equals(eTag, dbPartName)); + dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName)); }; private BiFunction partNameBasedValidator = (part, partKeyInfo) -> { @@ -660,8 +664,12 @@ private String multipartUploadedKeyHash( OmMultipartKeyInfo.PartKeyInfoMap partsList) { StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { - keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get(OzoneConsts.ETAG)); + String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList()) + .get(OzoneConsts.ETAG); + if (partPropertyToComputeHash == null) { + partPropertyToComputeHash = partKeyInfo.getPartName(); + } + keysConcatenated.append(partPropertyToComputeHash); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size();
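For reference, the aggregate ETag built by multipartUploadedKeyHash above follows the familiar S3 "<md5>-<partCount>" shape: the per-part ETag strings are concatenated in part order and the MD5 hex digest of that concatenation is suffixed with the number of parts. The standalone sketch below is not part of the patch; the class and method names are illustrative, and it only assumes commons-codec's DigestUtils, which the patch itself already uses.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.codec.digest.DigestUtils;

/**
 * Minimal sketch of the aggregate-ETag scheme implemented by
 * multipartUploadedKeyHash: MD5 over the concatenated per-part ETags,
 * suffixed with "-<part count>".
 */
public final class MultipartETagSketch {

  private MultipartETagSketch() {
  }

  /** Per-part ETag: lowercase hex MD5 of the part payload, as the tests store it in the key metadata. */
  static String partETag(byte[] partData) {
    return DigestUtils.md5Hex(partData);
  }

  /** Aggregate ETag over the already-computed per-part ETags. */
  static String multipartETag(List<String> partETags) {
    StringBuilder concatenated = new StringBuilder();
    for (String eTag : partETags) {
      concatenated.append(eTag);
    }
    return DigestUtils.md5Hex(concatenated.toString()) + "-" + partETags.size();
  }

  public static void main(String[] args) {
    String part1 = partETag("part one".getBytes(StandardCharsets.UTF_8));
    String part2 = partETag("part two".getBytes(StandardCharsets.UTF_8));
    // Prints a 32-character hex digest followed by "-2".
    System.out.println(multipartETag(Arrays.asList(part1, part2)));
  }
}

For parts committed by older clients that never stored an ETag in the key metadata, the hunks above fall back to the recorded partName, both when validating the parts of a complete-multipart-upload request and when building this aggregate hash.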