diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 112c76f8c0a..021a36e94a6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -679,7 +679,16 @@ public OmMultipartInfo initiateMultipartUpload(String keyName,
   public OmMultipartInfo initiateMultipartUpload(String keyName,
       ReplicationConfig config)
       throws IOException {
-    return proxy.initiateMultipartUpload(volumeName, name, keyName, config);
+    return initiateMultipartUpload(keyName, config, Collections.emptyMap());
+  }
+
+  /**
+   * Initiate multipart upload for a specified key, with custom key-value metadata.
+   */
+  public OmMultipartInfo initiateMultipartUpload(String keyName,
+      ReplicationConfig config, Map<String, String> metadata)
+      throws IOException {
+    return proxy.initiateMultipartUpload(volumeName, name, keyName, config, metadata);
   }
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 492cd31b672..912a3138c47 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -327,7 +327,7 @@ List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
    * @param bucketName Name of the Bucket
    * @param keyName Name of the Key
    * @param size Size of the data
-   * @param metadata custom key value metadata
+   * @param metadata Custom key value metadata
    * @return {@link OzoneOutputStream}
    *
    */
@@ -509,10 +509,10 @@ OmMultipartInfo initiateMultipartUpload(String volumeName, String
 
   /**
    * Initiate Multipart upload.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param replicationConfig
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param replicationConfig Replication config
    * @return {@link OmMultipartInfo}
    * @throws IOException
    */
@@ -520,6 +520,21 @@ OmMultipartInfo initiateMultipartUpload(String volumeName, String
       bucketName, String keyName, ReplicationConfig replicationConfig)
       throws IOException;
 
+  /**
+   * Initiate Multipart upload.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param replicationConfig Replication config
+   * @param metadata Custom key value metadata
+   * @return {@link OmMultipartInfo}
+   * @throws IOException
+   */
+  OmMultipartInfo initiateMultipartUpload(String volumeName, String
+      bucketName, String keyName, ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException;
+
   /**
    * Create a part key for a multipart upload key.
    * @param volumeName
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index c990d257352..ae5c67eced6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1809,6 +1809,17 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
       String keyName,
       ReplicationConfig replicationConfig)
       throws IOException {
+    return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig,
+        Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String volumeName,
+      String bucketName,
+      String keyName,
+      ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
     HddsClientUtils.checkNotNull(keyName);
@@ -1827,6 +1838,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
         .setKeyName(keyName)
         .setReplicationConfig(replicationConfig)
         .setAcls(getAclList())
+        .addAllMetadataGdpr(metadata)
         .build();
     OmMultipartInfo multipartInfo = ozoneManagerClient
         .initiateMultipartUpload(keyArgs);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 08fa029833e..24e2fc9d86c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1605,6 +1605,7 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
         .setVolumeName(omKeyArgs.getVolumeName())
         .setBucketName(omKeyArgs.getBucketName())
         .setKeyName(omKeyArgs.getKeyName())
+        .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata()))
         .addAllAcls(omKeyArgs.getAcls().stream().map(a ->
             OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
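For context, a minimal sketch of how a caller would exercise the new client-side overload. The volume/bucket/key names, the replication choice, and the pre-built `OzoneClient` are illustrative assumptions, not part of this patch:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

public final class MpuMetadataSketch {
  private MpuMetadataSketch() { }

  /** Initiates an MPU whose custom metadata is persisted on the open key. */
  public static String initiateWithMetadata(OzoneClient client) throws Exception {
    Map<String, String> metadata = new HashMap<>();
    metadata.put("custom-key1", "custom-value1");

    OzoneBucket bucket = client.getObjectStore()
        .getVolume("vol1").getBucket("bucket1");  // assumed to exist

    // New overload added by this patch; the no-metadata overload now
    // delegates here with Collections.emptyMap().
    OmMultipartInfo info = bucket.initiateMultipartUpload("key1",
        ReplicationConfig.fromTypeAndFactor(
            ReplicationType.RATIS, ReplicationFactor.THREE),
        metadata);
    return info.getUploadID();  // parts are uploaded against this ID
  }
}
```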
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 3a6ae0e45d4..96feec2f813 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -87,7 +87,7 @@ Test Multipart Upload
 
 Test Multipart Upload Complete
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
+    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true"
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
                         Should contain          ${result}    ${BUCKET}
                         Should contain          ${result}    ${PREFIX}/multipartKey
@@ -117,6 +117,16 @@ Test Multipart Upload Complete
                         Should contain          ${result}    ETag
                         Should Be Equal As Strings    ${resultETag}    "${expectedResultETag}-2"
 
+#check whether the user defined metadata can be retrieved
+    ${result} =         Execute AWSS3ApiCli     head-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
+                        Should contain          ${result}    \"custom-key1\": \"custom-value1\"
+                        Should contain          ${result}    \"custom-key2\": \"custom-value2\"
+
+    ${result} =         Execute                 ozone sh key info /s3v/${BUCKET}/${PREFIX}/multipartKey1
+                        Should contain          ${result}    \"custom-key1\" : \"custom-value1\"
+                        Should contain          ${result}    \"custom-key2\" : \"custom-value2\"
+                        Should not contain      ${result}    \"gdprEnabled\": \"true\"
+
 #read file and check the key
     ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result
                                 Execute                    cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey1
@@ -128,6 +138,12 @@ Test Multipart Upload Complete
     ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 /tmp/${PREFIX}-multipartKey1-part2.result
                                 Compare files              /tmp/part2    /tmp/${PREFIX}-multipartKey1-part2.result
 
+Test Multipart Upload with user defined metadata size larger than 2 KB
+    ${custom_metadata_value} =    Execute      printf 'v%.0s' {1..3000}
+    ${result} =         Execute AWSS3APICli and checkrc    create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}"    255
+                        Should contain          ${result}    MetadataTooLarge
+                        Should not contain      ${result}    custom-key1: ${custom_metadata_value}
+
 Test Multipart Upload Complete Entity too small
     ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
index 05348fbcba4..bbff89e71f8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
@@ -185,8 +185,9 @@ Create file with user defined metadata with gdpr enabled value in request
 Create file with user defined metadata size larger than 2 KB
                         Execute                 echo "Randomtext" > /tmp/testfile2
     ${custom_metadata_value} =    Execute      printf 'v%.0s' {1..3000}
-    ${result} =         Execute AWSS3APICli and ignore error    put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"
-                        Should not contain      ${result}    custom-key1: ${custom_metadata_value}
+    ${result} =         Execute AWSS3APICli and checkrc    put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"    255
+                        Should contain          ${result}    MetadataTooLarge
+                        Should not contain      ${result}    custom-key1: ${custom_metadata_value}
 
 Create small file and expect ETag (MD5) in a response header
                         Execute                 head -c 1MB /tmp/small_file
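Both negative tests above lean on the gateway's existing 2 KB cap on user-defined metadata (the same limit AWS S3 documents); the behavioral change is only that the CLI now exits non-zero with `MetadataTooLarge`. Conceptually the check is an aggregate-size test over the supplied pairs — a rough sketch under that assumption, not the gateway's actual code:

```java
import java.util.HashMap;
import java.util.Map;

public final class MetadataLimitSketch {
  private MetadataLimitSketch() { }

  /** Rough sketch of a 2 KB aggregate cap on user-defined metadata. */
  static boolean exceedsMetadataLimit(Map<String, String> customMetadata) {
    int total = 0;
    for (Map.Entry<String, String> e : customMetadata.entrySet()) {
      total += e.getKey().length() + e.getValue().length();
    }
    return total > 2048;
  }

  public static void main(String[] args) {
    Map<String, String> m = new HashMap<>();
    // Same shape as the robot test: a 3000-character value must be rejected.
    m.put("custom-key1", new String(new char[3000]).replace('\0', 'v'));
    System.out.println(exceedsMetadataLimit(m));  // true
  }
}
```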
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index a77edd3abc5..632076e2eef 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -25,6 +25,7 @@
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -2960,6 +2961,26 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception {
         keyName, sampleData.length(), 10001, uploadID));
   }
 
+  @ParameterizedTest
+  @MethodSource("replicationConfigs")
+  public void testMultipartUploadWithCustomMetadata(ReplicationConfig replication) throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Create custom metadata
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
+    doMultipartUpload(bucket, keyName, (byte) 98, replication, customMetadata);
+  }
+
   @Test
   public void testAbortUploadFail() throws Exception {
     String volumeName = UUID.randomUUID().toString();
@@ -3593,8 +3614,14 @@ private byte[] generateData(int size, byte val) {
 
   private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
       ReplicationConfig replication) throws Exception {
+    doMultipartUpload(bucket, keyName, val, replication, Collections.emptyMap());
+  }
+
+  private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
+      ReplicationConfig replication, Map<String, String> customMetadata)
+      throws Exception {
     // Initiate Multipart upload request
-    String uploadID = initiateMultipartUpload(bucket, keyName, replication);
+    String uploadID = initiateMultipartUpload(bucket, keyName, replication, customMetadata);
 
     // Upload parts
     Map<Integer, String> partsMap = new TreeMap<>();
@@ -3661,12 +3688,23 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
     latestVersionLocations.getBlocksLatestVersionOnly()
         .forEach(omKeyLocationInfo ->
             assertNotEquals(-1, omKeyLocationInfo.getPartNumber()));
+
+    Map<String, String> keyMetadata = omKeyInfo.getMetadata();
+    assertNotNull(keyMetadata.get(ETAG));
+    if (customMetadata != null && !customMetadata.isEmpty()) {
+      assertThat(keyMetadata).containsAllEntriesOf(customMetadata);
+    }
   }
 
   private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
       ReplicationConfig replicationConfig) throws Exception {
+    return initiateMultipartUpload(bucket, keyName, replicationConfig, Collections.emptyMap());
+  }
+
+  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+      ReplicationConfig replicationConfig, Map<String, String> customMetadata) throws Exception {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        replicationConfig);
+        replicationConfig, customMetadata);
 
     String uploadID = multipartInfo.getUploadID();
     assertNotNull(uploadID);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index e1772d4009c..914a707deb0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -20,6 +20,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -211,6 +212,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex
           .setUpdateID(transactionLogIndex)
           .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
               OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
+          .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
           .build();
 
       // Add to cache
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index d1c865fbc7f..f2423736a39 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -20,6 +20,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
@@ -187,6 +188,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex
           .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
               OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
           .setParentObjectID(pathInfoFSO.getLastKnownParentId())
+          .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
           .build();
 
       // validate and update namespace for missing parent directory
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 83b46de7fd1..b161bd7be67 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -417,6 +417,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
           .setOmKeyLocationInfos(
               Collections.singletonList(keyLocationInfoGroup))
           .setAcls(dbOpenKeyInfo.getAcls())
+          .addAllMetadata(dbOpenKeyInfo.getMetadata())
           .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap));
 
       // Check if db entry has ObjectID. This check is required because
@@ -447,6 +448,9 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       omKeyInfo.setDataSize(dataSize);
       omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig());
+      if (dbOpenKeyInfo.getMetadata() != null) {
+        omKeyInfo.setMetadata(dbOpenKeyInfo.getMetadata());
+      }
       omKeyInfo.getMetadata().put(OzoneConsts.ETAG,
           multipartUploadedKeyHash(partKeyInfoMap));
     }
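One detail of the completion path worth calling out: the open key's metadata (which now carries the custom MPU metadata) is copied onto the completed key first, and the computed ETag is written afterwards, so the part-hash ETag always wins over anything carried in the copied map. A dependency-free sketch of that put-ordering, assuming `OzoneConsts.ETAG` is the literal `"ETag"`:

```java
import java.util.HashMap;
import java.util.Map;

public final class CompleteKeyMetadataSketch {
  private CompleteKeyMetadataSketch() { }

  public static void main(String[] args) {
    // Stand-in for dbOpenKeyInfo.getMetadata(): what the initiate request
    // persisted on the open key.
    Map<String, String> openKeyMetadata = new HashMap<>();
    openKeyMetadata.put("custom-key1", "custom-value1");
    openKeyMetadata.put("ETag", "stale-value");  // hypothetical collision

    // Mirrors .addAllMetadata(dbOpenKeyInfo.getMetadata()) followed by
    // .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(...)).
    Map<String, String> completedKeyMetadata = new HashMap<>(openKeyMetadata);
    completedKeyMetadata.put("ETag", "9df19e...-2");  // hash of uploaded parts

    // Custom entries survive; the last put decides the stored ETag.
    System.out.println(completedKeyMetadata);
  }
}
```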
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 0ebd6946bd2..8103f6616c5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -28,6 +28,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import javax.xml.bind.DatatypeConverter;
 
@@ -49,6 +50,7 @@
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -991,11 +993,28 @@ public static String deleteKey(String ozoneKey,
    */
   public static OMRequest createInitiateMPURequest(String volumeName,
       String bucketName, String keyName) {
+    return createInitiateMPURequest(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Create OMRequest which encapsulates InitiateMultipartUpload request.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param metadata Custom key value metadata
+   */
+  public static OMRequest createInitiateMPURequest(String volumeName,
+      String bucketName, String keyName, Map<String, String> metadata) {
     MultipartInfoInitiateRequest
         multipartInfoInitiateRequest =
         MultipartInfoInitiateRequest.newBuilder().setKeyArgs(
-            KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
-                .setBucketName(bucketName)).build();
+            KeyArgs.newBuilder()
+                .setVolumeName(volumeName)
+                .setKeyName(keyName)
+                .setBucketName(bucketName)
+                .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+            )
+        .build();
 
     return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
         .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index a4c512b25aa..01657162310 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -25,7 +25,9 @@
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -62,8 +64,12 @@ public void testValidateAndUpdateCache() throws Exception {
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, customMetadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(modifiedRequest);
@@ -84,6 +90,10 @@ public void testValidateAndUpdateCache() throws Exception {
     assertNotNull(openMPUKeyInfo);
     assertNotNull(openMPUKeyInfo.getLatestVersionLocations());
     assertTrue(openMPUKeyInfo.getLatestVersionLocations().isMultipartKey());
+    assertNotNull(openMPUKeyInfo.getMetadata());
+    assertEquals("custom-value1", openMPUKeyInfo.getMetadata().get("custom-key1"));
+    assertEquals("custom-value2", openMPUKeyInfo.getMetadata().get("custom-key2"));
+
     assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
 
     assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
index cbdea757206..dd8eb00edb9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
@@ -36,7 +36,9 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -62,11 +64,15 @@ public void testValidateAndUpdateCache() throws Exception {
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     final long volumeId = omMetadataManager.getVolumeId(volumeName);
     final long bucketId = omMetadataManager.getBucketId(volumeName,
         bucketName);
 
     OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, customMetadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO =
         getS3InitiateMultipartUploadReq(modifiedRequest);
@@ -102,6 +108,9 @@ public void testValidateAndUpdateCache() throws Exception {
         "FileName mismatches!");
     assertEquals(parentID, omKeyInfo.getParentObjectID(),
         "ParentId mismatches!");
+    assertNotNull(omKeyInfo.getMetadata());
+    assertEquals("custom-value1", omKeyInfo.getMetadata().get("custom-key1"));
+    assertEquals("custom-value2", omKeyInfo.getMetadata().get("custom-key2"));
 
     OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
         .getMultipartInfoTable().get(multipartFileKey);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 16cb9b6821a..1972fee69ba 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -21,10 +21,13 @@
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager;
 import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer;
@@ -130,9 +133,24 @@ public void stop() {
    */
   protected OMRequest doPreExecuteInitiateMPU(
       String volumeName, String bucketName, String keyName) throws Exception {
+    return doPreExecuteInitiateMPU(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Perform preExecute of Initiate Multipart upload request for given
+   * volume, bucket and key name.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param metadata Custom key value metadata
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPU(
+      String volumeName, String bucketName, String keyName,
+      Map<String, String> metadata) throws Exception {
     OMRequest omRequest =
         OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName,
-            keyName);
+            keyName, metadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(omRequest);
@@ -147,6 +165,14 @@ protected OMRequest doPreExecuteInitiateMPU(
     assertThat(modifiedRequest.getInitiateMultiPartUploadRequest()
         .getKeyArgs().getModificationTime()).isGreaterThan(0);
 
+    if (metadata != null) {
+      Map<String, String> modifiedKeyMetadata = KeyValueUtil.getFromProtobuf(
+          modifiedRequest.getInitiateMultiPartUploadRequest()
+              .getKeyArgs().getMetadataList());
+
+      assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata);
+    }
+
     return modifiedRequest;
   }
 
@@ -247,9 +273,24 @@ protected OMRequest doPreExecuteCompleteMPU(
    */
   protected OMRequest doPreExecuteInitiateMPUWithFSO(
       String volumeName, String bucketName, String keyName) throws Exception {
+    return doPreExecuteInitiateMPUWithFSO(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Perform preExecute of Initiate Multipart upload request for given
+   * volume, bucket and key name.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param metadata Custom key value metadata
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPUWithFSO(
+      String volumeName, String bucketName, String keyName,
+      Map<String, String> metadata) throws Exception {
     OMRequest omRequest =
         OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName,
-            keyName);
+            keyName, metadata);
 
     S3InitiateMultipartUploadRequestWithFSO
         s3InitiateMultipartUploadRequestWithFSO =
@@ -265,6 +306,13 @@ protected OMRequest doPreExecuteInitiateMPUWithFSO(
         .getKeyArgs().getMultipartUploadID());
     assertThat(modifiedRequest.getInitiateMultiPartUploadRequest()
         .getKeyArgs().getModificationTime()).isGreaterThan(0);
+    if (metadata != null) {
+      Map<String, String> modifiedKeyMetadata = KeyValueUtil.getFromProtobuf(
+          modifiedRequest.getInitiateMultiPartUploadRequest()
+              .getKeyArgs().getMetadataList());
+
+      assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata);
+    }
 
     return modifiedRequest;
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index 34e32b0e182..663f2925cb1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -26,7 +26,9 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -72,13 +74,21 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     String uploadId = checkValidateAndUpdateCacheSuccess(
-        volumeName, bucketName, keyName);
+        volumeName, bucketName, keyName, customMetadata);
     checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId);
 
+    customMetadata.remove("custom-key1");
+    customMetadata.remove("custom-key2");
+    customMetadata.put("custom-key3", "custom-value3");
+
     // Do it twice to test overwrite
     uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName,
-        keyName);
+        keyName, customMetadata);
     // After overwrite, one entry must be in delete table
     checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId);
   }
@@ -106,10 +116,10 @@ public void checkDeleteTableCount(String volumeName,
   }
 
   private String checkValidateAndUpdateCacheSuccess(String volumeName,
-      String bucketName, String keyName) throws Exception {
+      String bucketName, String keyName, Map<String, String> metadata) throws Exception {
 
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, metadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(initiateMPURequest);
@@ -175,6 +185,9 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName,
     assertNotNull(multipartKeyInfo.getLatestVersionLocations());
     assertTrue(multipartKeyInfo.getLatestVersionLocations()
         .isMultipartKey());
+    if (metadata != null) {
+      assertThat(multipartKeyInfo.getMetadata()).containsAllEntriesOf(metadata);
+    }
 
     OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable()
         .getCacheValue(new CacheKey<>(
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 5810c4ec2a2..136e47c776a 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -19,6 +19,7 @@
 
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.container.ContainerRequestContext;
@@ -308,6 +309,20 @@ protected Map<String, String> getCustomMetadataFromHeaders(
         customMetadata.put(mapKey, value);
       }
     }
+
+    // If the request contains a custom metadata header "x-amz-meta-ETag",
+    // replace the metadata key with "etag-custom" to prevent a key metadata collision with
+    // the ETag calculated by hashing the object when storing the key in the OM table.
+    // The custom ETag metadata header will be rebuilt during the headObject operation.
+    if (customMetadata.containsKey(HttpHeaders.ETAG)
+        || customMetadata.containsKey(HttpHeaders.ETAG.toLowerCase())) {
+      String customETag = customMetadata.get(HttpHeaders.ETAG) != null ?
+          customMetadata.get(HttpHeaders.ETAG) : customMetadata.get(HttpHeaders.ETAG.toLowerCase());
+      customMetadata.remove(HttpHeaders.ETAG);
+      customMetadata.remove(HttpHeaders.ETAG.toLowerCase());
+      customMetadata.put(ETAG_CUSTOM, customETag);
+    }
+
     return customMetadata;
   }
 
@@ -321,6 +336,7 @@ protected void addCustomMetadataHeaders(
       }
       String metadataKey = entry.getKey();
       if (metadataKey.equals(ETAG_CUSTOM)) {
+        // Rebuild the ETag custom metadata header
         metadataKey = ETAG.toLowerCase();
       }
       responseBuilder
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 8b7db9f061d..747cfce7cac 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -289,14 +289,6 @@ public Response put(
 
       // Normal put object
       Map<String, String> customMetadata =
          getCustomMetadataFromHeaders(headers.getRequestHeaders());
-      if (customMetadata.containsKey(ETAG)
-          || customMetadata.containsKey(ETAG.toLowerCase())) {
-        String customETag = customMetadata.get(ETAG) != null ?
-            customMetadata.get(ETAG) : customMetadata.get(ETAG.toLowerCase());
-        customMetadata.remove(ETAG);
-        customMetadata.remove(ETAG.toLowerCase());
-        customMetadata.put(ETAG_CUSTOM, customETag);
-      }
 
       if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
           .equals(headers.getHeaderString("x-amz-content-sha256"))) {
@@ -754,11 +746,14 @@ public Response initializeMultipartUpload(
       OzoneBucket ozoneBucket = getBucket(bucket);
       String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
 
+      Map<String, String> customMetadata =
+          getCustomMetadataFromHeaders(headers.getRequestHeaders());
+
       ReplicationConfig replicationConfig =
           getReplicationConfig(ozoneBucket, storageType);
 
       OmMultipartInfo multipartInfo =
-          ozoneBucket.initiateMultipartUpload(key, replicationConfig);
+          ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata);
 
       MultipartUploadInitiateResponse multipartUploadInitiateResponse =
           new MultipartUploadInitiateResponse();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 0400bc60500..bc562d5d936 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -56,6 +56,7 @@
 
 import java.io.IOException;
 import java.net.URI;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -309,8 +310,16 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
   public OmMultipartInfo initiateMultipartUpload(String volumeName,
       String bucketName, String keyName, ReplicationConfig replicationConfig)
       throws IOException {
+    return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig, Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String volumeName,
+      String bucketName, String keyName, ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException {
     return getBucket(volumeName, bucketName)
-        .initiateMultipartUpload(keyName, replicationConfig);
+        .initiateMultipartUpload(keyName, replicationConfig, metadata);
   }
 
   @Override
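With the `x-amz-meta-ETag` remapping moved into `getCustomMetadataFromHeaders`, PutObject and the new CreateMultipartUpload path now share one behavior. A standalone sketch of the remap (constants inlined here for illustration; the real code uses the shared `ETAG`/`ETAG_CUSTOM` constants):

```java
import java.util.HashMap;
import java.util.Map;

public final class EtagRemapSketch {
  private static final String ETAG = "ETag";
  private static final String ETAG_CUSTOM = "etag-custom";

  private EtagRemapSketch() { }

  /** Mirrors the collision handling now centralized in EndpointBase. */
  static Map<String, String> remap(Map<String, String> customMetadata) {
    if (customMetadata.containsKey(ETAG)
        || customMetadata.containsKey(ETAG.toLowerCase())) {
      String customETag = customMetadata.get(ETAG) != null
          ? customMetadata.get(ETAG) : customMetadata.get(ETAG.toLowerCase());
      customMetadata.remove(ETAG);
      customMetadata.remove(ETAG.toLowerCase());
      // Stored under a reserved key; surfaced again as "etag" on HeadObject.
      customMetadata.put(ETAG_CUSTOM, customETag);
    }
    return customMetadata;
  }

  public static void main(String[] args) {
    Map<String, String> m = new HashMap<>();
    m.put("ETag", "user-value");   // arrived as x-amz-meta-ETag
    System.out.println(remap(m));  // {etag-custom=user-value}
  }
}
```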
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index d272360fc3c..b5f37aaef3e 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -26,6 +26,7 @@
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -72,7 +73,7 @@ public final class OzoneBucketStub extends OzoneBucket {
 
   private Map<String, byte[]> keyContents = new HashMap<>();
 
-  private Map<String, String> multipartUploadIdMap = new HashMap<>();
+  private Map<String, MultipartInfoStub> keyToMultipartUpload = new HashMap<>();
 
   private Map<String, Map<Integer, Part>> partList = new HashMap<>();
 
@@ -210,8 +211,8 @@ public OzoneDataStreamOutput createMultipartStreamKey(String key,
       int partNumber,
       String uploadID)
       throws IOException {
-    String multipartUploadID = multipartUploadIdMap.get(key);
-    if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) {
+    MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key);
+    if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       ByteBufferStreamOutput byteBufferStreamOutput =
@@ -275,6 +276,7 @@ public OzoneKey headObject(String key) throws IOException {
           ozoneKeyDetails.getCreationTime().toEpochMilli(),
           ozoneKeyDetails.getModificationTime().toEpochMilli(),
           ozoneKeyDetails.getReplicationConfig(),
+          ozoneKeyDetails.getMetadata(),
           ozoneKeyDetails.isFile());
     } else {
       throw new OMException(ResultCodes.KEY_NOT_FOUND);
@@ -358,16 +360,22 @@ public OmMultipartInfo initiateMultipartUpload(String keyName,
       ReplicationType type,
       ReplicationFactor factor)
       throws IOException {
-    String uploadID = UUID.randomUUID().toString();
-    multipartUploadIdMap.put(keyName, uploadID);
-    return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
+    return initiateMultipartUpload(keyName, ReplicationConfig.fromTypeAndFactor(type, factor),
+        Collections.emptyMap());
   }
 
   @Override
   public OmMultipartInfo initiateMultipartUpload(String keyName,
       ReplicationConfig repConfig) throws IOException {
+    return initiateMultipartUpload(keyName, repConfig, Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String keyName,
+      ReplicationConfig config, Map<String, String> metadata)
+      throws IOException {
     String uploadID = UUID.randomUUID().toString();
-    multipartUploadIdMap.put(keyName, uploadID);
+    keyToMultipartUpload.put(keyName, new MultipartInfoStub(uploadID, metadata));
     return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
   }
 
@@ -375,8 +383,8 @@ public OmMultipartInfo initiateMultipartUpload(String keyName,
   public OzoneOutputStream createMultipartKey(String key, long size,
       int partNumber, String uploadID)
       throws IOException {
-    String multipartUploadID = multipartUploadIdMap.get(key);
-    if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) {
+    MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key);
+    if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       ByteArrayOutputStream byteArrayOutputStream =
@@ -402,13 +410,11 @@ public void close() throws IOException {
   public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
       String uploadID, Map<Integer, String> partsMap) throws IOException {
-    if (multipartUploadIdMap.get(key) == null) {
+    if (keyToMultipartUpload.get(key) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       final Map<Integer, Part> partsList = partList.get(key);
-      int count = 1;
-
       ByteArrayOutputStream output = new ByteArrayOutputStream();
 
       int prevPartNumber = 0;
@@ -429,6 +435,18 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
         }
         keyContents.put(key, output.toByteArray());
       }
+
+      keyDetails.put(key, new OzoneKeyDetails(
+          getVolumeName(),
+          getName(),
+          key,
+          keyContents.get(key) != null ? keyContents.get(key).length : 0,
+          System.currentTimeMillis(),
+          System.currentTimeMillis(),
+          new ArrayList<>(), getReplicationConfig(),
+          keyToMultipartUpload.get(key).getMetadata(), null,
+          () -> readKey(key), true
+      ));
     }
 
     return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key,
@@ -438,17 +456,17 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
   @Override
   public void abortMultipartUpload(String keyName, String uploadID)
       throws IOException {
-    if (multipartUploadIdMap.get(keyName) == null) {
+    if (keyToMultipartUpload.get(keyName) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
-      multipartUploadIdMap.remove(keyName);
+      keyToMultipartUpload.remove(keyName);
     }
   }
 
   @Override
   public OzoneMultipartUploadPartListParts listParts(String key,
       String uploadID, int partNumberMarker, int maxParts) throws IOException {
-    if (multipartUploadIdMap.get(key) == null) {
+    if (keyToMultipartUpload.get(key) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     }
     List<PartInfo> partInfoList = new ArrayList<>();
@@ -642,4 +660,26 @@ public Map<String, String> getMetadata() {
     }
   }
 
+  /**
+   * Multipart upload stub to store MPU related information.
+   */
+  private static class MultipartInfoStub {
+
+    private final String uploadId;
+    private final Map<String, String> metadata;
+
+    MultipartInfoStub(String uploadId, Map<String, String> metadata) {
+      this.uploadId = uploadId;
+      this.metadata = metadata;
+    }
+
+    public String getUploadId() {
+      return uploadId;
+    }
+
+    public Map<String, String> getMetadata() {
+      return metadata;
+    }
+  }
+
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index 3c0c87a177f..b23dbfb9c05 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -30,16 +30,22 @@
 import org.junit.jupiter.api.Test;
 
 import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedHashMap;
+import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -54,6 +60,7 @@ public class TestMultipartUploadComplete {
 
   private static final ObjectEndpoint REST = new ObjectEndpoint();
+  private static final HttpHeaders HEADERS = mock(HttpHeaders.class);
   private static final OzoneClient CLIENT = new OzoneClientStub();
 
   @BeforeAll
@@ -61,18 +68,30 @@ public static void setUp() throws Exception {
 
     CLIENT.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET);
 
-
-    HttpHeaders headers = mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+    when(HEADERS.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
         "STANDARD");
 
-    REST.setHeaders(headers);
+    REST.setHeaders(HEADERS);
     REST.setClient(CLIENT);
     REST.setOzoneConfiguration(new OzoneConfiguration());
   }
 
   private String initiateMultipartUpload(String key) throws IOException,
       OS3Exception {
+    return initiateMultipartUpload(key, Collections.emptyMap());
+  }
+
+  private String initiateMultipartUpload(String key, Map<String, String> metadata) throws IOException,
+      OS3Exception {
+    MultivaluedMap<String, String> metadataHeaders = new MultivaluedHashMap<>();
+
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      metadataHeaders.computeIfAbsent(CUSTOM_METADATA_HEADER_PREFIX + entry.getKey(), k -> new ArrayList<>())
+          .add(entry.getValue());
+    }
+
+    when(HEADERS.getRequestHeaders()).thenReturn(metadataHeaders);
+
     Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
         key);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
@@ -83,7 +102,6 @@ private String initiateMultipartUpload(String key) throws IOException,
     assertEquals(200, response.getStatus());
 
     return uploadID;
-
   }
 
   private Part uploadPart(String key, String uploadID, int partNumber, String
@@ -152,6 +170,37 @@ public void testMultipart() throws Exception {
 
   }
 
+  @Test
+  public void testMultipartWithCustomMetadata() throws Exception {
+    String key = UUID.randomUUID().toString();
+
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
+    String uploadID = initiateMultipartUpload(key, customMetadata);
+
+    List<Part> partsList = new ArrayList<>();
+
+    // Upload parts
+    String content = "Multipart Upload 1";
+    int partNumber = 1;
+
+    Part part1 = uploadPart(key, uploadID, partNumber, content);
+    partsList.add(part1);
+
+    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
+        CompleteMultipartUploadRequest();
+    completeMultipartUploadRequest.setPartList(partsList);
+
+    completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
+
+    Response headResponse = REST.head(OzoneConsts.S3_BUCKET, key);
+
+    assertEquals("custom-value1", headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key1"));
+    assertEquals("custom-value2", headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key2"));
+  }
+
   @Test
   public void testMultipartInvalidPartOrderError() throws Exception {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index d891573d5f1..ec262cdf215 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -45,6 +45,7 @@
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyMap;
 import static org.mockito.Mockito.anyString;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.isNull;
@@ -278,7 +279,7 @@ public void testDeleteKey() throws IOException {
   @Test
   public void testMultiUploadKey() throws IOException {
     when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
-    doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any());
+    doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any(), anyMap());
     ObjectEndpoint objectEndpoint = new ObjectEndpoint();
     objectEndpoint.setClient(client);
     objectEndpoint.setHeaders(headers);
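For completeness, the flow as an S3 client sees it after this patch — a hedged end-to-end sketch against an s3g endpoint using the AWS SDK for Java v1; the endpoint address, default credential setup, and names are illustrative assumptions:

```java
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ObjectMetadata;

public final class S3MpuMetadataClientSketch {
  private S3MpuMetadataClientSketch() { }

  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
            "http://localhost:9878", "us-east-1"))  // assumed s3g address
        .enablePathStyleAccess()
        .build();

    // Sent as x-amz-meta-* headers on CreateMultipartUpload; with this patch
    // they survive completion and come back on HeadObject.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("custom-key1", "custom-value1");

    InitiateMultipartUploadResult result = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("bucket1", "key1", metadata));
    System.out.println("UploadId: " + result.getUploadId());
    // ... UploadPart / CompleteMultipartUpload as usual.
  }
}
```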