Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -679,7 +679,16 @@ public OmMultipartInfo initiateMultipartUpload(String keyName,
/**
 * Initiate a multipart upload for the specified key with no custom
 * metadata attached.
 * @param keyName Name of the key
 * @param config Replication config for the key
 * @return {@link OmMultipartInfo} containing the new upload ID
 * @throws IOException in case of I/O error while initiating the upload
 */
public OmMultipartInfo initiateMultipartUpload(String keyName,
ReplicationConfig config)
throws IOException {
// Delegate to the metadata-aware overload with an empty metadata map.
return initiateMultipartUpload(keyName, config, Collections.emptyMap());
}

/**
 * Initiate a multipart upload for the specified key, attaching the given
 * custom key-value metadata to the key.
 * @param keyName Name of the key
 * @param config Replication config for the key
 * @param metadata Custom key value metadata to store with the key
 * @return {@link OmMultipartInfo} containing the new upload ID
 * @throws IOException in case of I/O error while initiating the upload
 */
public OmMultipartInfo initiateMultipartUpload(String keyName,
ReplicationConfig config, Map<String, String> metadata)
throws IOException {
return proxy.initiateMultipartUpload(volumeName, name, keyName, config, metadata);
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,7 @@ List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
* @param bucketName Name of the Bucket
* @param keyName Name of the Key
* @param size Size of the data
* @param metadata custom key value metadata
* @param metadata Custom key value metadata
* @return {@link OzoneOutputStream}
*
*/
Expand Down Expand Up @@ -509,17 +509,32 @@ OmMultipartInfo initiateMultipartUpload(String volumeName, String

/**
 * Initiate Multipart upload.
 * @param volumeName Name of the Volume
 * @param bucketName Name of the Bucket
 * @param keyName Name of the Key
 * @param replicationConfig Replication Config
 * @return {@link OmMultipartInfo}
 * @throws IOException in case of I/O error while initiating the upload
 */
OmMultipartInfo initiateMultipartUpload(String volumeName, String
bucketName, String keyName, ReplicationConfig replicationConfig)
throws IOException;

/**
 * Initiate Multipart upload, attaching custom key-value metadata to
 * the key being created.
 * @param volumeName Name of the Volume
 * @param bucketName Name of the Bucket
 * @param keyName Name of the Key
 * @param replicationConfig Replication config
 * @param metadata Custom key value metadata
 * @return {@link OmMultipartInfo}
 * @throws IOException in case of I/O error while initiating the upload
 */
OmMultipartInfo initiateMultipartUpload(String volumeName, String
bucketName, String keyName, ReplicationConfig replicationConfig,
Map<String, String> metadata)
throws IOException;

/**
* Create a part key for a multipart upload key.
* @param volumeName
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1809,6 +1809,17 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
String keyName,
ReplicationConfig replicationConfig)
throws IOException {
return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig,
Collections.emptyMap());
}

@Override
public OmMultipartInfo initiateMultipartUpload(String volumeName,
String bucketName,
String keyName,
ReplicationConfig replicationConfig,
Map<String, String> metadata)
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
HddsClientUtils.checkNotNull(keyName);
Expand All @@ -1827,6 +1838,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
.setKeyName(keyName)
.setReplicationConfig(replicationConfig)
.setAcls(getAclList())
.addAllMetadataGdpr(metadata)
.build();
OmMultipartInfo multipartInfo = ozoneManagerClient
.initiateMultipartUpload(keyArgs);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1605,6 +1605,7 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
.setVolumeName(omKeyArgs.getVolumeName())
.setBucketName(omKeyArgs.getBucketName())
.setKeyName(omKeyArgs.getKeyName())
.addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata()))
.addAllAcls(omKeyArgs.getAcls().stream().map(a ->
OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));

Expand Down
18 changes: 17 additions & 1 deletion hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ Test Multipart Upload


Test Multipart Upload Complete
${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true"
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} ${PREFIX}/multipartKey
Expand Down Expand Up @@ -117,6 +117,16 @@ Test Multipart Upload Complete
Should contain ${result} ETag
Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2"

# Check whether the user-defined metadata can be retrieved
${result} = Execute AWSS3ApiCli head-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
Should contain ${result} \"custom-key1\": \"custom-value1\"
Should contain ${result} \"custom-key2\": \"custom-value2\"

${result} = Execute ozone sh key info /s3v/${BUCKET}/${PREFIX}/multipartKey1
Should contain ${result} \"custom-key1\" : \"custom-value1\"
Should contain ${result} \"custom-key2\" : \"custom-value2\"
Should not contain ${result} \"gdprEnabled\": \"true\"

#read file and check the key
${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result
Execute cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey1
Expand All @@ -128,6 +138,12 @@ Test Multipart Upload Complete
${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 /tmp/${PREFIX}-multipartKey1-part2.result
Compare files /tmp/part2 /tmp/${PREFIX}-multipartKey1-part2.result

Test Multipart Upload with user defined metadata size larger than 2 KB
# S3 caps user-defined metadata at 2 KB per object; a ~3 KB value must make
# create-multipart-upload fail with MetadataTooLarge (AWS CLI exit code 255).
${custom_metadata_value} = Execute printf 'v%.0s' {1..3000}
${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}" 255
Should contain ${result} MetadataTooLarge
Should not contain ${result} custom-key1: ${custom_metadata_value}

Test Multipart Upload Complete Entity too small
${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Expand Down
5 changes: 3 additions & 2 deletions hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
Original file line number Diff line number Diff line change
Expand Up @@ -185,8 +185,9 @@ Create file with user defined metadata with gdpr enabled value in request
Create file with user defined metadata size larger than 2 KB
Execute echo "Randomtext" > /tmp/testfile2
${custom_metadata_value} = Execute printf 'v%.0s' {1..3000}
${result} = Execute AWSS3APICli and ignore error put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"
Should not contain ${result} custom-key1: ${custom_metadata_value}
${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}" 255
Should contain ${result} MetadataTooLarge
Should not contain ${result} custom-key1: ${custom_metadata_value}

Create small file and expect ETag (MD5) in a reponse header
Execute head -c 1MB </dev/urandom > /tmp/small_file
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
Expand Down Expand Up @@ -2960,6 +2961,26 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception {
keyName, sampleData.length(), 10001, uploadID));
}

@ParameterizedTest
@MethodSource("replicationConfigs")
public void testMultipartUploadWithCustomMetadata(ReplicationConfig replication) throws Exception {
  // Use random names so the test is isolated from other runs.
  final String volumeName = UUID.randomUUID().toString();
  final String bucketName = UUID.randomUUID().toString();
  final String keyName = UUID.randomUUID().toString();

  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  // User-defined metadata that should survive the whole MPU cycle.
  final Map<String, String> userMetadata = new HashMap<>();
  userMetadata.put("custom-key1", "custom-value1");
  userMetadata.put("custom-key2", "custom-value2");

  doMultipartUpload(bucket, keyName, (byte) 98, replication, userMetadata);
}

@Test
public void testAbortUploadFail() throws Exception {
String volumeName = UUID.randomUUID().toString();
Expand Down Expand Up @@ -3593,8 +3614,14 @@ private byte[] generateData(int size, byte val) {
// Convenience overload: runs a full multipart upload cycle without any
// custom key metadata.
private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
ReplicationConfig replication)
throws Exception {
doMultipartUpload(bucket, keyName, val, replication, Collections.emptyMap());
}

private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
ReplicationConfig replication, Map<String, String> customMetadata)
throws Exception {
// Initiate Multipart upload request
String uploadID = initiateMultipartUpload(bucket, keyName, replication);
String uploadID = initiateMultipartUpload(bucket, keyName, replication, customMetadata);

// Upload parts
Map<Integer, String> partsMap = new TreeMap<>();
Expand Down Expand Up @@ -3661,12 +3688,23 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
latestVersionLocations.getBlocksLatestVersionOnly()
.forEach(omKeyLocationInfo ->
assertNotEquals(-1, omKeyLocationInfo.getPartNumber()));

Map<String, String> keyMetadata = omKeyInfo.getMetadata();
assertNotNull(keyMetadata.get(ETAG));
if (customMetadata != null && !customMetadata.isEmpty()) {
assertThat(keyMetadata).containsAllEntriesOf(customMetadata);
}
}

// Convenience overload: initiates a multipart upload with no custom
// metadata and returns the generated upload ID.
private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
ReplicationConfig replicationConfig) throws Exception {
return initiateMultipartUpload(bucket, keyName, replicationConfig, Collections.emptyMap());
}

private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
ReplicationConfig replicationConfig, Map<String, String> customMetadata) throws Exception {
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
replicationConfig);
replicationConfig, customMetadata);

String uploadID = multipartInfo.getUploadID();
assertNotNull(uploadID);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
Expand Down Expand Up @@ -211,6 +212,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
.setUpdateID(transactionLogIndex)
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
.addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
.build();

// Add to cache
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneConfigUtil;
Expand Down Expand Up @@ -187,6 +188,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
.setParentObjectID(pathInfoFSO.getLastKnownParentId())
.addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
.build();

// validate and update namespace for missing parent directory
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -417,6 +417,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
.setOmKeyLocationInfos(
Collections.singletonList(keyLocationInfoGroup))
.setAcls(dbOpenKeyInfo.getAcls())
.addAllMetadata(dbOpenKeyInfo.getMetadata())
.addMetadata(OzoneConsts.ETAG,
multipartUploadedKeyHash(partKeyInfoMap));
// Check if db entry has ObjectID. This check is required because
Expand Down Expand Up @@ -447,6 +448,9 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
omKeyInfo.setDataSize(dataSize);
omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig());
if (dbOpenKeyInfo.getMetadata() != null) {
omKeyInfo.setMetadata(dbOpenKeyInfo.getMetadata());
}
omKeyInfo.getMetadata().put(OzoneConsts.ETAG,
multipartUploadedKeyHash(partKeyInfoMap));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import javax.xml.bind.DatatypeConverter;
Expand All @@ -49,6 +50,7 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
Expand Down Expand Up @@ -991,11 +993,28 @@ public static String deleteKey(String ozoneKey,
*/
public static OMRequest createInitiateMPURequest(String volumeName,
String bucketName, String keyName) {
// Delegate to the metadata-aware overload with an empty metadata map.
return createInitiateMPURequest(volumeName, bucketName, keyName, Collections.emptyMap());
}

/**
* Create OMRequest which encapsulates InitiateMultipartUpload request.
* @param volumeName
* @param bucketName
* @param keyName
* @param metadata
*/
public static OMRequest createInitiateMPURequest(String volumeName,
String bucketName, String keyName, Map<String, String> metadata) {
MultipartInfoInitiateRequest
multipartInfoInitiateRequest =
MultipartInfoInitiateRequest.newBuilder().setKeyArgs(
KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
.setBucketName(bucketName)).build();
KeyArgs.newBuilder()
.setVolumeName(volumeName)
.setKeyName(keyName)
.setBucketName(bucketName)
.addAllMetadata(KeyValueUtil.toProtobuf(metadata))
)
.build();

return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
.setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

Expand Down Expand Up @@ -62,8 +64,12 @@ public void testValidateAndUpdateCache() throws Exception {
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager, getBucketLayout());

Map<String, String> customMetadata = new HashMap<>();
customMetadata.put("custom-key1", "custom-value1");
customMetadata.put("custom-key2", "custom-value2");

OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName,
bucketName, keyName);
bucketName, keyName, customMetadata);

S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
getS3InitiateMultipartUploadReq(modifiedRequest);
Expand All @@ -84,6 +90,10 @@ public void testValidateAndUpdateCache() throws Exception {
assertNotNull(openMPUKeyInfo);
assertNotNull(openMPUKeyInfo.getLatestVersionLocations());
assertTrue(openMPUKeyInfo.getLatestVersionLocations().isMultipartKey());
assertNotNull(openMPUKeyInfo.getMetadata());
assertEquals("custom-value1", openMPUKeyInfo.getMetadata().get("custom-key1"));
assertEquals("custom-value2", openMPUKeyInfo.getMetadata().get("custom-key2"));

assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));

assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,9 @@

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

Expand All @@ -62,11 +64,15 @@ public void testValidateAndUpdateCache() throws Exception {
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager, getBucketLayout());

Map<String, String> customMetadata = new HashMap<>();
customMetadata.put("custom-key1", "custom-value1");
customMetadata.put("custom-key2", "custom-value2");

final long volumeId = omMetadataManager.getVolumeId(volumeName);
final long bucketId = omMetadataManager.getBucketId(volumeName,
bucketName);
OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName,
bucketName, keyName);
bucketName, keyName, customMetadata);

S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO =
getS3InitiateMultipartUploadReq(modifiedRequest);
Expand Down Expand Up @@ -102,6 +108,9 @@ public void testValidateAndUpdateCache() throws Exception {
"FileName mismatches!");
assertEquals(parentID, omKeyInfo.getParentObjectID(),
"ParentId mismatches!");
assertNotNull(omKeyInfo.getMetadata());
assertEquals("custom-value1", omKeyInfo.getMetadata().get("custom-key1"));
assertEquals("custom-value2", omKeyInfo.getMetadata().get("custom-key2"));

OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
.getMultipartInfoTable().get(multipartFileKey);
Expand Down
Loading