Merged
@@ -22,10 +22,10 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;

import java.util.Optional;
import org.apache.hadoop.fs.ozone.OzoneClientUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.ozone.client.OzoneClientUtils;

/**
* Common options for specifying replication config: specialized for
@@ -17,7 +17,7 @@

package org.apache.hadoop.ozone.shell.keys;

import static org.apache.hadoop.fs.ozone.OzoneClientUtils.getFileChecksumWithCombineMode;
import static org.apache.hadoop.ozone.client.OzoneClientUtils.getFileChecksumWithCombineMode;

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import java.io.IOException;
@@ -15,7 +15,7 @@
* limitations under the License.
*/

package org.apache.hadoop.fs.ozone;
package org.apache.hadoop.ozone.client;

import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
@@ -36,9 +36,6 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper;
import org.apache.hadoop.ozone.client.checksum.ChecksumHelperFactory;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
@@ -15,7 +15,7 @@
* limitations under the License.
*/

package org.apache.hadoop.fs.ozone;
package org.apache.hadoop.ozone.client;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
@@ -32,8 +32,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.junit.jupiter.api.Test;

2 changes: 2 additions & 0 deletions hadoop-ozone/dist/src/main/compose/common/ec-test.sh
@@ -30,3 +30,5 @@ docker-compose up -d --no-recreate --scale datanode=3
execute_robot_test scm -v PREFIX:${prefix} -N read-3-datanodes ec/read.robot
docker-compose up -d --no-recreate --scale datanode=5
execute_robot_test scm -v container:1 -v count:5 -N EC-recovery replication/wait.robot
docker-compose up -d --no-recreate --scale datanode=9
execute_robot_test scm -N S3-EC-Storage ec/awss3ecstorage.robot
90 changes: 90 additions & 0 deletions hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
@@ -0,0 +1,90 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation S3 gateway test with AWS CLI using the STANDARD_IA storage class
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource ../s3/commonawslib.robot
Resource ../s3/mpu_lib.robot
Resource ../ozone-lib/shell.robot
Test Timeout 5 minutes
Suite Setup Setup EC Multipart Tests
Suite Teardown Teardown EC Multipart Tests
Test Setup Generate random prefix

*** Keywords ***
Setup EC Multipart Tests
Setup s3 tests
Create Random File KB 1023 /tmp/1mb

Teardown EC Multipart Tests
Remove Files /tmp/1mb

*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${BUCKET} generated

*** Test Cases ***

Put Object with STANDARD_IA storage class
${file_checksum} = Execute md5sum /tmp/1mb | awk '{print $1}'

${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey32 --body /tmp/1mb --storage-class STANDARD_IA
${eTag} = Execute echo '${result}' | jq -r '.ETag'
Should Be Equal ${eTag} \"${file_checksum}\"
Verify Key EC Replication Config /s3v/${BUCKET}/${PREFIX}/ecKey32 RS 3 2 1048576

${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey63 --body /tmp/1mb --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
${eTag} = Execute echo '${result}' | jq -r '.ETag'
Should Be Equal ${eTag} \"${file_checksum}\"
Verify Key EC Replication Config /s3v/${BUCKET}/${PREFIX}/ecKey63 RS 6 3 1048576

Test multipart upload with STANDARD_IA storage
${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 0 --storage-class STANDARD_IA
${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} 1 /tmp/1mb
${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID}
${part1} = Execute echo '${result}' | jq -r '.Parts[0].ETag'
Should Be equal ${part1} ${eTag1}
Should contain ${result} STANDARD_IA
Complete MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} {ETag=${eTag1},PartNumber=1}
Verify Key EC Replication Config /s3v/${BUCKET}/${PREFIX}/ecmultipartKey32 RS 3 2 1048576

${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 0 --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey63 ${uploadID} 1 /tmp/part1
${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID}
${part1} = Execute echo '${result}' | jq -r '.Parts[0].ETag'
Should Be equal ${part1} ${eTag1}
Should contain ${result} STANDARD_IA
Complete MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 ${uploadID} {ETag=${eTag1},PartNumber=1}
Verify Key EC Replication Config /s3v/${BUCKET}/${PREFIX}/ecmultipartKey63 RS 6 3 1048576

Copy Object change storage class to STANDARD_IA
${file_checksum} = Execute md5sum /tmp/1mb | awk '{print $1}'
${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --body /tmp/1mb
${eTag} = Execute echo '${result}' | jq -r '.ETag'
Should Be Equal ${eTag} \"${file_checksum}\"

${result} = Execute AWSS3APICli copy-object --storage-class STANDARD_IA --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
Should contain ${result} ETag
${eTag} = Execute echo '${result}' | jq -r '.CopyObjectResult.ETag'
Should Be Equal ${eTag} \"${file_checksum}\"

${result} = Execute AWSS3APICli copy-object --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k" --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
Should contain ${result} ETag
${eTag} = Execute echo '${result}' | jq -r '.CopyObjectResult.ETag'
Should Be Equal ${eTag} \"${file_checksum}\"
## TODO: Verify Key EC Replication Config when we support changing storage class
8 changes: 8 additions & 0 deletions hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
@@ -46,6 +46,14 @@ File upload and directory list
Should not contain ${result} testfile
Should not contain ${result} dir1
Should contain ${result} file
# Verify the S3 storage class depending on whether the file is replicated or erasure coded.
${result} = Execute AWSS3CliDebug ls s3://${BUCKET}/dir1/dir2/file
IF '${BUCKET}' == 'erasure'
Should contain ${result} STANDARD_IA
ELSE
Should contain ${result} STANDARD
Should not contain ${result} STANDARD_IA
END

File upload with special chars
Execute date > /tmp/testfile
@@ -67,6 +67,7 @@
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
@@ -74,6 +74,7 @@
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -81,6 +81,7 @@
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneSnapshot;
import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -83,6 +83,7 @@
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -57,7 +57,6 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.audit.S3GAction;
@@ -753,12 +752,8 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
if (eTag != null) {
keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag));
}
if (next.getReplicationType().toString().equals(ReplicationType
.STAND_ALONE.toString())) {
keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
} else {
keyMetadata.setStorageClass(S3StorageType.STANDARD.toString());
}
keyMetadata.setStorageClass(S3StorageType.fromReplicationConfig(
next.getReplicationConfig()).toString());
keyMetadata.setLastModified(next.getModificationTime());
String displayName = next.getOwner();
keyMetadata.setOwner(S3Owner.of(displayName));
@@ -24,6 +24,7 @@
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX;
import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_NUM_LIMIT;
@@ -98,7 +99,7 @@ public abstract class EndpointBase implements Auditor {
private ContainerRequestContext context;

private Set<String> excludeMetadataFields =
new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG));
new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG, STORAGE_CONFIG_HEADER));
Contributor:
what does this mean

Contributor Author:
We store every x-amz-meta-key=value pair for an object in KeyArgs.metadata (along with, for example, the ETag). This change excludes the EC storage config from that metadata, because it is already captured in the key's replication config. We only want to use this header to determine the EC replication config when the client uses the STANDARD_IA storage class.
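
To make the exclusion concrete, here is a minimal sketch of the filtering idea; the class name, method name, and the exact contents of the exclude set are illustrative assumptions, not the actual Ozone implementation.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Illustrative sketch: user-supplied x-amz-meta-* entries whose keys appear in
 * the exclude set (e.g. the storage-config header) are dropped before the
 * remaining metadata is persisted with the key.
 */
final class MetadataFilterSketch {
  private static final Set<String> EXCLUDED_FIELDS =
      new HashSet<>(Arrays.asList("gdprEnabled", "storage-config"));

  static Map<String, String> filterCustomMetadata(Map<String, String> userMetadata) {
    return userMetadata.entrySet().stream()
        .filter(e -> !EXCLUDED_FIELDS.contains(e.getKey()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  }
}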

private static final Logger LOG =
LoggerFactory.getLogger(EndpointBase.class);

@@ -27,9 +27,6 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY;
@@ -49,12 +46,14 @@
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE;
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_UNMODIFIED_SINCE;
import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective;
import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.MP_PARTS_COUNT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Utils.hasMultiChunksPayload;
@@ -110,12 +109,12 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.audit.S3GAction;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
@@ -236,7 +235,7 @@ public Response put(
boolean auditSuccess = true;
PerformanceStringBuilder perf = new PerformanceStringBuilder();

String copyHeader = null, storageType = null;
String copyHeader = null, storageType = null, storageConfig = null;
DigestInputStream digestInputStream = null;
try {
if (aclMarker != null) {
@@ -262,12 +261,13 @@

copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);
Contributor:
Curious: how will a client add the header to the request? Is it supported by the S3 CLI or the Hadoop AWS cloud connector?

I wonder where we should add this information in the developer doc.

Contributor Author:
All S3 clients should support it. They would just need to add the x-amz-meta-storage-config header (for example, storage-config=rs-6-3-1024k) to their requests alongside the STANDARD_IA storage class.
This is demonstrated in the acceptance test with the AWS CLI, which adds the header underneath via --metadata.
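
For illustration, a hedged sketch of how a Java client could set both the storage class and the custom metadata header with the AWS SDK for Java v1. The s3g endpoint matches the compose setup used in the tests; the bucket name, key, file path, and credentials are placeholders, and this snippet is an assumption about client-side usage rather than part of the change itself.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import java.io.File;

public class EcPutExample {
  public static void main(String[] args) {
    // Placeholder credentials; path-style access against the s3g endpoint.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(
            new AwsClientBuilder.EndpointConfiguration("http://s3g:9878", "us-east-1"))
        .withCredentials(new AWSStaticCredentialsProvider(
            new BasicAWSCredentials("accessKey", "secretKey")))
        .withPathStyleAccessEnabled(true)
        .build();

    // x-amz-meta-storage-config carries the custom EC scheme; it only takes
    // effect together with the STANDARD_IA storage class.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("storage-config", "rs-6-3-1024k");

    PutObjectRequest request =
        new PutObjectRequest("bucket1", "ecKey63", new File("/tmp/1mb"))
            .withMetadata(metadata)
            .withStorageClass(StorageClass.StandardInfrequentAccess);

    s3.putObject(request);
  }
}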

boolean storageTypeDefault = StringUtils.isEmpty(storageType);

// Normal put object
OzoneBucket bucket = volume.getBucket(bucketName);
ReplicationConfig replicationConfig =
getReplicationConfig(bucket, storageType);
getReplicationConfig(bucket, storageType, storageConfig);

boolean enableEC = false;
if ((replicationConfig != null &&
@@ -819,14 +819,15 @@ public Response initializeMultipartUpload(
try {
OzoneBucket ozoneBucket = getBucket(bucket);
String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);

Map<String, String> customMetadata =
getCustomMetadataFromHeaders(headers.getRequestHeaders());

Map<String, String> tags = getTaggingFromHeaders(headers);

ReplicationConfig replicationConfig =
getReplicationConfig(ozoneBucket, storageType);
getReplicationConfig(ozoneBucket, storageType, storageConfig);

OmMultipartInfo multipartInfo =
ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata, tags);
Expand Down Expand Up @@ -859,21 +860,12 @@ public Response initializeMultipartUpload(
}

private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
String storageType) throws OS3Exception {
if (StringUtils.isEmpty(storageType)) {
S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration);
storageType = (defaultStorageType != null ? defaultStorageType.toString() : null);
}
String storageType, String storageConfig) throws OS3Exception {

ReplicationConfig clientConfiguredReplicationConfig = null;
String replication = ozoneConfiguration.get(OZONE_REPLICATION);
if (replication != null) {
clientConfiguredReplicationConfig = ReplicationConfig.parse(
ReplicationType.valueOf(ozoneConfiguration
.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT)),
replication, ozoneConfiguration);
}
return S3Utils.resolveS3ClientSideReplicationConfig(storageType,
ReplicationConfig clientConfiguredReplicationConfig =
OzoneClientUtils.getClientConfiguredReplicationConfig(ozoneConfiguration);

return S3Utils.resolveS3ClientSideReplicationConfig(storageType, storageConfig,
clientConfiguredReplicationConfig, ozoneBucket.getReplicationConfig());
}

@@ -972,9 +964,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket,

copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);
final OzoneBucket ozoneBucket = volume.getBucket(bucket);
ReplicationConfig replicationConfig =
getReplicationConfig(ozoneBucket, storageType);
getReplicationConfig(ozoneBucket, storageType, storageConfig);

boolean enableEC = false;
if ((replicationConfig != null &&
@@ -150,6 +150,12 @@ public final class S3ErrorTable {
HTTP_FORBIDDEN
);

public static final OS3Exception INVALID_STORAGE_CLASS = new OS3Exception(
"InvalidStorageClass", "The storage class that you specified is not valid. " +
"Provide a supported storage class[STANDARD|REDUCED_REDUNDANCY|STANDARD_IA] or " +
"a valid custom EC storage config for if using STANDARD_IA.",
HTTP_BAD_REQUEST);

private static Function<Exception, OS3Exception> generateInternalError =
e -> new OS3Exception("InternalError", e.getMessage(), HTTP_INTERNAL_ERROR);
