Merged
35 commits
cef94c9
HDDS-10775. Support bucket ownership verification
hevinhsu Jun 3, 2025
219933b
HDDS-10775. Add integration test
hevinhsu Jun 3, 2025
e385d1e
HDDS-10775. Fix error
hevinhsu Jun 3, 2025
05468b9
Merge branch 'apache:master' into HDDS-10775
hevinhsu Jun 3, 2025
0a0f166
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 4, 2025
4281392
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 5, 2025
0204224
Move integration tests to AbstractS3SDKV2Tests to avoid duplicated mi…
hevinhsu Jun 5, 2025
6d83496
fix spotbug warning
hevinhsu Jun 6, 2025
35d962d
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 10, 2025
6d07294
Feat: add null bucketOwner verification
hevinhsu Jun 10, 2025
a779f77
Fix: fix check style
hevinhsu Jun 10, 2025
2888c76
Refactor: move BucketOwnerCondition static method to S3Owner
hevinhsu Jun 10, 2025
f9c807c
Fix: fix checkstyle
hevinhsu Jun 10, 2025
145b2b0
Refactor: rename method name
hevinhsu Jun 10, 2025
6e9b763
chore: recover non-necessary changes
hevinhsu Jun 11, 2025
31c460b
Fix: fix build failed
hevinhsu Jun 11, 2025
54c4a94
feat: add error entry for bucket owner verification failure
hevinhsu Jun 11, 2025
7972065
Revert non relative changes
hevinhsu Jun 12, 2025
3c9f721
Refactor: merge bucket owner verification tests to reduce setup dupli…
hevinhsu Jun 12, 2025
8084bca
Feat: add link bucket integration tests
hevinhsu Jun 12, 2025
f1863a8
Fix: fix check style
hevinhsu Jun 12, 2025
41ea401
Revert: revert non relative changes
hevinhsu Jun 12, 2025
5745682
Remove bucket owner condition unit tests because these scenario cover…
hevinhsu Jun 13, 2025
660f2e6
refactor: simplify implementation to minimize code changes
hevinhsu Jun 13, 2025
23835f2
Fix typo
hevinhsu Jun 13, 2025
ae642b3
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 13, 2025
2c2d8c7
Rename testcase names
hevinhsu Jun 13, 2025
059932b
Refactor: add new BucketEndpoint attribute: headers to replace HttpHe…
hevinhsu Jun 14, 2025
9d41ae9
Fix typo
hevinhsu Jun 14, 2025
3ad3aa1
Refactor: ignore destBucket owner verification to because it's alread…
hevinhsu Jun 14, 2025
3bec8cd
Refactor: simplify method call to get omMultipartUploadCompleteInfo
hevinhsu Jun 14, 2025
f42ba39
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 14, 2025
97d7dca
Add correct resource name on S3Error when exception occur on copy ope…
hevinhsu Jun 14, 2025
e66d702
Fix: fix findbugs warning
hevinhsu Jun 14, 2025
92d346b
Merge remote-tracking branch 'origin/master' into HDDS-10775
hevinhsu Jun 14, 2025
@@ -13,4 +13,8 @@
limitations under the License. See accompanying LICENSE file.
-->
<FindBugsFilter>
<Match>
<Class name="org.apache.hadoop.ozone.s3.awssdk.v2.AbstractS3SDKV2Tests$S3BucketVerificationConditionTests"/>
<Bug pattern="SIC_INNER_SHOULD_BE_STATIC"/>
</Match>
</FindBugsFilter>

Large diffs are not rendered by default.

@@ -134,7 +134,7 @@ public Response get(
try {
if (aclMarker != null) {
s3GAction = S3GAction.GET_ACL;
S3BucketAcl result = getAcl(bucketName);
S3BucketAcl result = getAcl(hh, bucketName);
getMetrics().updateGetAclSuccessStats(startNanos);
AUDIT.logReadSuccess(
buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
@@ -143,7 +143,7 @@

if (uploads != null) {
s3GAction = S3GAction.LIST_MULTIPART_UPLOAD;
return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads);
return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads, hh);
}

maxKeys = validateMaxKeys(maxKeys);
@@ -168,8 +168,9 @@
&& OZONE_URI_DELIMITER.equals(delimiter);

bucket = getBucket(bucketName);
ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow);
BucketOwnerCondition.verify(hh, bucket.getOwner());

ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow);
} catch (OMException ex) {
AUDIT.logReadFailure(
buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
@@ -347,18 +348,21 @@ public Response listMultipartUploads(
String prefix,
String keyMarker,
String uploadIdMarker,
int maxUploads)
int maxUploads,
HttpHeaders httpHeaders)
throws OS3Exception, IOException {

if (maxUploads < 1 || maxUploads > 1000) {
throw newError(S3ErrorTable.INVALID_ARGUMENT, "max-uploads",
new Exception("max-uploads must be between 1 and 1000"));
}


long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.LIST_MULTIPART_UPLOAD;

OzoneBucket bucket = getBucket(bucketName);
BucketOwnerCondition.verify(httpHeaders, bucket.getOwner());

try {
OzoneMultipartUploadList ozoneMultipartUploadList =
@@ -408,16 +412,25 @@ public Response listMultipartUploads(
* for more details.
*/
@HEAD
public Response head(@PathParam("bucket") String bucketName)
public Response head(@PathParam("bucket") String bucketName, @Context HttpHeaders httpHeaders)
throws OS3Exception, IOException {
long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.HEAD_BUCKET;
try {
getBucket(bucketName);
OzoneBucket bucket = getBucket(bucketName);
BucketOwnerCondition.verify(httpHeaders, bucket.getOwner());
AUDIT.logReadSuccess(
buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
getMetrics().updateHeadBucketSuccessStats(startNanos);
return Response.ok().build();
} catch (OMException ex) {
AUDIT.logReadFailure(
buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
} else {
throw ex;
}
} catch (Exception e) {
AUDIT.logReadFailure(
buildAuditMessageForFailure(s3GAction, getAuditParameters(), e));
@@ -432,13 +445,13 @@ public Response head(@PathParam("bucket") String bucketName)
* for more details.
*/
@DELETE
public Response delete(@PathParam("bucket") String bucketName)
public Response delete(@PathParam("bucket") String bucketName, @Context HttpHeaders httpHeaders)
throws IOException, OS3Exception {
long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.DELETE_BUCKET;

try {
deleteS3Bucket(bucketName);
deleteS3Bucket(bucketName, httpHeaders);
} catch (OMException ex) {
AUDIT.logWriteFailure(
buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
@@ -477,7 +490,8 @@ public Response delete(@PathParam("bucket") String bucketName)
@Produces(MediaType.APPLICATION_XML)
public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
@QueryParam("delete") String delete,
MultiDeleteRequest request)
MultiDeleteRequest request,
@Context HttpHeaders httpHeaders)
throws OS3Exception, IOException {
S3GAction s3GAction = S3GAction.MULTI_DELETE;

@@ -492,6 +506,7 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
}
long startNanos = Time.monotonicNowNanos();
try {
BucketOwnerCondition.verify(httpHeaders, bucket.getOwner());
undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true);
for (DeleteObject d : request.getObjects()) {
ErrorInfo error = undeletedKeyResultMap.get(d.getKey());
@@ -508,6 +523,14 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
}
}
getMetrics().updateDeleteKeySuccessStats(startNanos);
} catch (OMException ex) {
AUDIT.logReadFailure(
buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
} else {
throw ex;
}
} catch (IOException ex) {
LOG.error("Delete key failed: {}", ex.getMessage());
getMetrics().updateDeleteKeyFailureStats(startNanos);
@@ -534,16 +557,14 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
* <p>
* see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
*/
public S3BucketAcl getAcl(String bucketName)
public S3BucketAcl getAcl(HttpHeaders headers, String bucketName)
throws OS3Exception, IOException {
long startNanos = Time.monotonicNowNanos();
S3BucketAcl result = new S3BucketAcl();
try {
OzoneBucket bucket = getBucket(bucketName);
OzoneVolume volume = getVolume();
// TODO: use bucket owner instead of volume owner here once bucket owner
// TODO: is supported.
S3Owner owner = S3Owner.of(volume.getOwner());
BucketOwnerCondition.verify(headers, bucket.getOwner());
S3Owner owner = S3Owner.of(bucket.getOwner());
result.setOwner(owner);

// TODO: remove this duplication avoid logic when ACCESS and DEFAULT scope
@@ -592,6 +613,7 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders,

try {
OzoneBucket bucket = getBucket(bucketName);
BucketOwnerCondition.verify(httpHeaders, bucket.getOwner());
OzoneVolume volume = getVolume();

List<OzoneAcl> ozoneAclListOnBucket = new ArrayList<>();
@@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.s3.endpoint;

import javax.ws.rs.core.HttpHeaders;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.s3.util.S3Consts;

/**
* Bucket owner condition to verify bucket ownership.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-owner-condition.html
* for more details
*/
public final class BucketOwnerCondition {

public static final String ERROR_MESSAGE = "Expected bucket owner does not match";

private BucketOwnerCondition() {

}

/**
* Verify the bucket owner condition.
*
* @param headers HTTP headers
* @param bucketOwner bucket owner
* @throws OMException if the expected bucket owner does not match the actual
* bucket owner
*/
public static void verify(HttpHeaders headers, String bucketOwner) throws OMException {
if (headers == null) {
return;
}

final String expectedBucketOwner = headers.getHeaderString(S3Consts.EXPECTED_BUCKET_OWNER_HEADER);
// the client did not request bucket owner verification
if (expectedBucketOwner == null) {
return;
}
if (expectedBucketOwner.equals(bucketOwner)) {
return;
}
throw new OMException(ERROR_MESSAGE, OMException.ResultCodes.PERMISSION_DENIED);
}

/**
* Verify the copy operation's source and destination bucket owners.
*
* @param headers HTTP headers
* @param sourceOwner source bucket owner
* @param destOwner destination bucket owner
* @throws OMException if the expected source or destination bucket owner does
* not match the actual owners
*/
public static void verifyCopyOperation(HttpHeaders headers, String sourceOwner, String destOwner) throws OMException {
if (headers == null) {
return;
}

final String expectedSourceOwner = headers.getHeaderString(S3Consts.EXPECTED_SOURCE_BUCKET_OWNER_HEADER);
final String expectedDestOwner = headers.getHeaderString(S3Consts.EXPECTED_BUCKET_OWNER_HEADER);

// a null expectedSourceOwner means the client did not request source owner verification
if (expectedSourceOwner != null && !sourceOwner.equals(expectedSourceOwner)) {
throw new OMException(ERROR_MESSAGE, OMException.ResultCodes.PERMISSION_DENIED);
}

// a null expectedDestOwner means the client did not request destination owner verification
if (expectedDestOwner != null && !destOwner.equals(expectedDestOwner)) {
throw new OMException(ERROR_MESSAGE, OMException.ResultCodes.PERMISSION_DENIED);
}
}
}
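
The condition above is exercised indirectly through the endpoint changes in this PR (the dedicated unit tests were removed in favour of the SDK integration tests). As a minimal sketch, not part of the change set and assuming Mockito and JUnit 5 on the test classpath, the behaviour can also be checked directly against a mocked HttpHeaders:

```java
// Hypothetical sketch: direct check of BucketOwnerCondition with a mocked HttpHeaders.
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import javax.ws.rs.core.HttpHeaders;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.s3.endpoint.BucketOwnerCondition;
import org.apache.hadoop.ozone.s3.util.S3Consts;
import org.junit.jupiter.api.Test;

class BucketOwnerConditionSketchTest {

  @Test
  void verifyPassesOnMatchAndFailsOnMismatch() {
    HttpHeaders headers = mock(HttpHeaders.class);
    when(headers.getHeaderString(S3Consts.EXPECTED_BUCKET_OWNER_HEADER))
        .thenReturn("alice");

    // Matching owner: no exception is thrown.
    assertDoesNotThrow(() -> BucketOwnerCondition.verify(headers, "alice"));

    // Mismatched owner: PERMISSION_DENIED, which the endpoints map to AccessDenied.
    OMException ex = assertThrows(OMException.class,
        () -> BucketOwnerCondition.verify(headers, "bob"));
    assertEquals(OMException.ResultCodes.PERMISSION_DENIED, ex.getResult());

    // Null HttpHeaders (or an absent header): verification is skipped entirely.
    assertDoesNotThrow(() -> BucketOwnerCondition.verify(null, "anyone"));
  }
}
```

Because a missing header or null HttpHeaders skips the check, existing clients that never send the expected-owner header are unaffected.
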
@@ -210,11 +210,14 @@ protected String createS3Bucket(String bucketName) throws
/**
* Deletes an s3 bucket and removes mapping of Ozone volume/bucket.
* @param s3BucketName - S3 Bucket Name.
* @param headers - HttpHeaders
* @throws IOException in case the bucket cannot be deleted.
*/
protected void deleteS3Bucket(String s3BucketName)
protected void deleteS3Bucket(String s3BucketName, HttpHeaders headers)
throws IOException, OS3Exception {
try {
OzoneBucket bucket = getBucket(s3BucketName);
BucketOwnerCondition.verify(headers, bucket.getOwner());
client.getObjectStore().deleteS3Bucket(s3BucketName);
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
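
From the client side, the AWS SDK for Java v2 (the SDK used by AbstractS3SDKV2Tests) sets the expected owner per request and transmits it as the x-amz-expected-bucket-owner header. A hedged sketch follows; the endpoint, credentials, bucket name and "hadoop" owner value are placeholders and must match the actual Ozone bucket owner for the call to succeed:

```java
// Hypothetical client-side sketch against the Ozone S3 gateway.
import java.net.URI;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

public final class ExpectedBucketOwnerExample {
  public static void main(String[] args) {
    S3Client s3 = S3Client.builder()
        .endpointOverride(URI.create("http://localhost:9878"))  // s3g endpoint (placeholder)
        .region(Region.US_EAST_1)
        .credentialsProvider(StaticCredentialsProvider.create(
            AwsBasicCredentials.create("accessKey", "secretKey")))
        .forcePathStyle(true)
        .build();

    try {
      // The SDK sends x-amz-expected-bucket-owner with this value.
      s3.headBucket(HeadBucketRequest.builder()
          .bucket("bucket1")
          .expectedBucketOwner("hadoop")
          .build());
      System.out.println("Owner matched, bucket is accessible.");
    } catch (S3Exception e) {
      // An owner mismatch is rejected by the gateway, e.g. 403 AccessDenied.
      System.out.println("Request rejected: " + e.statusCode());
    }
  }
}
```

A mismatching owner should surface as AccessDenied, consistent with the PERMISSION_DENIED to ACCESS_DENIED mapping added in the endpoint changes above.
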