Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,6 @@
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeFalse;

import org.junit.Ignore;
import org.junit.Test;
Expand Down Expand Up @@ -2221,8 +2220,6 @@ public void testListPartsWithInvalidUploadID() throws Exception {

@Test
public void testNativeAclsForVolume() throws Exception {
assumeFalse("Remove this once ACL HA is supported",
getClass().equals(TestOzoneRpcClientWithRatis.class));
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);

Expand All @@ -2237,8 +2234,6 @@ public void testNativeAclsForVolume() throws Exception {

@Test
public void testNativeAclsForBucket() throws Exception {
assumeFalse("Remove this once ACL HA is supported",
getClass().equals(TestOzoneRpcClientWithRatis.class));
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

Expand Down Expand Up @@ -2299,8 +2294,6 @@ private void validateDefaultAcls(OzoneObj parentObj, OzoneObj childObj,

@Test
public void testNativeAclsForKey() throws Exception {
assumeFalse("Remove this once ACL HA is supported",
getClass().equals(TestOzoneRpcClientWithRatis.class));
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String key1 = "dir1/dir2" + UUID.randomUUID().toString();
Expand Down Expand Up @@ -2363,8 +2356,6 @@ public void testNativeAclsForKey() throws Exception {

@Test
public void testNativeAclsForPrefix() throws Exception {
assumeFalse("Remove this once ACL HA is supported",
getClass().equals(TestOzoneRpcClientWithRatis.class));
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.audit.AuditAction;
Expand Down Expand Up @@ -142,7 +143,8 @@ public void checkAcls(OzoneManager ozoneManager,
*/
@VisibleForTesting
public UserGroupInformation createUGI() {
if (omRequest.hasUserInfo()) {
if (omRequest.hasUserInfo() &&
!StringUtils.isBlank(omRequest.getUserInfo().getUserName())) {
return UserGroupInformation.createRemoteUser(
omRequest.getUserInfo().getUserName());
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,12 @@
package org.apache.hadoop.ozone.om.request.bucket;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import com.google.common.base.Optional;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -146,8 +150,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
volumeName);
acquiredBucketLock = metadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);

OmVolumeArgs omVolumeArgs =
metadataManager.getVolumeTable().get(volumeKey);
//Check if the volume exists
if (metadataManager.getVolumeTable().get(volumeKey) == null) {
if (omVolumeArgs == null) {
LOG.debug("volume: {} not found ", volumeName);
throw new OMException("Volume doesn't exist",
OMException.ResultCodes.VOLUME_NOT_FOUND);
Expand All @@ -160,6 +167,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
}

// Add default acls from volume.
addDefaultAcls(omBucketInfo, omVolumeArgs);

// Update table cache.
metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
Expand Down Expand Up @@ -205,6 +215,26 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
}


/**
 * Extends the bucket's ACL list with the ACL entries inherited from the
 * parent volume's default ACL list. The bucket's existing ACLs (if any)
 * are preserved and the inherited entries are appended after them; the
 * combined list is written back onto the bucket via {@code setAcls}.
 *
 * @param omBucketInfo bucket whose ACL list is rebuilt in place
 * @param omVolumeArgs parent volume supplying the default ACL entries
 */
private void addDefaultAcls(OmBucketInfo omBucketInfo,
    OmVolumeArgs omVolumeArgs) {
  List<OzoneAcl> combinedAcls = new ArrayList<>();
  List<OzoneAcl> existingAcls = omBucketInfo.getAcls();
  // Keep whatever ACLs were explicitly set on the bucket request.
  if (existingAcls != null) {
    combinedAcls.addAll(existingAcls);
  }
  // Append the volume's default ACLs, converting each protobuf entry
  // into an OzoneAcl with its access type carried over.
  omVolumeArgs.getAclMap().getDefaultAclList().forEach(
      aclProto -> combinedAcls.add(
          OzoneAcl.fromProtobufWithAccessType(aclProto)));
  omBucketInfo.setAcls(combinedAcls);
}


private BucketInfo getBucketInfoFromRequest() {
CreateBucketRequest createBucketRequest =
getOmRequest().getCreateBucketRequest();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -265,20 +265,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName), keyArgs.getDataSize(), locations,
encryptionInfo.orNull());
encryptionInfo.orNull(), ozoneManager.getPrefixManager(), bucketInfo);

omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createFileRequest.getClientID(), transactionLogIndex, volumeName,
bucketName, keyName, ozoneManager,
OMAction.CREATE_FILE);
OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), bucketInfo);
} catch (IOException ex) {
exception = ex;
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createFileRequest.getClientID(), transactionLogIndex,
volumeName, bucketName, keyName, ozoneManager,
OMAction.CREATE_FILE);
OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), null);
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,6 @@
.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;

Expand Down Expand Up @@ -171,11 +169,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OmKeyInfo omKeyInfo = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, keyName);
}
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, false);

OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
validateBucketAndVolume(omMetadataManager, volumeName,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,6 @@
.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
Expand Down Expand Up @@ -117,11 +115,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, keyName);
}
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, false);

List<OmKeyLocationInfo> locationInfoList = commitKeyArgs
.getKeyLocationsList().stream()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@
.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.UniqueId;

Expand Down Expand Up @@ -164,11 +162,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, keyName);
}
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, false);

acquireLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
Expand All @@ -184,17 +178,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
omMetadataManager.getOzoneKey(volumeName, bucketName, keyName),
keyArgs.getDataSize(), locations, encryptionInfo.orNull());
keyArgs.getDataSize(), locations, encryptionInfo.orNull(),
ozoneManager.getPrefixManager(), bucketInfo);
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createKeyRequest.getClientID(), transactionLogIndex, volumeName,
bucketName, keyName, ozoneManager, OMAction.ALLOCATE_KEY);
bucketName, keyName, ozoneManager, OMAction.ALLOCATE_KEY,
ozoneManager.getPrefixManager(), bucketInfo);
} catch (IOException ex) {
exception = ex;
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
encryptionInfo.orNull(), exception, createKeyRequest.getClientID(),
transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
OMAction.ALLOCATE_KEY);
OMAction.ALLOCATE_KEY, ozoneManager.getPrefixManager(), null);
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@
.DeleteKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
Expand Down Expand Up @@ -111,11 +109,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE,
volumeName, bucketName, keyName);
}
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, true);

String objectKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,6 @@
.RenameKeyRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.RenameKeyResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.db.Table;
import org.apache.hadoop.utils.db.cache.CacheKey;
Expand Down Expand Up @@ -120,11 +118,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMException.ResultCodes.INVALID_KEY_NAME);
}
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, fromKeyName);
}
checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, true);

acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
Expand Down
Loading