diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 73692b37c599..93c675d9b900 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -121,7 +121,6 @@ import org.apache.hadoop.ozone.om.helpers.OmTenantArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; @@ -141,9 +140,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.security.GDPRSymmetricKey; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotResponse; @@ -161,7 +157,6 @@ import java.security.InvalidKeyException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -176,7 +171,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.LINK_BUCKET_DEFAULT_ACL; import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY; @@ -185,8 +180,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD; import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode @@ -210,8 +203,6 @@ public class RpcClient implements ClientProtocol { private final XceiverClientFactory xceiverClientManager; private final UserGroupInformation ugi; private UserGroupInformation s3gUgi; - private final ACLType userRights; - private final ACLType groupRights; private final ClientId clientId = ClientId.randomId(); private final boolean unsafeByteBufferConversion; private Text dtService; @@ -244,12 +235,8 @@ public RpcClient(ConfigurationSource conf, String omServiceId) Preconditions.checkNotNull(conf); this.conf = conf; this.ugi = UserGroupInformation.getCurrentUser(); - // Get default acl rights for user and group. 
- OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); replicationConfigValidator = this.conf.getObject(ReplicationConfigValidator.class); - this.userRights = aclConfig.getUserDefaultRights(); - this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( @@ -450,20 +437,6 @@ public void createVolume(String volumeName, VolumeArgs volArgs) ugi.getShortUserName() : volArgs.getOwner(); long quotaInNamespace = volArgs.getQuotaInNamespace(); long quotaInBytes = volArgs.getQuotaInBytes(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, groupRights))); - //ACLs from VolumeArgs - List volumeAcls = volArgs.getAcls(); - if (volumeAcls != null) { - listOfAcls.addAll(volumeAcls); - } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setVolume(volumeName); @@ -473,11 +446,14 @@ public void createVolume(String volumeName, VolumeArgs volArgs) builder.setQuotaInNamespace(quotaInNamespace); builder.setUsedNamespace(0L); builder.addAllMetadata(volArgs.getMetadata()); - - //Remove duplicates and add ACLs - for (OzoneAcl ozoneAcl : - listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(ozoneAcl); + //ACLs from VolumeArgs + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + //Remove duplicates and add ACLs + for (OzoneAcl ozoneAcl : + volumeAcls.stream().distinct().collect(Collectors.toList())) { + builder.addOzoneAcls(ozoneAcl); + } } if (volArgs.getQuotaInBytes() == 0) { @@ -667,17 +643,6 @@ public void createBucket( 
.setKeyName(bucketArgs.getEncryptionKey()).build(); } - List listOfAcls = getAclList(); - //ACLs from BucketArgs - if (bucketArgs.getAcls() != null) { - listOfAcls.addAll(bucketArgs.getAcls()); - } - // Link bucket default acl - if (bucketArgs.getSourceVolume() != null - && bucketArgs.getSourceBucket() != null) { - listOfAcls.add(linkBucketDefaultAcl()); - } - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) @@ -688,10 +653,19 @@ public void createBucket( .setSourceBucket(bucketArgs.getSourceBucket()) .setQuotaInBytes(bucketArgs.getQuotaInBytes()) .setQuotaInNamespace(bucketArgs.getQuotaInNamespace()) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())) .setBucketLayout(bucketLayout) .setOwner(owner); + if (bucketArgs.getAcls() != null) { + builder.setAcls(bucketArgs.getAcls()); + } + + // Link bucket default acl + if (bucketArgs.getSourceVolume() != null + && bucketArgs.getSourceBucket() != null) { + builder.addAcl(LINK_BUCKET_DEFAULT_ACL); + } + if (bek != null) { builder.setBucketEncryptionKey(bek); } @@ -752,17 +726,6 @@ private static void verifySpaceQuota(long quota) throws OMException { } } - /** - * Helper function to get default acl list for current user. - * - * @return listOfAcls - * */ - private List getAclList() { - UserGroupInformation realUserInfo = getRealUserInfo(); - return OzoneAclUtil.getAclList(realUserInfo.getUserName(), - realUserInfo.getGroupNames(), userRights, groupRights); - } - /** * Helper function to get the actual operating user. * @@ -778,16 +741,6 @@ private UserGroupInformation getRealUserInfo() { return ugi; } - /** - * Link bucket default acl defined [world::rw] - * which is similar to Linux POSIX symbolic. - * - * @return OzoneAcl - */ - private OzoneAcl linkBucketDefaultAcl() { - return new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); - } - /** * Get a valid Delegation Token. 
* @@ -1427,7 +1380,6 @@ public OzoneOutputStream createKey( .setReplicationConfig(replicationConfig) .addAllMetadataGdpr(metadata) .addAllTags(tags) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName); @@ -1536,7 +1488,6 @@ public OzoneDataStreamOutput createStreamKey( .addAllMetadataGdpr(metadata) .addAllTags(tags) .setSortDatanodesInPipeline(true) - .setAcls(getAclList()) .setOwnerName(ownerName); OpenKeySession openKey = ozoneManagerClient.openKey(builder.build()); @@ -1955,7 +1906,6 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, .setBucketName(bucketName) .setKeyName(keyName) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .addAllMetadataGdpr(metadata) .setOwnerName(ownerName) .addAllTags(tags) @@ -1992,7 +1942,6 @@ private OpenKeySession newMultipartOpenKey( .setMultipartUploadID(uploadID) .setMultipartUploadPartNumber(partNumber) .setSortDatanodesInPipeline(sortDatanodesInPipeline) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); return ozoneManagerClient.openKey(keyArgs); @@ -2064,7 +2013,6 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setBucketName(bucketName) .setKeyName(keyName) .setMultipartUploadID(uploadID) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); @@ -2169,7 +2117,6 @@ public void createDirectory(String volumeName, String bucketName, OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); ozoneManagerClient.createDirectory(keyArgs); @@ -2250,7 +2197,6 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName) .build(); @@ -2282,7 +2228,6 @@ public OzoneDataStreamOutput createStreamFile(String 
volumeName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setSortDatanodesInPipeline(true) .setOwnerName(ownerName) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 26693d19c64a..e2b2f61a3685 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.ratis.util.MemoizedSupplier; @@ -41,8 +42,11 @@ import java.util.function.IntFunction; import java.util.function.Supplier; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * OzoneACL classes define bucket ACLs used in OZONE. @@ -58,6 +62,13 @@ public class OzoneAcl { private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; + /** + * Link bucket default acl defined [world::rw] + * which is similar to Linux POSIX symbolic. 
+ */ + public static final OzoneAcl LINK_BUCKET_DEFAULT_ACL = + new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); + private final ACLIdentityType type; private final String name; @JsonIgnore diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 517f0c14ce09..083b1329db6d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -18,21 +18,25 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.RequestContext; +import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; @@ -43,28 +47,51 @@ * Helper class for ozone acls operations. */ public final class OzoneAclUtil { + static final Logger LOG = LoggerFactory.getLogger(OzoneAclUtil.class); private OzoneAclUtil() { } + private static ACLType[] userRights; + private static ACLType[] groupRights; + /** - * Helper function to get access acl list for current user. 
+ * Helper function to get default access acl list for current user. * - * @param userName - * @param userGroups + * @param ugi current login user + * @param conf current configuration * @return list of OzoneAcls * */ - public static List getAclList(String userName, - String[] userGroups, ACLType userRights, ACLType groupRights) { - + public static List getDefaultAclList(UserGroupInformation ugi, OzoneConfiguration conf) { + // Get default acl rights for user and group. + if (userRights == null || groupRights == null) { + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + userRights = aclConfig.getUserDefaultRights(); + groupRights = aclConfig.getGroupDefaultRights(); + } List listOfAcls = new ArrayList<>(); + // User ACL. + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupRights)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. + LOG.warn("Failed to get primary group from user {}", ugi); + } + return listOfAcls; + } + public static List getAclList(UserGroupInformation ugi, ACLType userPrivilege, ACLType groupPrivilege) { + List listOfAcls = new ArrayList<>(); // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, ACCESS, userRights)); - if (userGroups != null) { - // Group ACLs of the User. - Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userPrivilege)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupPrivilege)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. 
+ LOG.warn("Failed to get primary group from user {}", ugi); } return listOfAcls; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 5d07ea390b19..6b23b0f2682b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1652,10 +1652,13 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setKeyName(omKeyArgs.getKeyName()) .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata())) .setOwnerName(omKeyArgs.getOwner()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .addAllTags(KeyValueUtil.toProtobuf(omKeyArgs.getTags())); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + setReplicationConfig(omKeyArgs.getReplicationConfig(), keyArgs); multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); @@ -1726,10 +1729,12 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setVolumeName(omKeyArgs.getVolumeName()) .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(omKeyArgs.getOwner()) .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } multipartUploadCompleteRequest.setKeyArgs(keyArgs.build()); 
multipartUploadCompleteRequest.addAllPartsList(multipartUploadList @@ -2125,16 +2130,17 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { @Override public void createDirectory(OmKeyArgs args) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() + KeyArgs.Builder keyArgsBuilder = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .setOwnerName(args.getOwner()) - .build(); + .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(keyArgsBuilder.build()) .build(); OMRequest omRequest = createOMRequest(Type.CreateDirectory) @@ -2296,9 +2302,11 @@ public OpenKeySession createFile(OmKeyArgs args, .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .setDataSize(args.getDataSize()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } if (args.getReplicationConfig() != null) { if (args.getReplicationConfig() instanceof ECReplicationConfig) { keyArgsBuilder.setEcReplicationConfig( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java index 75dee0b8a455..7fbf5a920651 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -23,6 +23,10 @@ import org.apache.hadoop.hdds.conf.ConfigType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + /** * Ozone ACL config pojo. * */ @@ -40,7 +44,7 @@ public class OzoneAclConfig { private String userDefaultRights; @Config(key = "group.rights", - defaultValue = "ALL", + defaultValue = "READ, LIST", type = ConfigType.STRING, tags = {ConfigTag.OM, ConfigTag.SECURITY}, description = "Default group permissions set for an object in " + @@ -48,18 +52,26 @@ public class OzoneAclConfig { ) private String groupDefaultRights; - public ACLType getUserDefaultRights() { + public ACLType[] getUserDefaultRights() { + List types = new ArrayList(); if (userDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.ALL); + } else { + String[] array = userDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(userDefaultRights); + return types.toArray(new ACLType[0]); } - public ACLType getGroupDefaultRights() { + public ACLType[] getGroupDefaultRights() { + List types = new ArrayList(); if (groupDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.READ); + types.add(ACLType.LIST); + } else { + String[] array = groupDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(groupDefaultRights); + return types.toArray(new ACLType[0]); } - } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java index 35a8a95d8d02..a6b5d9c01969 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -186,8 +186,8 @@ private static List getDefaultAcls() { } OzoneAclConfig aclConfig = newInstanceOf(OzoneAclConfig.class); - IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights(); - IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); + IAccessAuthorizer.ACLType[] userRights = aclConfig.getUserDefaultRights(); + IAccessAuthorizer.ACLType[] groupRights = aclConfig.getGroupDefaultRights(); OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot index 7cc40a82c19b..39ddbde41b04 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot @@ -42,7 +42,7 @@ Create bucket with invalid bucket name ${result} = Execute AWSS3APICli and checkrc create-bucket --bucket invalid_bucket_${randStr} 255 Should contain ${result} InvalidBucketName -Create new bucket and check no group ACL +Create new bucket and check default group ACL ${bucket} = Create bucket ${acl} = Execute ozone sh bucket getacl s3v/${bucket} ${group} = Get Regexp Matches ${acl} "GROUP" @@ -50,5 +50,6 @@ Create new bucket and check no group ACL ${json} = Evaluate json.loads('''${acl}''') json # make sure this check is for group acl Should contain ${json}[1][type] GROUP - Should contain ${json}[1][aclList] NONE + Should contain ${json}[1][aclList] READ + Should contain ${json}[1][aclList] LIST END diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 8b71a2160031..2251b1058172 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -1207,7 +1207,7 @@ void testSharedTmpDir() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access // ACL admin owner, world read+write EnumSet aclRights = EnumSet.of(READ, WRITE); @@ -1310,7 +1310,7 @@ void testTempMount() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); @@ -2311,7 +2311,7 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index 69d516b9f515..7e518687beaf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -172,6 +172,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -394,10 +396,10 @@ public void testBucketSetOwner() throws IOException { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); createKeyForUser(volumeName, bucketName, key1, content, user1); createKeyForUser(volumeName, bucketName, key2, content, user2); @@ -779,7 +781,7 @@ public void testCreateBucketWithAllArgument() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); 
store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -818,7 +820,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACCESS, ACLType.ALL)); + acls.add(new OzoneAcl(USER, "test", ACCESS, ALL)); OzoneBucket bucket = volume.getBucket(bucketName); for (OzoneAcl acl : acls) { assertTrue(bucket.addAcl(acl)); @@ -834,7 +836,7 @@ public void testRemoveBucketAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -853,9 +855,9 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); OzoneAcl acl2 = new OzoneAcl(USER, "test1", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -913,6 +915,64 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { } + @Test + public void testAclDeDuplication() + throws IOException { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OzoneAcl userAcl1 = new OzoneAcl(USER, "test", DEFAULT, READ); + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + OzoneAcl currentUserAcl = new OzoneAcl(USER, currentUser.getShortUserName(), ACCESS, ALL); + OzoneAcl currentUserPrimaryGroupAcl = new OzoneAcl(GROUP, currentUser.getPrimaryGroupName(), ACCESS, READ, LIST); + VolumeArgs createVolumeArgs = 
VolumeArgs.newBuilder() + .setOwner(currentUser.getShortUserName()) + .setAdmin(currentUser.getShortUserName()) + .addAcl(userAcl1) + .addAcl(currentUserAcl) + .addAcl(currentUserPrimaryGroupAcl) + .build(); + + store.createVolume(volumeName, createVolumeArgs); + OzoneVolume volume = store.getVolume(volumeName); + List volumeAcls = volume.getAcls(); + assertEquals(3, volumeAcls.size()); + assertTrue(volumeAcls.contains(userAcl1)); + assertTrue(volumeAcls.contains(currentUserAcl)); + assertTrue(volumeAcls.contains(currentUserPrimaryGroupAcl)); + + // normal bucket + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + List bucketAcls = bucket.getAcls(); + assertEquals(bucketName, bucket.getName()); + assertEquals(3, bucketAcls.size()); + assertTrue(bucketAcls.contains(currentUserAcl)); + assertTrue(bucketAcls.contains(currentUserPrimaryGroupAcl)); + assertTrue(bucketAcls.get(2).getName().equals(userAcl1.getName())); + assertTrue(bucketAcls.get(2).getAclList().equals(userAcl1.getAclList())); + assertTrue(bucketAcls.get(2).getAclScope().equals(ACCESS)); + + // link bucket + OzoneAcl userAcl2 = new OzoneAcl(USER, "test-link", DEFAULT, READ); + String linkBucketName = "link-" + bucketName; + builder = BucketArgs.newBuilder().setSourceVolume(volumeName).setSourceBucket(bucketName) + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl).addAcl(userAcl2); + volume.createBucket(linkBucketName, builder.build()); + OzoneBucket linkBucket = volume.getBucket(linkBucketName); + List linkBucketAcls = linkBucket.getAcls(); + assertEquals(linkBucketName, linkBucket.getName()); + assertEquals(5, linkBucketAcls.size()); + assertTrue(linkBucketAcls.contains(currentUserAcl)); + assertTrue(linkBucketAcls.contains(currentUserPrimaryGroupAcl)); + assertTrue(linkBucketAcls.contains(userAcl2)); + 
assertTrue(linkBucketAcls.contains(OzoneAcl.LINK_BUCKET_DEFAULT_ACL)); + assertTrue(linkBucketAcls.get(4).getName().equals(userAcl1.getName())); + assertTrue(linkBucketAcls.get(4).getAclList().equals(userAcl1.getAclList())); + assertTrue(linkBucketAcls.get(4).getAclScope().equals(ACCESS)); + } + @Test public void testSetBucketStorageType() throws IOException { @@ -3029,10 +3089,10 @@ public void testMultipartUploadWithACL() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Add ACL on Bucket - OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ACLType.ALL); - OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ACLType.ALL); - OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ACLType.ALL); - OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ACLType.ALL); + OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ALL); + OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ALL); + OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ALL); bucket.addAcl(acl1); bucket.addAcl(acl2); bucket.addAcl(acl3); @@ -3203,10 +3263,10 @@ public void testMultipartUploadOwner() throws Exception { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); // user1 MultipartUpload a key UserGroupInformation.setLoginUser(user1); @@ -3943,7 
+4003,7 @@ public void testNativeAclsForPrefix() throws Exception { aclsGet = store.getAcl(prefixObj); assertEquals(0, aclsGet.size()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ACLType.ALL); + OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ALL); List acls = new ArrayList<>(); acls.add(user1Acl); acls.add(group1Acl); @@ -3980,14 +4040,12 @@ private List getAclList(OzoneConfiguration conf) //User ACL UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); - ACLType groupRights = aclConfig.getGroupDefaultRights(); - - listOfAcls.add(new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(ugi.getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + ACLType[] userRights = aclConfig.getUserDefaultRights(); + ACLType[] groupRights = aclConfig.getGroupDefaultRights(); + + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + //Group ACL of the User + listOfAcls.add(new OzoneAcl(GROUP, ugi.getPrimaryGroupName(), ACCESS, groupRights)); return listOfAcls; } @@ -4056,7 +4114,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { OzoneAcl ua = new OzoneAcl(USER, "userx", ACCESS, ACLType.READ_ACL); OzoneAcl ug = new OzoneAcl(GROUP, "userx", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); assertEquals(2, newAcls.size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java index 73596781cc64..e1b2a59d78c2 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -96,6 +97,9 @@ public void testCreateBucketWithOlderClient() throws Exception { OzoneManagerProtocolProtos.StorageTypeProto.DISK) .build()) .build()).build(); + createBucketReq = createBucketReq.toBuilder() + .setUserInfo(OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName(UserGroupInformation.getCurrentUser().getShortUserName()).build()).build(); OzoneManagerProtocolProtos.OMResponse omResponse = cluster.getOzoneManager().getOmServerProtocol() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index a349acf6e378..e9c9b946c8e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -337,8 +337,7 @@ public void openKeyFailureInSafeMode() throws Exception { .setKeyName(KEY_NAME) .setDataSize(1000) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .build(); OMException omException = assertThrows(OMException.class, () -> writeClient.openKey(keyArgs)); @@ -1695,8 +1694,7 @@ private OmKeyArgs.Builder 
createBuilder(String bucketName) .setDataSize(0) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setVolumeName(VOLUME_NAME) .setOwnerName(ugi.getShortUserName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index 6f86fcba70ec..1a2e61b88005 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -45,11 +46,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -68,6 +71,8 @@ public class TestRecursiveAclWithFSO { .createUserForTesting("user1", 
new String[] {"test1"}); private final UserGroupInformation user2 = UserGroupInformation .createUserForTesting("user2", new String[] {"test2"}); + private final UserGroupInformation user3 = UserGroupInformation + .createUserForTesting("user3", new String[] {"test3, test4"}); @BeforeEach public void init() throws Exception { @@ -213,6 +218,70 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { } } + @Test + public void testKeyDefaultACL() throws Exception { + String volumeName = "vol1"; + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + objectStore.createVolume(volumeName); + addVolumeAcl(objectStore, volumeName, "world::a"); + + // verify volume ACLs. This volume will have 2 default ACLs, plus above one added + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(3, acls.size()); + assertEquals(adminUser.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(adminUser.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + assertEquals("WORLD", acls.get(2).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(2).getAclList().toArray()); + } + + // set LoginUser as user3 + UserGroupInformation.setLoginUser(user3); + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + OzoneVolume volume = objectStore.getVolume(volumeName); + BucketArgs omBucketArgs = + BucketArgs.newBuilder().setStorageType(StorageType.DISK).build(); + String bucketName = "bucket"; + volume.createBucket(bucketName, omBucketArgs); + OzoneBucket 
ozoneBucket = volume.getBucket(bucketName); + + // verify bucket default ACLs + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + + // verify key default ACLs + int length = 10; + byte[] input = new byte[length]; + Arrays.fill(input, (byte) 96); + String keyName = UUID.randomUUID().toString(); + createKey(ozoneBucket, keyName, length, input); + obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY).setStoreType(OZONE).build(); + acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + } + } + private void removeAclsFromKey(ObjectStore objectStore, OzoneBucket ozoneBucket, String key) throws IOException { OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder().setKeyName(key) @@ -271,6 +340,16 @@ private void setVolumeAcl(ObjectStore objectStore, String volumeName, assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString))); } + /** + * Helper function to add volume ACL. 
+ */ + private void addVolumeAcl(ObjectStore objectStore, String volumeName, + String aclString) throws IOException { + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME).setStoreType(OZONE).build(); + assertTrue(objectStore.addAcl(obj, OzoneAcl.parseAcl(aclString))); + } + /** * Helper function to set bucket ACL. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 17f9663ae1f2..c9c664b303f7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -381,7 +381,6 @@ vol, bucket, key, volOwner, bucketOwner, createUGIForApi(), */ @VisibleForTesting public UserGroupInformation createUGI() throws AuthenticationException { - if (userGroupInformation != null) { return userGroupInformation; } @@ -413,6 +412,11 @@ public UserGroupInformation createUGIForApi() throws OMException { return ugi; } + @VisibleForTesting + public void setUGI(UserGroupInformation ugi) { + this.userGroupInformation = ugi; + } + /** * Return InetAddress created from OMRequest userInfo. If userInfo is not * set, returns null. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 72c5cf57d99c..3c21a2a851b8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -66,10 +66,12 @@ import java.nio.file.InvalidPathException; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -246,8 +248,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled()); - // Add default acls from volume. - addDefaultAcls(omBucketInfo, omVolumeArgs); + addDefaultAcls(omBucketInfo, omVolumeArgs, ozoneManager); // check namespace quota checkQuotaInNamespace(omVolumeArgs, 1L); @@ -322,16 +323,20 @@ private boolean isECBucket(BucketInfo bucketInfo) { * @param omVolumeArgs */ private void addDefaultAcls(OmBucketInfo omBucketInfo, - OmVolumeArgs omVolumeArgs) { - // Add default acls for bucket creator. 
+ OmVolumeArgs omVolumeArgs, OzoneManager ozoneManager) throws OMException { List acls = new ArrayList<>(); + // Add default acls + acls.addAll(getDefaultAclList(createUGIForApi(), ozoneManager.getConfiguration())); if (omBucketInfo.getAcls() != null) { + // Add acls for bucket creator. acls.addAll(omBucketInfo.getAcls()); } // Add default acls from volume. List defaultVolumeAcls = omVolumeArgs.getDefaultAcls(); OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls, ACCESS); + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); omBucketInfo.setAcls(acls); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 0b938cb1e6ee..732886fa0e6c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -22,18 +22,11 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -73,7 +66,6 @@ import org.apache.hadoop.util.Time; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; @@ -88,11 +80,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoryCreateRequest.class); - // The maximum number of directories which can be created through a single - // transaction (recursive directory creations) is 2^8 - 1 as only 8 - // bits are set aside for this in ObjectID. - private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; - /** * Stores the result of request execution in * OMClientRequest#validateAndUpdateCache. @@ -203,7 +190,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn dirKeyInfo = createDirectoryKeyInfoWithACL(keyName, keyArgs, baseObjId, omBucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, missingParents, omBucketInfo, omPathInfo, trxnLogIndex); @@ -250,58 +237,6 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), return omClientResponse; } - /** - * Construct OmKeyInfo for every parent directory in missing list. 
- * @param ozoneManager - * @param keyArgs - * @param missingParents list of parent directories to be created - * @param bucketInfo - * @param omPathInfo - * @param trxnLogIndex - * @return {@code List} - * @throws IOException - */ - public static List getAllParentInfo(OzoneManager ozoneManager, - KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; - long objectCount = 1; // baseObjID is used by the leaf directory - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + MAX_NUM_OF_RECURSIVE_DIRS + ". 
Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to KeyTable", missingKey); - - OmKeyInfo parentKeyInfo = - createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, - bucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); - objectCount++; - - missingParentInfos.add(parentKeyInfo); - } - - return missingParentInfos; - } - private void logResult(CreateDirectoryRequest createDirectoryRequest, KeyArgs keyArgs, OMMetrics omMetrics, Result result, Exception exception, int numMissingParents) { @@ -336,69 +271,6 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, } } - /** - * fill in a KeyInfo for a new directory entry in OM database. - * without initializing ACLs from the KeyArgs - used for intermediate - * directories which get created internally/recursively during file - * and directory create. - * @param keyName - * @param keyArgs - * @param objectId - * @param bucketInfo - * @param omPathInfo - * @param transactionIndex - * @param serverDefaultReplConfig - * @return the OmKeyInfo structure - */ - public static OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, - KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, - ReplicationConfig serverDefaultReplConfig) { - return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, - serverDefaultReplConfig) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .setUpdateID(transactionIndex).build(); - } - - private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, - KeyArgs keyArgs, long objectId, - ReplicationConfig serverDefaultReplConfig) { - String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - - OmKeyInfo.Builder keyInfoBuilder = - new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - 
.setKeyName(dirName) - .setOwnerName(keyArgs.getOwnerName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(0); - if (keyArgs.getFactor() != null && keyArgs - .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs - .getType() != HddsProtos.ReplicationType.EC) { - // Factor available and not an EC replication config. - keyInfoBuilder.setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); - } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { - // Found EC type - keyInfoBuilder.setReplicationConfig( - new ECReplicationConfig(keyArgs.getEcReplicationConfig())); - } else { - // default type - keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); - } - - keyInfoBuilder.setObjectID(objectId); - return keyInfoBuilder; - } - - static long getMaxNumOfRecursiveDirs() { - return MAX_NUM_OF_RECURSIVE_DIRS; - } - @RequestFeatureValidator( conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION, processingPhase = RequestProcessingPhase.PRE_PROCESS, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 46a2ac5f7cc3..8bef8e179284 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -51,12 +51,10 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; import java.util.List; import java.util.Map; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; @@ -145,8 +143,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // prepare all missing parents - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + missingParentInfos = getAllMissingParentDirInfo( ozoneManager, keyArgs, omBucketInfo, omPathInfo, trxnLogIndex); final long volumeId = omMetadataManager.getVolumeId(volumeName); @@ -163,7 +160,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omPathInfo.getLeafNodeName(), keyArgs, omPathInfo.getLeafNodeObjectId(), omPathInfo.getLastKnownParentId(), trxnLogIndex, - omBucketInfo, omPathInfo); + omBucketInfo, omPathInfo, ozoneManager.getConfiguration()); OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, volumeId, bucketId, trxnLogIndex, missingParentInfos, dirInfo); @@ -235,86 +232,4 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, createDirectoryRequest); } } - - /** - * Construct OmDirectoryInfo for every parent directory in missing list. 
- * - * @param keyArgs key arguments - * @param pathInfo list of parent directories to be created and its ACLs - * @param trxnLogIndex transaction log index id - * @return list of missing parent directories - * @throws IOException DB failure - */ - public static List getAllMissingParentDirInfo( - OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + getMaxNumOfRecursiveDirs(); - long objectCount = 1; - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - long lastKnownParentId = pathInfo.getLastKnownParentId(); - List missingParents = pathInfo.getMissingParents(); - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + getMaxNumOfRecursiveDirs() + ". 
Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to DirectoryTable", - missingKey); - OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, - keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, - bucketInfo, pathInfo); - objectCount++; - - missingParentInfos.add(dirInfo); - - // updating id for the next sub-dir - lastKnownParentId = nextObjId; - } - pathInfo.setLastKnownParentId(lastKnownParentId); - pathInfo.setLeafNodeObjectId(baseObjId + objectCount); - return missingParentInfos; - } - - /** - * Fill in a DirectoryInfo for a new directory entry in OM database. - * @param dirName - * @param keyArgs - * @param objectId - * @param parentObjectId - * @param bucketInfo - * @param omPathInfo - * @return the OmDirectoryInfo structure - */ - private static OmDirectoryInfo createDirectoryInfoWithACL( - String dirName, KeyArgs keyArgs, long objectId, - long parentObjectId, long transactionIndex, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { - - return OmDirectoryInfo.newBuilder() - .setName(dirName) - .setOwner(keyArgs.getOwnerName()) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setObjectID(objectId) - .setUpdateID(transactionIndex) - .setParentObjectID(parentObjectId) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .build(); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 5d1922cdefb1..08b25718288c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -247,7 +247,7 @@ public 
OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); @@ -255,8 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, + missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, pathInfo.getMissingParents(), omBucketInfo, pathInfo, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 291b0a8d5371..c4967d5af1fc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -153,9 +153,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. 
numKeysCreated = missingParentInfos.size(); @@ -171,7 +170,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 6ee2f719800d..e817901c22ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -261,9 +260,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn " as there is already file in the given path", NOT_A_FILE); } - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, - pathInfo.getMissingParents(), bucketInfo, + missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, + pathInfo.getMissingParents(), bucketInfo, pathInfo, trxnLogIndex); numMissingParents = missingParentInfos.size(); @@ -279,7 +277,7 @@ public OMClientResponse 
validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), replicationConfig); + ozoneManager.isRatisEnabled(), replicationConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index f40adb7495f8..87cc151351e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -138,9 +137,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. 
numKeysCreated = missingParentInfos.size(); @@ -156,7 +154,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 8eba6b2e9288..6a467f3acf58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -39,6 +39,8 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -52,12 +54,14 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import 
org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; @@ -99,9 +103,11 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; @@ -110,6 +116,11 @@ */ public abstract class OMKeyRequest extends OMClientRequest { + // The maximum number of directories which can be created through a single + // transaction (recursive directory creations) is 2^8 - 1 as only 8 + // bits are set aside for this in ObjectID. 
+ private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; + @VisibleForTesting public static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); @@ -400,11 +411,12 @@ public EncryptedKeyVersion run() throws IOException { return edek; } - protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, + protected List getAclsForKey(KeyArgs keyArgs, OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, - PrefixManager prefixManager) { + PrefixManager prefixManager, OzoneConfiguration config) throws OMException { List acls = new ArrayList<>(); + acls.addAll(getDefaultAclList(createUGIForApi(), config)); if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } @@ -422,6 +434,8 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if (prefixInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls(), ACCESS)) { + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -432,6 +446,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // prefix are not set if (omPathInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -440,10 +455,12 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // parent-dir are not set. if (bucketInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } @@ -452,12 +469,15 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, * @param keyArgs * @param bucketInfo * @param omPathInfo + * @param config * @return Acls which inherited parent DEFAULT and keyArgs ACCESS acls. 
*/ - protected static List getAclsForDir(KeyArgs keyArgs, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { + protected List getAclsForDir(KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, OzoneConfiguration config) throws OMException { // Acls inherited from parent or bucket will convert to DEFAULT scope List acls = new ArrayList<>(); + // add default ACLs + acls.addAll(getDefaultAclList(createUGIForApi(), config)); // Inherit DEFAULT acls from parent-dir if (omPathInfo != null) { @@ -470,12 +490,207 @@ protected static List getAclsForDir(KeyArgs keyArgs, OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), DEFAULT); } - // add itself acls + // add acls from clients acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); - + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } + /** + * Construct OmDirectoryInfo for every parent directory in missing list. + * + * @param keyArgs key arguments + * @param pathInfo list of parent directories to be created and its ACLs + * @param trxnLogIndex transaction log index id + * @return list of missing parent directories + * @throws IOException DB failure + */ + protected List getAllMissingParentDirInfo( + OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. + // maxObjId represents the largest object id allocation possible inside + // the transaction. 
+ long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + long lastKnownParentId = pathInfo.getLastKnownParentId(); + List missingParents = pathInfo.getMissingParents(); + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to DirectoryTable", + missingKey); + OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, + keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, + bucketInfo, pathInfo, ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(dirInfo); + + // updating id for the next sub-dir + lastKnownParentId = nextObjId; + } + pathInfo.setLastKnownParentId(lastKnownParentId); + pathInfo.setLeafNodeObjectId(baseObjId + objectCount); + return missingParentInfos; + } + + /** + * Construct OmKeyInfo for every parent directory in missing list. + * @param ozoneManager + * @param keyArgs + * @param missingParents list of parent directories to be created + * @param bucketInfo + * @param omPathInfo + * @param trxnLogIndex + * @return {@code List} + * @throws IOException + */ + protected List getAllParentInfo(OzoneManager ozoneManager, + KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. 
+ // maxObjId represents the largest object id allocation possible inside + // the transaction. + long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; // baseObjID is used by the leaf directory + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to KeyTable", missingKey); + + OmKeyInfo parentKeyInfo = + createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, + bucketInfo, omPathInfo, trxnLogIndex, + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(parentKeyInfo); + } + + return missingParentInfos; + } + + /** + * Fill in a DirectoryInfo for a new directory entry in OM database. 
+ * @param dirName + * @param keyArgs + * @param objectId + * @param parentObjectId + * @param bucketInfo + * @param omPathInfo + * @param config + * @return the OmDirectoryInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmDirectoryInfo createDirectoryInfoWithACL( + String dirName, KeyArgs keyArgs, long objectId, + long parentObjectId, long transactionIndex, + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, + OzoneConfiguration config) throws OMException { + return OmDirectoryInfo.newBuilder() + .setName(dirName) + .setOwner(keyArgs.getOwnerName()) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setObjectID(objectId) + .setUpdateID(transactionIndex) + .setParentObjectID(parentObjectId).setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .build(); + } + + /** + * fill in a KeyInfo for a new directory entry in OM database. + * without initializing ACLs from the KeyArgs - used for intermediate + * directories which get created internally/recursively during file + * and directory create. 
+ * @param keyName + * @param keyArgs + * @param objectId + * @param bucketInfo + * @param omPathInfo + * @param transactionIndex + * @param serverDefaultReplConfig + * @param config + * @return the OmKeyInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, + KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, + ReplicationConfig serverDefaultReplConfig, OzoneConfiguration config) throws OMException { + return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, + serverDefaultReplConfig) + .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .setUpdateID(transactionIndex).build(); + } + + protected OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, KeyArgs keyArgs, long objectId, + ReplicationConfig serverDefaultReplConfig) { + String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + + OmKeyInfo.Builder keyInfoBuilder = + new OmKeyInfo.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(dirName) + .setOwnerName(keyArgs.getOwnerName()) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setDataSize(0); + if (keyArgs.getFactor() != null && keyArgs + .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs + .getType() != HddsProtos.ReplicationType.EC) { + // Factor available and not an EC replication config. 
+ keyInfoBuilder.setReplicationConfig(ReplicationConfig + .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); + } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { + // Found EC type + keyInfoBuilder.setReplicationConfig( + new ECReplicationConfig(keyArgs.getEcReplicationConfig())); + } else { + // default type + keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); + } + + keyInfoBuilder.setObjectID(objectId); + return keyInfoBuilder; + } + /** * Check Acls for the ozone bucket. * @param ozoneManager @@ -801,12 +1016,12 @@ protected OmKeyInfo prepareKeyInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, boolean isRatisEnabled, - ReplicationConfig replicationConfig) + ReplicationConfig replicationConfig, OzoneConfiguration config) throws IOException { return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size, locations, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID, isRatisEnabled, replicationConfig); + transactionLogIndex, objectID, isRatisEnabled, replicationConfig, config); } /** @@ -824,12 +1039,12 @@ protected OmKeyInfo prepareFileInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, - boolean isRatisEnabled, ReplicationConfig replicationConfig) - throws IOException { + boolean isRatisEnabled, ReplicationConfig replicationConfig, + OzoneConfiguration config) throws IOException { if (keyArgs.getIsMultipartKey()) { return prepareMultipartFileInfo(omMetadataManager, keyArgs, size, locations, encInfo, prefixManager, omBucketInfo, - omPathInfo, transactionLogIndex, objectID); + omPathInfo, transactionLogIndex, objectID, config); //TODO args.getMetadata } if (dbKeyInfo != null) { @@ -872,7 +1087,7 @@ protected OmKeyInfo prepareFileInfo( // Blocks will be appended as version 0. 
return createFileInfo(keyArgs, locations, replicationConfig, keyArgs.getDataSize(), encInfo, prefixManager, - omBucketInfo, omPathInfo, transactionLogIndex, objectID); + omBucketInfo, omPathInfo, transactionLogIndex, objectID, config); } /** @@ -889,7 +1104,8 @@ protected OmKeyInfo createFileInfo( @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - long transactionLogIndex, long objectID) { + long transactionLogIndex, long objectID, + OzoneConfiguration config) throws OMException { OmKeyInfo.Builder builder = new OmKeyInfo.Builder(); builder.setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) @@ -902,7 +1118,7 @@ protected OmKeyInfo createFileInfo( .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) .setAcls(getAclsForKey( - keyArgs, omBucketInfo, omPathInfo, prefixManager)) + keyArgs, omBucketInfo, omPathInfo, prefixManager, config)) .addAllMetadata(KeyValueUtil.getFromProtobuf( keyArgs.getMetadataList())) .addAllTags(KeyValueUtil.getFromProtobuf( @@ -936,8 +1152,8 @@ private OmKeyInfo prepareMultipartFileInfo( FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - @Nonnull long transactionLogIndex, long objectID) - throws IOException { + @Nonnull long transactionLogIndex, long objectID, + OzoneConfiguration configuration) throws IOException { Preconditions.checkArgument(args.getMultipartNumber() > 0, "PartNumber Should be greater than zero"); @@ -975,7 +1191,7 @@ private OmKeyInfo prepareMultipartFileInfo( // is not an actual key, it is a part of the key. 
return createFileInfo(args, locations, partKeyInfo.getReplicationConfig(), size, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID); + transactionLogIndex, objectID, configuration); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 26c559eef6e5..0a2703c769ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -209,7 +209,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfo, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(objectID) .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index de78c6651109..d55a7b41918b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -121,8 +120,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeName, bucketName); // add all missing parents to dir table - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, pathInfoFSO, transactionLogIndex); // We are adding uploadId to key, because if multiple users try to @@ -185,7 +183,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(transactionLogIndex) 
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 597a40006f94..2bb77005c957 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.OzoneConfigUtil; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.ratis.server.protocol.TermIndex; @@ -187,8 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName)); - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, pathInfoFSO, trxnLogIndex); if (missingParentInfos != null) { @@ -236,7 +234,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(trxnLogIndex) 
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 01dbb5ba1e02..a22775107b9c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -20,11 +20,15 @@ import java.io.IOException; import java.nio.file.InvalidPathException; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -55,6 +59,7 @@ .VolumeInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; @@ -160,6 +165,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeList = omMetadataManager.getUserTable().get(dbUserKey); volumeList = addVolumeToOwnerList(volumeList, volume, owner, ozoneManager.getMaxUserVolumeCount(), transactionLogIndex); + + // Add default ACL for volume + List listOfAcls = getDefaultAclList(UserGroupInformation.createRemoteUser(owner), + ozoneManager.getConfiguration()); + // ACLs from VolumeArgs + if (omVolumeArgs.getAcls() != null) { + listOfAcls.addAll(omVolumeArgs.getAcls()); + } + // Remove the duplicates + 
listOfAcls = listOfAcls.stream().distinct().collect(Collectors.toList()); + omVolumeArgs.setAcls(listOfAcls); + createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey, dbUserKey, transactionLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 54b04260d556..eb13f97d2376 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -107,6 +108,7 @@ public void setup() throws IOException { when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) @@ -450,6 +452,11 @@ private OMClientResponse createVolume(String volumeName, OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest(omRequest); + try { + omVolumeCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + throw new RuntimeException(e); + } final TermIndex termIndex = 
TransactionInfo.getTermIndex(transactionId); OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); @@ -462,7 +469,7 @@ private OMClientResponse createVolume(String volumeName, * @return OMBucketCreateResponse */ private OMBucketCreateResponse createBucket(String volumeName, - String bucketName, long transactionID) { + String bucketName, long transactionID) { BucketInfo.Builder bucketInfo = newBucketInfoBuilder(bucketName, volumeName) @@ -472,6 +479,10 @@ private OMBucketCreateResponse createBucket(String volumeName, OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(omRequest); + try { + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + } final TermIndex termIndex = TermIndex.valueOf(term, transactionID); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 201c2a759fca..59debe08a616 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -41,6 +42,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; 
+import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -336,7 +338,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -355,8 +357,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java index 3a1d22f08a80..029b1f9082b8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import 
org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -34,6 +35,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketLayoutProto.FILE_SYSTEM_OPTIMIZED; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -143,7 +145,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -166,8 +168,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 2701aa22db6a..9df26293d0ef 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -68,6 +69,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; @@ -183,7 +185,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -222,6 +224,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceed() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -310,7 +313,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse 
= omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -430,6 +433,7 @@ public void testCreateDirectoryOMMetric() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -480,7 +484,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -510,7 +514,7 @@ private void verifyDirectoriesInheritAcls(String volumeName, List omKeyAcls = omKeyInfo.getAcls(); - assertEquals(expectedInheritAcls, omKeyAcls, "Failed to inherit parent acls!,"); + assertTrue(omKeyAcls.containsAll(expectedInheritAcls), "Failed to inherit parent acls!,"); prefix = dirName + OZONE_URI_DELIMITER; expectedInheritAcls = omKeyAcls; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 795e6e3c534c..fca7efba169a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -169,7 +170,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -209,6 +210,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); assertSame(omClientResponse.getOMResponse().getStatus(), @@ -317,7 +319,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -570,6 +572,7 @@ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -604,7 +607,7 @@ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -643,6 
+646,7 @@ public void testCreateDirectoryOMMetric() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -695,7 +699,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); assertSame(omClientResponse.getOMResponse().getStatus(), @@ -730,7 +734,7 @@ private void verifyDirectoriesInheritAcls(List dirs, System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 20da9d3e5dcc..cdad3bcb18ee 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -481,7 +482,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, 
System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); @@ -513,9 +514,9 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, // Should inherit parent DEFAULT acls // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit bucket DEFAULT acls!"); // Should not inherit parent ACCESS acls assertThat(keyAcls).doesNotContain(parentAccessAcl); @@ -529,7 +530,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, ".snapshot/a/b/keyName,Cannot create key under path reserved for snapshot: .snapshot/", ".snapshot,Cannot create key with reserved name: .snapshot"}) public void testPreExecuteWithInvalidKeyPrefix(String invalidKeyName, - String expectedErrorMessage) { + String expectedErrorMessage) throws IOException { OMRequest omRequest = createFileRequest(volumeName, bucketName, invalidKeyName, HddsProtos.ReplicationFactor.ONE, @@ -644,8 +645,10 @@ protected OMRequest createFileRequest( * @return OMFileCreateRequest reference */ @Nonnull - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequest(omRequest, getBucketLayout()); + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index e988949c5b85..5a8c638141fc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -28,9 +28,11 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.junit.jupiter.api.Test; +import java.io.IOException; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -236,9 +238,11 @@ private OmDirectoryInfo getDirInfo(String key) } @Override - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequestWithFSO(omRequest, + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 4bfdd333296d..b9b7c30744ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; 
import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -501,6 +502,7 @@ public void testOverwritingExistingMetadata( createKeyRequest(false, 0, keyName, initialMetadata); OMKeyCreateRequest initialOmKeyCreateRequest = new OMKeyCreateRequest(initialRequest, getBucketLayout()); + initialOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse initialResponse = initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); verifyMetadataInResponse(initialResponse, initialMetadata); @@ -519,6 +521,7 @@ createKeyRequest(false, 0, keyName, updatedMetadata); OMKeyCreateRequest updatedOmKeyCreateRequest = new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + updatedOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse updatedResponse = updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); @@ -562,6 +565,7 @@ public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( createKeyRequest(false, 0, keyName, overwriteMetadata, emptyMap(), emptyList()); OMKeyCreateRequest overwriteOmKeyCreateRequest = new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + overwriteOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); // Perform the overwrite operation and capture the response OMClientResponse overwriteResponse = @@ -989,7 +993,7 @@ public void testAtomicRewrite( // Retrieve the committed key info OmKeyInfo existingKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(getOzoneKey()); List existingAcls = existingKeyInfo.getAcls(); - assertEquals(acls, existingAcls); + assertThat(existingAcls).containsAll(acls); // Create a request with a
generation which doesn't match the current key omRequest = createKeyRequest(false, 0, 100, @@ -1039,9 +1043,9 @@ private void verifyKeyInheritAcls(List keyAcls, .findAny().orElse(null); // Should inherit parent DEFAULT Acls - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit parent DEFAULT acls!,"); // Should not inherit parent ACCESS Acls @@ -1054,7 +1058,7 @@ protected void addToKeyTable(String keyName) throws Exception { } - private void checkNotAValidPath(String keyName) { + private void checkNotAValidPath(String keyName) throws IOException { OMRequest omRequest = createKeyRequest(false, 0, keyName); OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest); OMException ex = @@ -1137,13 +1141,16 @@ protected String getOzoneKey() throws IOException { return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } - protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequest(omRequest, getBucketLayout()); + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected OMKeyCreateRequest getOMKeyCreateRequest( - OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequest(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index a5181b25a0e6..8f8cc0254364 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -210,15 +211,19 @@ protected String getOzoneKey() throws IOException { } @Override - protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequestWithFSO(omRequest, + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected OMKeyCreateRequest getOMKeyCreateRequest( - OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequestWithFSO(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index f02e1ee23679..0220afbc60c8 
100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -447,6 +448,7 @@ private List createMPUsWithFSO(String volume, String bucket, S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + s3InitiateMultipartUploadRequest.setUGI(UserGroupInformation.getLoginUser()); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest .validateAndUpdateCache(ozoneManager, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 30b76801d9e8..f9006b852e4c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -250,7 +250,7 @@ private void verifyKeyInheritAcls(List keyAcls, // Should inherit parent DEFAULT Acls // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] - 
assertEquals(parentDefaultAcl, keyAcls, + assertTrue(keyAcls.containsAll(parentDefaultAcl), "Failed to inherit parent DEFAULT acls!"); // Should not inherit parent ACCESS Acls diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 1d4eb5310e05..d92992edf58c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -166,9 +167,11 @@ private long verifyDirectoriesInDB(List dirs, final long volumeId, @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getLoginUser()); + return request; } @Test @@ -256,7 +259,7 @@ private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, List omDirAcls = omDirInfo.getAcls(); System.out.println(" subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + 
assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index bd93fe176e93..ff9206675397 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; @@ -114,6 +115,7 @@ public void setup() throws Exception { when(lvm.getMetadataLayoutVersion()).thenReturn(0); when(ozoneManager.getVersionManager()).thenReturn(lvm); when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @@ -353,21 +355,27 @@ protected OMRequest doPreExecuteInitiateMPUWithFSO( } protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequest(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequest(omRequest, + OMRequest 
omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequest(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequest(initiateMPURequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java index 3c710988a567..7e92cf042e77 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java @@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.UUID; @@ -45,9 +46,11 @@ protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { 
+ S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 014b4e021cb3..fa901af6457b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -71,7 +71,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - getS3InitiateMultipartUploadReq(initiateMPURequest); + getS3InitiateMultipartUploadReq(initiateMPURequest); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, 1L); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 24480c249cc8..eb2c82af1726 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -49,16 +50,20 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 1762f38b44bd..dc58254d7d37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -113,23 +114,29 @@ protected String getOzoneDBKey(String volumeName, String bucketName, @Override protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java index 14f1438b78bf..5e0d2db17c9b 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java @@ -81,6 +81,7 @@ public void setup() throws Exception { auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index c354864a5297..ab773f6d718f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -169,8 +169,7 @@ private void createKey(String volume, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index c3ec7843a6f6..a6e6f13ae343 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -344,8 +344,7 @@ private OzoneObjInfo createKey(String volume, String bucket, String keyName) 
HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) // here we give test ugi full access - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index 7c1aad0723be..98e7ce7be85d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -142,11 +142,9 @@ private static void prepareTestKeys() throws IOException { UserGroupInformation.getCurrentUser().getShortUserName()) .setDataSize(0); if (k == 0) { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), testUgi.getGroupNames(), ALL, ALL)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)); } else { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), testUgi.getGroupNames(), NONE, NONE)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, NONE, NONE)); } OmKeyArgs keyArgs = keyArgsBuilder.build(); OpenKeySession keySession = writeClient.createFile(keyArgs, true, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java index 9c98817185e3..d5fbdc75f19d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java @@ -109,8 +109,7 @@ private void createKey(long counter) throws Exception { .setKeyName(generateObjectName(counter)) .setReplicationConfig(replicationConfig) 
.setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setOwnerName(ownerName) .build(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index 24060b0bac8f..4c277f07422d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -272,8 +272,7 @@ private OmKeyArgs.Builder createKeyArgsBuilder() { .setVolumeName(volumeName) .setReplicationConfig(replicationConfig) .setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)); + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)); } private String getPath(long counter) {