diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 2982e3845e40..2a48e6826514 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1000,6 +1000,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName,
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
+        .setRefreshPipeline(true)
         .build();
     return ozoneManagerClient.getFileStatus(keyArgs);
   }
@@ -1081,6 +1082,7 @@ public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
+        .setRefreshPipeline(true)
         .build();
     return ozoneManagerClient
         .listStatus(keyArgs, recursive, startKey, numEntries);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
index 871794651214..1f4c0e5b1e16 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
@@ -44,8 +44,9 @@ public OzoneFileStatus(OmKeyInfo key, long blockSize, boolean isDirectory) {
     keyInfo = key;
   }
 
-  public OzoneFileStatus(FileStatus status) throws IOException {
+  public OzoneFileStatus(FileStatus status, OmKeyInfo key) throws IOException {
     super(status);
+    keyInfo = key;
   }
 
   // Use this constructor only for directories
@@ -54,13 +55,18 @@ public OzoneFileStatus(String keyName) {
   }
 
   public OzoneFileStatusProto getProtobuf() throws IOException {
-    return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this))
-        .build();
+    OzoneFileStatusProto.Builder builder = OzoneFileStatusProto.newBuilder()
+        .setStatus(PBHelper.convert(this));
+    if (keyInfo != null) {
+      builder.setKeyInfo(keyInfo.getProtobuf());
+    }
+    return builder.build();
   }
 
   public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response)
       throws IOException {
-    return new OzoneFileStatus(PBHelper.convert(response.getStatus()));
+    return new OzoneFileStatus(PBHelper.convert(response.getStatus()),
+        OmKeyInfo.getFromProtobuf(response.getKeyInfo()));
   }
 
   public static Path getPath(String keyName) {
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index b9315d30780b..aabe9e45f2a7 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -754,6 +754,7 @@ message RepeatedKeyInfo {
 
 message OzoneFileStatusProto {
     required hadoop.fs.FileStatusProto status = 1;
+    optional KeyInfo keyInfo = 2;
 }
 
 message GetFileStatusRequest {
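The optional keyInfo field above is what lets a file status carry its block list from the OzoneManager back to the client. A minimal sketch of the intended round-trip, not part of the patch (keyInfo and scmBlockSize stand in for values the server already has):

    static OzoneFileStatus roundTrip(OmKeyInfo keyInfo, long scmBlockSize)
        throws IOException {
      // Server side: the OmKeyInfo is attached, so getProtobuf() embeds it.
      OzoneFileStatus status = new OzoneFileStatus(keyInfo, scmBlockSize, false);
      OzoneFileStatusProto proto = status.getProtobuf();
      // Client side: the decoded status exposes the key info (and thus the
      // block locations) without a second OM lookup.
      return OzoneFileStatus.getFromProtobuf(proto);
    }
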
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 2a7210103f6b..00e86d154c73 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -25,17 +25,21 @@
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -51,6 +55,7 @@
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import org.junit.After;
 import org.junit.Assert;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -312,6 +317,54 @@ public void testOzoneManagerFileSystemInterface() throws IOException {
     assertEquals(omStatus.getPath().getName(), o3fs.pathToKey(path));
   }
 
+  @Test
+  public void testOzoneManagerLocatedFileStatus() throws IOException {
+    String data = RandomStringUtils.randomAlphanumeric(20);
+    String filePath = RandomStringUtils.randomAlphanumeric(5);
+    Path path = createPath("/" + filePath);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      stream.writeBytes(data);
+    }
+    FileStatus status = fs.getFileStatus(path);
+    assertTrue(status instanceof LocatedFileStatus);
+    LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status;
+    assertTrue(locatedFileStatus.getBlockLocations().length >= 1);
+
+    for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) {
+      assertTrue(blockLocation.getNames().length >= 1);
+      assertTrue(blockLocation.getHosts().length >= 1);
+    }
+  }
+
+  @Test
+  public void testOzoneManagerLocatedFileStatusBlockOffsetsWithMultiBlockFile()
+      throws Exception {
+    // naive assumption: MiniOzoneCluster will not have larger than ~1GB
+    // block size when running this test.
+    int blockSize = (int) fs.getConf().getStorageSize(
+        OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE,
+        OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT,
+        StorageUnit.BYTES
+    );
+    String data = RandomStringUtils.randomAlphanumeric(2 * blockSize + 837);
+    String filePath = RandomStringUtils.randomAlphanumeric(5);
+    Path path = createPath("/" + filePath);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      stream.writeBytes(data);
+    }
+    FileStatus status = fs.getFileStatus(path);
+    assertTrue(status instanceof LocatedFileStatus);
+    LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status;
+    BlockLocation[] blockLocations = locatedFileStatus.getBlockLocations();
+
+    assertEquals(0, blockLocations[0].getOffset());
+    assertEquals(blockSize, blockLocations[1].getOffset());
+    assertEquals(2 * blockSize, blockLocations[2].getOffset());
+    assertEquals(blockSize, blockLocations[0].getLength());
+    assertEquals(blockSize, blockLocations[1].getLength());
+    assertEquals(837, blockLocations[2].getLength());
+  }
+
   @Test
   public void testPathToKey() throws Exception {
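For context, the multi-block test above asserts the layout that the ozonefs adapter computes later in this patch: block i of a key starts at i * blockSize and only the last block is short. A hypothetical helper, not part of the patch, stating the same arithmetic:

    // {offset, length} per block for a key of fileLength bytes.
    static long[][] expectedLayout(long fileLength, long blockSize) {
      int fullBlocks = (int) (fileLength / blockSize);
      long tail = fileLength % blockSize;
      int count = fullBlocks + (tail > 0 ? 1 : 0);
      long[][] layout = new long[count][2];
      for (int i = 0; i < count; i++) {
        layout[i][0] = i * blockSize;
        layout[i][1] = (i < fullBlocks) ? blockSize : tail;
      }
      return layout;
    }

For the 2 * blockSize + 837 byte key written by the test this gives offsets 0, blockSize, 2 * blockSize and lengths blockSize, blockSize, 837, matching the assertions.
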
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 080d90363656..b06653498c5d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -37,6 +37,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
@@ -686,31 +687,35 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress)
    */
   @VisibleForTesting
   protected void refreshPipeline(OmKeyInfo value) throws IOException {
-    Map<Long, ContainerWithPipeline> containerWithPipelineMap = new HashMap<>();
-    for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
-      for (OmKeyLocationInfo k : key.getLocationList()) {
-        // TODO: fix Some tests that may not initialize container client
-        // The production should always have containerClient initialized.
-        if (scmClient.getContainerClient() != null) {
-          try {
-            if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
-              ContainerWithPipeline containerWithPipeline = scmClient
-                  .getContainerClient()
-                  .getContainerWithPipeline(k.getContainerID());
-              containerWithPipelineMap.put(k.getContainerID(),
-                  containerWithPipeline);
+    if (value != null &&
+        CollectionUtils.isNotEmpty(value.getKeyLocationVersions())) {
+      Map<Long, ContainerWithPipeline> containerWithPipelineMap =
+          new HashMap<>();
+      for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
+        for (OmKeyLocationInfo k : key.getLocationList()) {
+          // TODO: fix Some tests that may not initialize container client
+          // The production should always have containerClient initialized.
+          if (scmClient.getContainerClient() != null) {
+            try {
+              if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
+                ContainerWithPipeline containerWithPipeline = scmClient
+                    .getContainerClient()
+                    .getContainerWithPipeline(k.getContainerID());
+                containerWithPipelineMap.put(k.getContainerID(),
+                    containerWithPipeline);
+              }
+            } catch (IOException ioEx) {
+              LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " +
+                  "key:{}", value.getVolumeName(), value.getBucketName(),
+                  value.getKeyName(), ioEx);
+              throw new OMException(ioEx.getMessage(),
+                  SCM_GET_PIPELINE_EXCEPTION);
+            }
+            ContainerWithPipeline cp =
+                containerWithPipelineMap.get(k.getContainerID());
+            if (!cp.getPipeline().equals(k.getPipeline())) {
+              k.setPipeline(cp.getPipeline());
             }
-          } catch (IOException ioEx) {
-            LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " +
-                "key:{}", value.getVolumeName(), value.getBucketName(),
-                value.getKeyName(), ioEx);
-            throw new OMException(ioEx.getMessage(),
-                SCM_GET_PIPELINE_EXCEPTION);
-          }
-          ContainerWithPipeline cp =
-              containerWithPipelineMap.get(k.getContainerID());
-          if (!cp.getPipeline().equals(k.getPipeline())) {
-            k.setPipeline(cp.getPipeline());
           }
         }
       }
@@ -1687,6 +1692,9 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
           volumeName, bucketName, keyName);
       OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes);
       if (fileKeyInfo != null) {
+        if (args.getRefreshPipeline()) {
+          refreshPipeline(fileKeyInfo);
+        }
         // this is a file
         return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
       }
@@ -2024,6 +2032,9 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
     for (Map.Entry<String, OzoneFileStatus> entry : cacheKeyMap.entrySet()) {
       // No need to check if a key is deleted or not here, this is handled
       // when adding entries to cacheKeyMap from DB.
+      if (args.getRefreshPipeline()) {
+        refreshPipeline(entry.getValue().getKeyInfo());
+      }
       fileStatusList.add(entry.getValue());
       countEntries++;
       if (countEntries >= numEntries) {
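The refresh is opt-in: KeyManagerImpl only re-resolves pipelines from SCM when the caller sets the flag on OmKeyArgs. A short sketch of the caller side, not from the patch (keyManager stands in for however a KeyManager reference is obtained):

    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)   // ask for current pipelines from SCM
        .build();
    OzoneFileStatus status = keyManager.getFileStatus(keyArgs);
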
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index f5e2398ce793..03b8715f3689 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -557,6 +557,7 @@ private GetFileStatusResponse getOzoneFileStatus(
         .setVolumeName(keyArgs.getVolumeName())
         .setBucketName(keyArgs.getBucketName())
         .setKeyName(keyArgs.getKeyName())
+        .setRefreshPipeline(true)
         .build();
 
     GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder();
@@ -588,6 +589,7 @@ private ListStatusResponse listStatus(
         .setVolumeName(keyArgs.getVolumeName())
         .setBucketName(keyArgs.getBucketName())
         .setKeyName(keyArgs.getKeyName())
+        .setRefreshPipeline(true)
         .build();
     List<OzoneFileStatus> statuses =
         impl.listStatus(omKeyArgs, request.getRecursive(),
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index a16f8dc56c55..a97a647571c3 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -25,15 +25,18 @@
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OmUtils;
@@ -46,6 +49,9 @@
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
@@ -74,6 +80,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
   private ReplicationType replicationType;
   private ReplicationFactor replicationFactor;
   private boolean securityEnabled;
+  private int configuredDnPort;
 
   /**
    * Create new OzoneClientAdapter implementation.
@@ -168,6 +175,9 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort,
       this.bucket = volume.getBucket(bucketStr);
       this.replicationType = ReplicationType.valueOf(replicationTypeConf);
       this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf);
+      this.configuredDnPort = conf.getInt(
+          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     } finally {
       Thread.currentThread().setContextClassLoader(contextClassLoader);
     }
@@ -440,7 +450,64 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status) {
         status.getPermission().toShort(),
         status.getOwner(),
         status.getGroup(),
-        status.getPath()
+        status.getPath(),
+        getBlockLocations(status)
     );
   }
+
+  /**
+   * Helper method to get List of BlockLocation from OM Key info.
+   * @param fileStatus Ozone key file status.
+   * @return list of block locations.
+   */
+  private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) {
+
+    if (fileStatus == null) {
+      return new BlockLocation[0];
+    }
+
+    OmKeyInfo keyInfo = fileStatus.getKeyInfo();
+    if (keyInfo == null || CollectionUtils.isEmpty(
+        keyInfo.getKeyLocationVersions())) {
+      return new BlockLocation[0];
+    }
+    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups =
+        keyInfo.getKeyLocationVersions();
+    if (CollectionUtils.isEmpty(omKeyLocationInfoGroups)) {
+      return new BlockLocation[0];
+    }
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup =
+        keyInfo.getLatestVersionLocations();
+    BlockLocation[] blockLocations = new BlockLocation[
+        omKeyLocationInfoGroup.getBlocksLatestVersionOnly().size()];
+
+    int i = 0;
+    long offsetOfBlockInFile = 0L;
+    for (OmKeyLocationInfo omKeyLocationInfo :
+        omKeyLocationInfoGroup.getBlocksLatestVersionOnly()) {
+      List<String> hostList = new ArrayList<>();
+      List<String> nameList = new ArrayList<>();
+      omKeyLocationInfo.getPipeline().getNodes()
+          .forEach(dn -> {
+            hostList.add(dn.getHostName());
+            int port = dn.getPort(
+                DatanodeDetails.Port.Name.STANDALONE).getValue();
+            if (port == 0) {
+              port = configuredDnPort;
+            }
+            nameList.add(dn.getHostName() + ":" + port);
+          });
+
+      String[] hosts = hostList.toArray(new String[hostList.size()]);
+      String[] names = nameList.toArray(new String[nameList.size()]);
+      BlockLocation blockLocation = new BlockLocation(
+          names, hosts, offsetOfBlockInFile,
+          omKeyLocationInfo.getLength());
+      offsetOfBlockInFile += omKeyLocationInfo.getLength();
+      blockLocations[i++] = blockLocation;
+    }
+    return blockLocations;
+  }
+
 }
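Not from the patch: a small illustration of the BlockLocation contract the helper above relies on. The names array carries host:port per replica (the port falling back to the configured datanode IPC port when the pipeline reports 0), hosts carries bare hostnames, and offset/length place the block inside the key; the hostnames, port and blockLength below are assumed values:

    String[] names = {"dn1.example.com:9859", "dn2.example.com:9859", "dn3.example.com:9859"};
    String[] hosts = {"dn1.example.com", "dn2.example.com", "dn3.example.com"};
    BlockLocation first = new BlockLocation(names, hosts, 0L, blockLength);
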
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index d421554335c4..b91c95d9345b 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -35,12 +35,14 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -164,6 +166,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
       }
       this.workingDir = new Path(OZONE_USER_DIR, this.userName)
          .makeQualified(this.uri, this.workingDir);
+
     } catch (URISyntaxException ue) {
       final String msg = "Invalid Ozone endpoint " + name;
       LOG.error(msg, ue);
@@ -643,6 +646,17 @@ public FileStatus getFileStatus(Path f) throws IOException {
     return fileStatus;
   }
 
+  @Override
+  public BlockLocation[] getFileBlockLocations(FileStatus fileStatus,
+      long start, long len)
+      throws IOException {
+    if (fileStatus instanceof LocatedFileStatus) {
+      return ((LocatedFileStatus) fileStatus).getBlockLocations();
+    } else {
+      return super.getFileBlockLocations(fileStatus, start, len);
+    }
+  }
+
   /**
    * Turn a path (relative or otherwise) into an Ozone key.
   *
@@ -784,7 +798,7 @@ private FileStatus convertFileStatus(
       //NOOP: If not symlink symlink remains null.
     }
 
-    return new FileStatus(
+    FileStatus fileStatus = new FileStatus(
       fileStatusAdapter.getLength(),
       fileStatusAdapter.isDir(),
       fileStatusAdapter.getBlockReplication(),
@@ -798,5 +812,11 @@
       fileStatusAdapter.getPath()
     );
 
+    BlockLocation[] blockLocations = fileStatusAdapter.getBlockLocations();
+    if (blockLocations == null || blockLocations.length == 0) {
+      return fileStatus;
+    }
+    return new LocatedFileStatus(fileStatus, blockLocations);
   }
+
 }
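A short usage sketch, not part of the patch, of what the override above enables for o3fs clients (fs and file are assumed to be an o3fs FileSystem instance and an existing key):

    static void printLocality(FileSystem fs, Path file) throws IOException {
      FileStatus status = fs.getFileStatus(file);
      // With this patch the returned status is a LocatedFileStatus, so the call
      // below is answered from the key info already fetched, not extra RPCs.
      for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
        System.out.println(loc.getOffset() + "+" + loc.getLength()
            + " on " + String.join(",", loc.getHosts()));
      }
    }

Schedulers such as MapReduce and Spark go through getFileBlockLocations in exactly this way when placing tasks near their data.
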
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
index 91597839340b..64e43f5320e5 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
@@ -17,8 +17,11 @@
  */
 package org.apache.hadoop.fs.ozone;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 /**
  * Class to hold the internal information of a FileStatus.
  * <p>
@@ -42,12 +45,13 @@ public final class FileStatusAdapter {
   private final String owner;
   private final String group;
   private final Path symlink;
+  private final BlockLocation[] blockLocations;
 
   @SuppressWarnings("checkstyle:ParameterNumber")
   public FileStatusAdapter(long length, Path path, boolean isdir,
       short blockReplication, long blocksize, long modificationTime,
       long accessTime, short permission, String owner,
-      String group, Path symlink) {
+      String group, Path symlink, BlockLocation[] locations) {
     this.length = length;
     this.path = path;
     this.isdir = isdir;
@@ -59,6 +63,7 @@ public FileStatusAdapter(long length, Path path, boolean isdir,
     this.owner = owner;
     this.group = group;
     this.symlink = symlink;
+    this.blockLocations = locations.clone();
   }
 
   public Path getPath() {
@@ -105,4 +110,9 @@ public long getLength() {
     return length;
   }
 
+  @SuppressFBWarnings("EI_EXPOSE_REP")
+  public BlockLocation[] getBlockLocations() {
+    return blockLocations;
+  }
+
 }
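Since the new constructor parameter is cloned unconditionally, callers without locations (directories, or statuses with no key info) are expected to pass an empty array rather than null. A hypothetical directory entry, with path, modificationTime, owner and group assumed:

    FileStatusAdapter dir = new FileStatusAdapter(
        0L, path, true, (short) 1, 0L, modificationTime, 0L,
        (short) 0755, owner, group, path, new BlockLocation[0]);
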

diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
index a90797efdd89..e1152519769b 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
@@ -63,6 +63,7 @@ public FilteredClassLoader(URL[] urls, ClassLoader parent) {
     delegatedClasses.add("org.apache.hadoop.fs.Seekable");
     delegatedClasses.add("org.apache.hadoop.io.Text");
     delegatedClasses.add("org.apache.hadoop.fs.Path");
+    delegatedClasses.add("org.apache.hadoop.fs.BlockLocation");
     delegatedClasses.addAll(StringUtils.getTrimmedStringCollection(
         System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")));
     this.delegate = parent;
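For background, not part of the patch: the ozonefs classloader isolates most Hadoop classes, so any type that crosses the adapter boundary has to be delegated to the application classloader; otherwise the same class name is defined twice and casts fail. Roughly, with appLoader and ozoneFsLoader standing in for the two loaders:

    static void sameClass(ClassLoader appLoader, ClassLoader ozoneFsLoader)
        throws ClassNotFoundException {
      Class<?> a = appLoader.loadClass("org.apache.hadoop.fs.BlockLocation");
      Class<?> b = ozoneFsLoader.loadClass("org.apache.hadoop.fs.BlockLocation");
      // True only because BlockLocation is now on the delegation list; without
      // it the loaders define distinct classes and the BlockLocation[] returned
      // by the adapter could not be cast on the caller's side.
      System.out.println(a == b);
    }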