diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
index 623587dd33b4..d2b44163d876 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
@@ -28,7 +28,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
@@ -66,7 +66,7 @@ protected HddsDatanodeClientProtocolServer(
HDDS_DATANODE_CLIENT_ADDRESS_KEY,
HddsUtils.getDatanodeRpcAddress(conf), rpcServer);
datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort());
- if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
rpcServer.refreshServiceAcl(conf, HddsPolicyProvider.getInstance());
}
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index 1382bed67f08..da9175a36ef6 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -112,7 +112,8 @@
- value="^org.apache.hadoop.test.(GenericTestUtils|LambdaTestUtils|TimedOutTestsListener)"/>
+ value="^org.apache.hadoop.test.(GenericTestUtils|LambdaTestUtils|TimedOutTestsListener)
+ |^org.apache.hadoop.fs.CommonConfigurationKeys"/>
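+ <!-- CommonConfigurationKeys is Hadoop-private; use CommonConfigurationKeysPublic instead. -->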
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 0e505633dd47..4b9f628223f3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -62,7 +62,7 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -183,6 +183,8 @@ public final class HttpServer2 implements FilterContainer {
private static final String NO_CACHE_FILTER = "NoCacheFilter";
private static final String BIND_ADDRESS = "bind.address";
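+ // Inlined because these keys are not part of CommonConfigurationKeysPublic.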
+ private static final String HADOOP_JETTY_LOGS_SERVE_ALIASES = "hadoop.jetty.logs.serve.aliases";
+ private static final boolean DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES = true;
private final AccessControlList adminsAcl;
@@ -761,16 +763,14 @@ protected void addDefaultApps(ContextHandlerCollection parent,
// and it's enabled.
String logDir = System.getProperty("hadoop.log.dir");
boolean logsEnabled = conf.getBoolean(
- CommonConfigurationKeys.HADOOP_HTTP_LOGS_ENABLED,
- CommonConfigurationKeys.HADOOP_HTTP_LOGS_ENABLED_DEFAULT);
+ CommonConfigurationKeysPublic.HADOOP_HTTP_LOGS_ENABLED,
+ CommonConfigurationKeysPublic.HADOOP_HTTP_LOGS_ENABLED_DEFAULT);
if (logDir != null && logsEnabled) {
ServletContextHandler logContext =
new ServletContextHandler(parent, "/logs");
logContext.setResourceBase(logDir);
logContext.addServlet(AdminAuthorizedServlet.class, "/*");
- if (conf.getBoolean(
- CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
- CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
+ if (conf.getBoolean(HADOOP_JETTY_LOGS_SERVE_ALIASES, DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
Map params = logContext.getInitParams();
params.put("org.eclipse.jetty.servlet.Default.aliases", "true");
}
@@ -1441,7 +1441,7 @@ public static boolean isInstrumentationAccessAllowed(
boolean access = true;
boolean adminAccess = conf.getBoolean(
- CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
false);
if (adminAccess) {
access = hasAdministratorAccess(servletContext, request, response);
@@ -1466,7 +1466,7 @@ public static boolean hasAdministratorAccess(
.getAttribute(CONF_CONTEXT_ATTRIBUTE);
// If there is no authorization, anybody has administrator access.
if (!conf.getBoolean(
- CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
return true;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 962a1a1b91ab..60c6384ba822 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -44,7 +44,7 @@
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -146,7 +146,7 @@ public SCMBlockProtocolServer(OzoneConfiguration conf,
updateRPCListenAddress(
conf, scm.getScmNodeDetails().getBlockProtocolServerAddressKey(),
scmBlockAddress, blockRpcServer);
- if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
blockRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index b9b5a1920b37..a4a8fe0976c2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -50,7 +50,7 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
@@ -187,7 +187,7 @@ public SCMClientProtocolServer(OzoneConfiguration conf,
updateRPCListenAddress(conf,
scm.getScmNodeDetails().getClientProtocolServerAddressKey(),
scmAddress, clientRpcServer);
- if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
clientRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index eb4feec2fc5c..b23698640b36 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -52,7 +52,7 @@
import java.util.Map;
import java.util.OptionalLong;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -181,7 +181,7 @@ public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
conf, getDatanodeAddressKey(), datanodeRpcAddr,
datanodeRpcServer);
- if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
datanodeRpcServer.refreshServiceAcl(conf, getPolicyProvider());
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index 0a1aeedc9e49..fb9e3d50f838 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.tools.contract;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_INFO;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
@@ -188,7 +187,7 @@ public void setup() throws Exception {
@Override
public void teardown() throws Exception {
// if remote FS supports IOStatistics log it.
- logIOStatisticsAtLevel(LOG, IOSTATISTICS_LOGGING_LEVEL_INFO, getRemoteFS());
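+ // "info" is the value of the removed IOSTATISTICS_LOGGING_LEVEL_INFO constant, inlined to drop the import.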
+ logIOStatisticsAtLevel(LOG, "info", getRemoteFS());
super.teardown();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java
index ca361482bfe5..40f3ce697add 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -42,7 +42,7 @@ public static BucketEncryptionInfoProto getBekInfo(
BucketEncryptionInfoProto.Builder bekb = null;
if (kmsProvider == null) {
throw new OMException("Invalid KMS provider, check configuration " +
- CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
OMException.ResultCodes.INVALID_KMS_PROVIDER);
}
if (bek.getKeyName() == null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 9fcc2ae565fe..66dc87e14339 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -156,7 +156,6 @@
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -1440,7 +1439,7 @@ private RPC.Server startRpcServer(OzoneConfiguration conf,
HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolOmPB.class,
reconfigureProtocolService, rpcServer);
- if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
rpcServer.refreshServiceAcl(conf, OMPolicyProvider.getInstance());
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index c956436fd55f..f7ae16450ff6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -58,7 +58,6 @@
import java.util.concurrent.atomic.AtomicBoolean;
import javax.sql.DataSource;
import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -139,6 +138,8 @@ public class ReconStorageContainerManagerFacade
private static final Logger LOG = LoggerFactory
.getLogger(ReconStorageContainerManagerFacade.class);
public static final long CONTAINER_METADATA_SIZE = 1 * 1024 * 1024L;
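+ // Inlined from CommonConfigurationKeys, which is no longer imported here.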
+ private static final String IPC_MAXIMUM_DATA_LENGTH = "ipc.maximum.data.length";
+ private static final int IPC_MAXIMUM_DATA_LENGTH_DEFAULT = 128 * 1024 * 1024;
private final OzoneConfiguration ozoneConfiguration;
private final ReconDatanodeProtocolServer datanodeProtocolServer;
@@ -594,9 +595,7 @@ private long getContainerCountPerCall(long totalContainerCount) {
// Assumption of size of 1 container info object here is 1 MB
long containersMetaDataTotalRpcRespSizeMB =
CONTAINER_METADATA_SIZE * totalContainerCount;
- long hadoopRPCSize = ozoneConfiguration.getInt(
- CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
- CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
+ long hadoopRPCSize = ozoneConfiguration.getInt(IPC_MAXIMUM_DATA_LENGTH, IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
long containerCountPerCall = containersMetaDataTotalRpcRespSizeMB <=
hadoopRPCSize ? totalContainerCount :
Math.round(Math.floor(