diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index d516cd5f2776..2fcc3a67db4e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -412,8 +412,6 @@ private HddsConfigKeys() {
"hdds.datanode.dns.nameserver";
public static final String HDDS_DATANODE_HOST_NAME_KEY =
"hdds.datanode.hostname";
- public static final String HDDS_DATANODE_DATA_DIR_KEY =
- "hdds.datanode.data.dir";
public static final String HDDS_DATANODE_USE_DN_HOSTNAME =
"hdds.datanode.use.datanode.hostname";
public static final boolean HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 58f33d445acd..0e11be5477f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -387,7 +387,7 @@ private static void addDeprecatedKeys() {
new DeprecationDelta("dfs.datanode.hostname",
HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY),
new DeprecationDelta("dfs.datanode.data.dir",
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY),
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY),
new DeprecationDelta("dfs.datanode.use.datanode.hostname",
HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME),
new DeprecationDelta("dfs.xframe.enabled",
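
For context, the delta above maps the legacy HDFS-style key onto the surviving hdds.datanode.dir key through Hadoop's standard Configuration key-deprecation mechanism, which OzoneConfiguration builds on. A minimal sketch of the expected behavior (not part of this patch; the wrapper class name is illustrative only):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class DeprecatedKeySketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // A value set under the legacy HDFS-style key...
    conf.set("dfs.datanode.data.dir", "/data/disk1,/data/disk2");
    // ...is resolved through the DeprecationDelta to the supported key.
    System.out.println(conf.get(ScmConfigKeys.HDDS_DATANODE_DIR_KEY));
    // Expected: /data/disk1,/data/disk2 (with a one-time deprecation warning)
  }
}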
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 512c055e1000..7217b096afa1 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -171,8 +171,7 @@
     <name>hdds.datanode.dir</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>Determines where on the local filesystem HDDS data will be
-      stored. Defaults to hdds.datanode.data.dir if not specified.
+    <description>Determines where on the local filesystem HDDS data will be stored.
       The directories should be tagged with corresponding storage types
       ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
       storage type will be DISK if the directory does not have a storage type
@@ -4543,4 +4542,126 @@
     allowing for better identification and analysis of performance issues.
     </description>
   </property>
+
+  <property>
+    <name>hdds.datanode.dns.interface</name>
+    <value>default</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The name of the Network Interface from which a Datanode should
+      report its IP address. e.g. eth2. This setting may be required for some
+      multi-homed nodes where the Datanodes are assigned multiple hostnames
+      and it is desirable for the Datanodes to use a non-default hostname.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.dns.nameserver</name>
+    <value>default</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The host name or IP address of the name server (DNS) which a Datanode
+      should use to determine its own host name.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.hostname</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Optional. The hostname for the Datanode containing this
+      configuration file. Will be different for each machine.
+      Defaults to current hostname.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.use.datanode.hostname</name>
+    <value>false</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Whether Datanodes should use Datanode hostnames when
+      connecting to other Datanodes for data transfer.
+    </description>
+  </property>
+  <property>
+    <name>hdds.xframe.enabled</name>
+    <value>true</value>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      If true, then enables protection against clickjacking by returning
+      X_FRAME_OPTIONS header value set to SAMEORIGIN.
+      Clickjacking protection prevents an attacker from using transparent or
+      opaque layers to trick a user into clicking on a button
+      or link on another page.
+    </description>
+  </property>
+  <property>
+    <name>hdds.xframe.value</name>
+    <value>SAMEORIGIN</value>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      This configuration value allows the user to specify the value for the
+      X-FRAME-OPTIONS header. The possible values for this field are
+      DENY, SAMEORIGIN and ALLOW-FROM. Any other value will throw an
+      exception when Datanodes are starting up.
+    </description>
+  </property>
+  <property>
+    <name>hdds.metrics.session-id</name>
+    <value/>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      The user-specified session identifier. The default is the empty string.
+      The session identifier is used to tag metric data that is reported to some
+      performance metrics system via the org.apache.hadoop.metrics API. The
+      session identifier is intended, in particular, for use by Hadoop-On-Demand
+      (HOD), which allocates a virtual Hadoop cluster dynamically and transiently.
+      HOD will set the session identifier by modifying the mapred-site.xml file
+      before starting the cluster.
+      When not running under HOD, this identifier is expected to remain set to
+      the empty string.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.kerberos.principal</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The Datanode service principal. This is typically set to
+      dn/_HOST@REALM.TLD. Each Datanode will substitute _HOST with its
+      own fully qualified hostname at startup. The _HOST placeholder
+      allows using the same configuration setting on all Datanodes.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.kerberos.keytab.file</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The keytab file used by each Datanode daemon to login as its
+      service principal. The principal name is configured with
+      hdds.datanode.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>hdds.metrics.percentiles.intervals</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Comma-delimited set of integers denoting the desired rollover intervals
+      (in seconds) for percentile latency metrics on the Datanode.
+      By default, percentile latency metrics are disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>net.topology.node.switch.mapping.impl</name>
+    <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+    <tag>OZONE, SCM</tag>
+    <description>
+      The default implementation of the DNSToSwitchMapping. It
+      invokes a script specified in net.topology.script.file.name to resolve
+      node names. If the value for net.topology.script.file.name is not set, the
+      default value of DEFAULT_RACK is returned for all node names.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
index 7547036a59fd..63157450a19e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
@@ -98,7 +98,7 @@ public void setUp() throws IOException {
conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);
String volumeDir = testDir + OZONE_URI_DELIMITER + "disk1";
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
}
@ParameterizedTest
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index ec64da763786..b0773db76665 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
@@ -97,7 +98,7 @@ public static void setUp() throws Exception {
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
//conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
String volumeDir = testDir + "/disk1";
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index 5b9a5abe0d85..8f84eb57516d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
@@ -301,6 +302,7 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull()
// Setting up the test environment
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString());
+ configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails();
DatanodeConfiguration dnConf =
configuration.getObject(DatanodeConfiguration.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index ab99d5f8837e..d3fc67d053e7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -76,7 +76,7 @@ public void setup() throws Exception {
String dataDirKey = volume1 + "," + volume2;
volumes.add(volume1);
volumes.add(volume2);
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, dataDirKey);
+ conf.set(HDDS_DATANODE_DIR_KEY, dataDirKey);
conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
dataDirKey);
initializeVolumeSet();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 4f1838ce9f7f..60eea0ac3fb5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -90,7 +90,7 @@ public class TestVolumeSetDiskChecks {
@AfterEach
public void cleanup() {
final Collection<String> dirs = conf.getTrimmedStringCollection(
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
for (String d: dirs) {
FileUtils.deleteQuietly(new File(d));
@@ -115,7 +115,7 @@ public void testOzoneDirsAreCreated() throws IOException {
// Verify that the Ozone dirs were created during initialization.
Collection<String> dirs = conf.getTrimmedStringCollection(
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
for (String d : dirs) {
assertTrue(new File(d).isDirectory());
}
@@ -222,7 +222,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
for (int i = 0; i < numDirs; ++i) {
dirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
- ozoneConf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
+ ozoneConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
String.join(",", dirs));

final List<String> metaDirs = new ArrayList<>();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index f03a3f407946..65d1d4553aa9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -43,6 +43,7 @@
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -74,6 +75,7 @@ class TestContainerImporter {
@BeforeEach
void setup() {
conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
index a8fbd1a60d92..be2c315198b7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
@@ -24,8 +24,10 @@
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
+import java.io.File;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -38,17 +40,22 @@
import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
/**
* Test for {@link SendContainerRequestHandler}.
*/
class TestSendContainerRequestHandler {
+ @TempDir
+ private File tempDir;
+
private OzoneConfiguration conf;
@BeforeEach
void setup() {
conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
}
@Test
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index f58887ad6104..81d3ec169752 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.utils;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL;
@@ -404,16 +403,10 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
     return rawLocations;
   }

-  public static Collection<String> getDatanodeStorageDirs(
-      ConfigurationSource conf) {
-    Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        HDDS_DATANODE_DIR_KEY);
-    if (rawLocations.isEmpty()) {
-      rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DATA_DIR_KEY);
-    }
+  public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
+    Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
     if (rawLocations.isEmpty()) {
-      throw new IllegalArgumentException("No location configured in either "
-          + HDDS_DATANODE_DIR_KEY + " or " + HDDS_DATANODE_DATA_DIR_KEY);
+      throw new IllegalArgumentException("No location configured in " + HDDS_DATANODE_DIR_KEY);
     }
     return rawLocations;
   }
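
With the fallback removed, getDatanodeStorageDirs consults only hdds.datanode.dir and fails fast when it is unset. A minimal sketch of the resulting behavior (not part of this patch; the wrapper class name is illustrative only):

import java.util.Collection;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.utils.HddsServerUtil;

public class StorageDirLookupSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, "/data/disk1,/data/disk2");
    Collection<String> dirs = HddsServerUtil.getDatanodeStorageDirs(conf);
    System.out.println(dirs); // [/data/disk1, /data/disk2]
    // With hdds.datanode.dir unset, the call now throws
    // IllegalArgumentException("No location configured in hdds.datanode.dir")
    // instead of falling back to the removed hdds.datanode.data.dir key.
  }
}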
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 63afb9aed66e..9c040f9aa006 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -132,19 +132,7 @@ private void addPropertiesNotInXml() {
HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT,
OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT,
- OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY,
- HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY,
- HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY,
- HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY,
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
- HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
- HddsConfigKeys.HDDS_XFRAME_OPTION_ENABLED,
- HddsConfigKeys.HDDS_XFRAME_OPTION_VALUE,
- HddsConfigKeys.HDDS_METRICS_SESSION_ID_KEY,
- ScmConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
- HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
- HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY
+ OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY
));
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
index e9672bc601e5..1029860375dd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
@@ -96,7 +95,6 @@ public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException {
}
String reservedSpaceString = String.join(",", reservedSpaceList);
String listOfDirs = String.join(",", dataDirs);
- dnConf.set(HDDS_DATANODE_DATA_DIR_KEY, listOfDirs);
dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs);
dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString);