@@ -412,8 +412,6 @@ private HddsConfigKeys() {
"hdds.datanode.dns.nameserver";
public static final String HDDS_DATANODE_HOST_NAME_KEY =
"hdds.datanode.hostname";
- public static final String HDDS_DATANODE_DATA_DIR_KEY =
- "hdds.datanode.data.dir";
public static final String HDDS_DATANODE_USE_DN_HOSTNAME =
"hdds.datanode.use.datanode.hostname";
public static final boolean HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;

@@ -387,7 +387,7 @@ private static void addDeprecatedKeys() {
new DeprecationDelta("dfs.datanode.hostname",
HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY),
new DeprecationDelta("dfs.datanode.data.dir",
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY),
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY),
new DeprecationDelta("dfs.datanode.use.datanode.hostname",
HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME),
new DeprecationDelta("dfs.xframe.enabled",
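
As background, here is a minimal, self-contained sketch (not code from this PR) of how the DeprecationDelta above behaves at runtime. The key names match the delta; everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;

public class DeprecationDemo {
  public static void main(String[] args) {
    // Register the mapping, as addDeprecatedKeys() does above.
    Configuration.addDeprecations(new DeprecationDelta[] {
        new DeprecationDelta("dfs.datanode.data.dir", "hdds.datanode.dir")
    });
    Configuration conf = new Configuration(false);
    // A legacy config file still sets the deprecated key...
    conf.set("dfs.datanode.data.dir", "/data/disk1");
    // ...but lookups through the replacement key resolve to its value,
    // and a deprecation warning is logged.
    System.out.println(conf.get("hdds.datanode.dir")); // /data/disk1
  }
}

This is why the delta can point straight at ScmConfigKeys.HDDS_DATANODE_DIR_KEY: old dfs.datanode.data.dir settings keep working without the intermediate hdds.datanode.data.dir key.
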
125 changes: 123 additions & 2 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -171,8 +171,7 @@
<name>hdds.datanode.dir</name>
<value/>
<tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
- <description>Determines where on the local filesystem HDDS data will be
- stored. Defaults to hdds.datanode.data.dir if not specified.
+ <description>Determines where on the local filesystem HDDS data will be stored.
The directories should be tagged with corresponding storage types
([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
storage type will be DISK if the directory does not have a storage type
@@ -4543,4 +4542,126 @@
allowing for better identification and analysis of performance issues.
</description>
</property>

+ <property>
+ <name>hdds.datanode.dns.interface</name>
+ <value>default</value>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ The name of the Network Interface from which a Datanode should
+ report its IP address. e.g. eth2. This setting may be required for some
+ multi-homed nodes where the Datanodes are assigned multiple hostnames
+ and it is desirable for the Datanodes to use a non-default hostname.
+ </description>
+ </property>
+ <property>
+ <name>hdds.datanode.dns.nameserver</name>
+ <value>default</value>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ The host name or IP address of the name server (DNS) which a Datanode
+ should use to determine its own host name.
+ </description>
+ </property>
+ <property>
+ <name>hdds.datanode.hostname</name>
+ <value/>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ Optional. The hostname for the Datanode containing this
+ configuration file. Will be different for each machine.
+ Defaults to current hostname.
+ </description>
+ </property>
+ <property>
+ <name>hdds.datanode.use.datanode.hostname</name>
+ <value>false</value>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ Whether Datanodes should use Datanode hostnames when
+ connecting to other Datanodes for data transfer.
+ </description>
+ </property>
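
A hedged sketch of how these three hostname-related settings typically combine, using Hadoop's org.apache.hadoop.net.DNS utility; this mirrors the usual HDFS pattern and is not code from this PR.

import java.net.UnknownHostException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.net.DNS;

public class DatanodeHostnameSketch {
  public static void main(String[] args) throws UnknownHostException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // An explicitly configured hostname wins; otherwise fall back to a
    // DNS lookup on the configured interface and nameserver.
    String hostname = conf.get("hdds.datanode.hostname");
    if (hostname == null || hostname.isEmpty()) {
      hostname = DNS.getDefaultHost(
          conf.get("hdds.datanode.dns.interface", "default"),
          conf.get("hdds.datanode.dns.nameserver", "default"));
    }
    System.out.println("Datanode reports: " + hostname);
  }
}
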
+ <property>
+ <name>hdds.xframe.enabled</name>
+ <value>true</value>
+ <tag>OZONE, HDDS</tag>
+ <description>
+ If true, then enables protection against clickjacking by returning
+ X-FRAME-OPTIONS header value set to SAMEORIGIN.
+ Clickjacking protection prevents an attacker from using transparent or
+ opaque layers to trick a user into clicking on a button
+ or link on another page.
+ </description>
+ </property>
+ <property>
+ <name>hdds.xframe.value</name>
+ <value>SAMEORIGIN</value>
+ <tag>OZONE, HDDS</tag>
+ <description>
+ This configuration value allows the user to specify the value for the
+ X-FRAME-OPTIONS header. The possible values for this field are
+ DENY, SAMEORIGIN and ALLOW-FROM. Any other value will throw an
+ exception when Datanodes are starting up.
+ </description>
+ </property>
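
A hypothetical validation sketch matching the description above: any value other than DENY, SAMEORIGIN, or ALLOW-FROM fails fast, as a Datanode would at startup. The class and method names are illustrative.

import java.util.Arrays;
import java.util.List;

public class XFrameOptionCheck {
  private static final List<String> ALLOWED =
      Arrays.asList("DENY", "SAMEORIGIN", "ALLOW-FROM");

  static String validate(String value) {
    if (!ALLOWED.contains(value)) {
      throw new IllegalArgumentException(
          "Unsupported X-FRAME-OPTIONS value: " + value);
    }
    return value;
  }

  public static void main(String[] args) {
    System.out.println(validate("SAMEORIGIN")); // ok
    // validate("ALLOWALL") would throw IllegalArgumentException.
  }
}
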
+ <property>
+ <name>hdds.metrics.session-id</name>
+ <value/>
+ <tag>OZONE, HDDS</tag>
+ <description>
+ The user-specified session identifier. The default is the empty string.
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD), which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the mapred-site.xml file
+ before starting the cluster.
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+ </description>
+ </property>
+ <property>
+ <name>hdds.datanode.kerberos.principal</name>
+ <value/>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ The Datanode service principal. This is typically set to
+ dn/_HOST@REALM.TLD. Each Datanode will substitute _HOST with its
+ own fully qualified hostname at startup. The _HOST placeholder
+ allows using the same configuration setting on all Datanodes.
+ </description>
+ </property>
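
The _HOST substitution described here is the standard Hadoop mechanism; a short sketch using org.apache.hadoop.security.SecurityUtil (the principal string is illustrative):

import java.io.IOException;
import org.apache.hadoop.security.SecurityUtil;

public class PrincipalSubstitution {
  public static void main(String[] args) throws IOException {
    // Passing a null hostname substitutes the local fully qualified
    // domain name for the _HOST placeholder.
    String principal = SecurityUtil.getServerPrincipal(
        "dn/_HOST@REALM.TLD", (String) null);
    System.out.println(principal); // e.g. dn/dn1.example.com@REALM.TLD
  }
}
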
+ <property>
+ <name>hdds.datanode.kerberos.keytab.file</name>
+ <value/>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ The keytab file used by each Datanode daemon to login as its
+ service principal. The principal name is configured with
+ hdds.datanode.kerberos.principal.
+ </description>
+ </property>
+ <property>
+ <name>hdds.metrics.percentiles.intervals</name>
+ <value/>
+ <tag>OZONE, DATANODE</tag>
+ <description>
+ Comma-delimited set of integers denoting the desired rollover intervals
+ (in seconds) for percentile latency metrics on the Datanode.
+ By default, percentile latency metrics are disabled.
+ </description>
+ </property>
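
A small sketch of consuming this comma-delimited setting via the standard Configuration.getInts helper; the interval values are illustrative.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class PercentileIntervals {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("hdds.metrics.percentiles.intervals", "60,300,900");
    for (int interval : conf.getInts("hdds.metrics.percentiles.intervals")) {
      System.out.println("Percentile rollover every " + interval + "s");
    }
  }
}
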

+ <property>
+ <name>net.topology.node.switch.mapping.impl</name>
+ <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+ <tag>OZONE, SCM</tag>
+ <description>
+ The default implementation of the DNSToSwitchMapping. It
+ invokes a script specified in net.topology.script.file.name to resolve
+ node names. If the value for net.topology.script.file.name is not set, the
+ default value of DEFAULT_RACK is returned for all node names.
+ </description>
+ </property>
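
A hedged sketch of how a service resolves rack locations through this setting; with no net.topology.script.file.name configured, ScriptBasedMapping places every node in the default rack.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.ReflectionUtils;

public class RackResolution {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Class<? extends DNSToSwitchMapping> clazz = conf.getClass(
        "net.topology.node.switch.mapping.impl",
        ScriptBasedMapping.class, DNSToSwitchMapping.class);
    DNSToSwitchMapping mapping = ReflectionUtils.newInstance(clazz, conf);
    // Without a topology script, every node resolves to /default-rack.
    System.out.println(mapping.resolve(Arrays.asList("dn1.example.com")));
  }
}
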
</configuration>
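
Tying back to the hdds.datanode.dir description near the top of this file: a minimal sketch of setting storage-type-tagged directories programmatically. The paths are illustrative; per the description, untagged entries default to DISK.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class DataDirConfig {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Tag each directory with its storage type; the last entry, left
    // untagged, is treated as DISK.
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
        "[SSD]/mnt/ssd1/hdds,[DISK]/mnt/disk1/hdds,/mnt/disk2/hdds");
  }
}
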

@@ -98,7 +98,7 @@ public void setUp() throws IOException {
conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);

String volumeDir = testDir + OZONE_URI_DELIMITER + "disk1";
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
}

@ParameterizedTest

@@ -53,6 +53,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
@@ -97,7 +98,7 @@ public static void setUp() throws Exception {
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
//conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
String volumeDir = testDir + "/disk1";
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);

conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,

@@ -55,6 +55,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
@@ -301,6 +302,7 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull()
// Setting up the test environment
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString());
+ configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails();
DatanodeConfiguration dnConf =
configuration.getObject(DatanodeConfiguration.class);

@@ -76,7 +76,7 @@ public void setup() throws Exception {
String dataDirKey = volume1 + "," + volume2;
volumes.add(volume1);
volumes.add(volume2);
- conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, dataDirKey);
+ conf.set(HDDS_DATANODE_DIR_KEY, dataDirKey);
conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
dataDirKey);
initializeVolumeSet();

@@ -90,7 +90,7 @@ public class TestVolumeSetDiskChecks {
@AfterEach
public void cleanup() {
final Collection<String> dirs = conf.getTrimmedStringCollection(
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY);

for (String d: dirs) {
FileUtils.deleteQuietly(new File(d));
@@ -115,7 +115,7 @@ public void testOzoneDirsAreCreated() throws IOException {

// Verify that the Ozone dirs were created during initialization.
Collection<String> dirs = conf.getTrimmedStringCollection(
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+ ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
for (String d : dirs) {
assertTrue(new File(d).isDirectory());
}
@@ -222,7 +222,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
for (int i = 0; i < numDirs; ++i) {
dirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
- ozoneConf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
+ ozoneConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
String.join(",", dirs));

final List<String> metaDirs = new ArrayList<>();

@@ -43,6 +43,7 @@
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
Expand Down Expand Up @@ -74,6 +75,7 @@ class TestContainerImporter {
@BeforeEach
void setup() {
conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
}

@Test

@@ -24,8 +24,10 @@
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

+ import java.io.File;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -38,17 +40,22 @@
import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+ import org.junit.jupiter.api.io.TempDir;

/**
* Test for {@link SendContainerRequestHandler}.
*/
class TestSendContainerRequestHandler {

+ @TempDir
+ private File tempDir;
+
private OzoneConfiguration conf;

@BeforeEach
void setup() {
conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
}

@Test

@@ -17,7 +17,6 @@

package org.apache.hadoop.hdds.utils;

- import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL;
@@ -404,16 +403,10 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
return rawLocations;
}

- public static Collection<String> getDatanodeStorageDirs(
- ConfigurationSource conf) {
- Collection<String> rawLocations = conf.getTrimmedStringCollection(
- HDDS_DATANODE_DIR_KEY);
- if (rawLocations.isEmpty()) {
- rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DATA_DIR_KEY);
- }
+ public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
+ Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
+ if (rawLocations.isEmpty()) {
- throw new IllegalArgumentException("No location configured in either "
- + HDDS_DATANODE_DIR_KEY + " or " + HDDS_DATANODE_DATA_DIR_KEY);
+ throw new IllegalArgumentException("No location configured in " + HDDS_DATANODE_DIR_KEY);
}
return rawLocations;
}
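
A usage sketch of the simplified helper after this change: only hdds.datanode.dir is consulted, and an empty setting fails immediately. The enclosing utility class name (HddsServerUtil) is an assumption, since the file name is not visible in this diff.

import java.util.Collection;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.utils.HddsServerUtil; // assumed enclosing class

public class StorageDirsUsage {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, "/data/disk1,/data/disk2");
    Collection<String> dirs = HddsServerUtil.getDatanodeStorageDirs(conf);
    System.out.println(dirs); // [/data/disk1, /data/disk2]
    // With the key unset, the call now throws IllegalArgumentException
    // instead of falling back to the removed hdds.datanode.data.dir.
  }
}
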

@@ -132,19 +132,7 @@ private void addPropertiesNotInXml() {
HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT,
OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT,
- OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY,
- HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY,
- HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY,
- HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY,
- HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
- HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
- HddsConfigKeys.HDDS_XFRAME_OPTION_ENABLED,
- HddsConfigKeys.HDDS_XFRAME_OPTION_VALUE,
- HddsConfigKeys.HDDS_METRICS_SESSION_ID_KEY,
- ScmConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
- HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
- HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY
+ OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY
));
}
}

@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone;

import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
- import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
@@ -96,7 +95,6 @@ public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException {
}
String reservedSpaceString = String.join(",", reservedSpaceList);
String listOfDirs = String.join(",", dataDirs);
- dnConf.set(HDDS_DATANODE_DATA_DIR_KEY, listOfDirs);
dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs);
dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString);
