@@ -405,4 +405,32 @@ private HddsConfigKeys() {
 
   public static final String OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY =
       "ozone.volume.io.percentiles.intervals.seconds";
+
+  public static final String HDDS_DATANODE_DNS_INTERFACE_KEY =
+      "hdds.datanode.dns.interface";
+  public static final String HDDS_DATANODE_DNS_NAMESERVER_KEY =
+      "hdds.datanode.dns.nameserver";
+  public static final String HDDS_DATANODE_HOST_NAME_KEY =
+      "hdds.datanode.hostname";
+  public static final String HDDS_DATANODE_DATA_DIR_KEY =
+      "hdds.datanode.data.dir";
+  public static final String HDDS_DATANODE_USE_DN_HOSTNAME =
+      "hdds.datanode.use.datanode.hostname";
+  public static final boolean HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
+
+  public static final String HDDS_XFRAME_OPTION_ENABLED = "hdds.xframe.enabled";
+  public static final boolean HDDS_XFRAME_OPTION_ENABLED_DEFAULT = true;
+  public static final String HDDS_XFRAME_OPTION_VALUE = "hdds.xframe.value";
+  public static final String HDDS_XFRAME_OPTION_VALUE_DEFAULT = "SAMEORIGIN";
+
+  public static final String HDDS_METRICS_SESSION_ID_KEY =
+      "hdds.metrics.session-id";
+
+  public static final String HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY =
+      "hdds.datanode.kerberos.principal";
+  public static final String HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY =
+      "hdds.datanode.kerberos.keytab.file";
+  public static final String HDDS_METRICS_PERCENTILES_INTERVALS_KEY =
+      "hdds.metrics.percentiles.intervals";
+
 }
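
As a quick sanity check for the new keys, here is a minimal, self-contained sketch (class name and hostname value are invented for illustration) of setting and reading them through OzoneConfiguration:

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class NewKeySmokeSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Placeholder hostname, for illustration only.
    conf.set(HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY, "dn1.example.com");

    // Prints dn1.example.com.
    System.out.println(conf.get(HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY));

    // Unset boolean keys fall back to the supplied default, false here.
    System.out.println(conf.getBoolean(
        HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
        HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT));
  }
}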

@@ -17,14 +17,14 @@
 
 package org.apache.hadoop.hdds;
 
-import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_BIND_HOST_DEFAULT;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_BIND_HOST_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;

@@ -361,7 +361,7 @@ public static InetSocketAddress getReconAddresses(
    */
   public static String getHostName(ConfigurationSource conf)
       throws UnknownHostException {
-    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
+    String name = conf.get(HDDS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
       String dnsInterface = conf.get(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY);

@@ -371,9 +371,9 @@ public static String getHostName(ConfigurationSource conf)
 
       if (dnsInterface == null) {
         // Try the legacy configuration keys.
-        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
-        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
-        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
+        dnsInterface = conf.get(HDDS_DATANODE_DNS_INTERFACE_KEY);
+        dnsInterface = conf.get(HDDS_DATANODE_DNS_INTERFACE_KEY);
+        nameServer = conf.get(HDDS_DATANODE_DNS_NAMESERVER_KEY);
       } else {
         // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
         // resolution if DNS fails. We will not use hosts file resolution

@@ -47,7 +47,7 @@
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

@@ -315,7 +315,7 @@ private static void addDeprecatedKeys() {
             HDDS_DATANODE_RATIS_PREFIX_KEY + "."
                 + RaftServerConfigKeys.PREFIX + "." + "rpc.slowness.timeout"),
         new DeprecationDelta("dfs.datanode.keytab.file",
-            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY),
+            HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY),
         new DeprecationDelta("ozone.scm.chunk.layout",
             ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY),
         new DeprecationDelta("hdds.datanode.replication.work.dir",

@@ -381,33 +381,33 @@ private static void addDeprecatedKeys() {
         new DeprecationDelta("dfs.ratis.snapshot.threshold",
             ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY),
         new DeprecationDelta("dfs.datanode.dns.interface",
-            DFSConfigKeysLegacy.DFS_DATANODE_DNS_INTERFACE_KEY),
+            HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY),
         new DeprecationDelta("dfs.datanode.dns.nameserver",
-            DFSConfigKeysLegacy.DFS_DATANODE_DNS_NAMESERVER_KEY),
+            HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY),
         new DeprecationDelta("dfs.datanode.hostname",
-            DFSConfigKeysLegacy.DFS_DATANODE_HOST_NAME_KEY),
+            HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY),
         new DeprecationDelta("dfs.datanode.data.dir",
-            DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY),
+            HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY),
         new DeprecationDelta("dfs.datanode.use.datanode.hostname",
-            DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME),
+            HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME),
         new DeprecationDelta("dfs.xframe.enabled",
-            DFSConfigKeysLegacy.DFS_XFRAME_OPTION_ENABLED),
+            HddsConfigKeys.HDDS_XFRAME_OPTION_ENABLED),
         new DeprecationDelta("dfs.xframe.value",
-            DFSConfigKeysLegacy.DFS_XFRAME_OPTION_VALUE),
+            HddsConfigKeys.HDDS_XFRAME_OPTION_VALUE),
         new DeprecationDelta("dfs.metrics.session-id",
-            DFSConfigKeysLegacy.DFS_METRICS_SESSION_ID_KEY),
+            HddsConfigKeys.HDDS_METRICS_SESSION_ID_KEY),
         new DeprecationDelta("dfs.client.https.keystore.resource",
             OzoneConfigKeys.OZONE_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY),
         new DeprecationDelta("dfs.https.server.keystore.resource",
             OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY),
         new DeprecationDelta("dfs.http.policy",
             OzoneConfigKeys.OZONE_HTTP_POLICY_KEY),
         new DeprecationDelta("dfs.datanode.kerberos.principal",
-            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY),
+            HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY),
         new DeprecationDelta("dfs.datanode.kerberos.keytab.file",
-            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY),
+            HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY),
         new DeprecationDelta("dfs.metrics.percentiles.intervals",
-            DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY),
+            HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY),
     });
   }
 
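These DeprecationDelta entries lean on Hadoop's standard key-deprecation machinery: once registered through Configuration.addDeprecations, a value set under an old dfs.* key is transparently visible under the new hdds.* key (with a one-time deprecation warning). A minimal sketch of that behavior using one mapping from this PR; registering the delta by hand here is only for demonstration, since in Ozone the registration happens inside addDeprecatedKeys():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;

public final class DeprecationSketch {
  public static void main(String[] args) {
    // Register one of the old-to-new mappings from this PR by hand.
    Configuration.addDeprecations(new DeprecationDelta[] {
        new DeprecationDelta("dfs.datanode.hostname",
            "hdds.datanode.hostname"),
    });

    Configuration conf = new Configuration(false);
    conf.set("dfs.datanode.hostname", "dn1.example.com"); // legacy key

    // The deprecated key is rewritten on set, so the new key sees it.
    System.out.println(conf.get("hdds.datanode.hostname"));
  }
}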

@@ -34,7 +34,7 @@
 import java.util.function.BooleanSupplier;
 import java.util.stream.Collectors;
 import javax.net.ssl.TrustManager;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;

@@ -450,8 +450,8 @@ public static Long getMinReplicatedIndex(
 
   private static boolean datanodeUseHostName() {
     return CONF.getBoolean(
-        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME,
-        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
+        HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
+        HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
   }
 
   private static <U> Class<? extends U> getClass(String name,

@@ -629,6 +629,10 @@ public final class ScmConfigKeys {
       "ozone.scm.ha.dbtransactionbuffer.flush.interval";
   public static final long
       OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL_DEFAULT = 600 * 1000L;
+
+  public static final String NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
+      "net.topology.node.switch.mapping.impl";
+
   /**
    * Never constructed.
    */
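
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY mirrors Hadoop's net.topology.node.switch.mapping.impl property, which names the DNSToSwitchMapping implementation used to place datanodes into racks. A toy resolver, as a sketch of the contract (the class and its single-rack policy are invented for illustration):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

// Maps every node to the same rack; real deployments would point the
// key at a table- or script-based implementation instead.
public class FlatRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<>(names.size());
    for (int i = 0; i < names.size(); i++) {
      racks.add("/rack0");
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() {
    // Nothing cached.
  }

  @Override
  public void reloadCachedMappings(List<String> names) {
    // Nothing cached.
  }
}

It would be wired in with conf.setClass(ScmConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, FlatRackMapping.class, DNSToSwitchMapping.class).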

@@ -595,7 +595,6 @@ public final class OzoneConfigKeys {
       OZONE_CLIENT_BUCKET_REPLICATION_CONFIG_REFRESH_PERIOD_DEFAULT_MS =
           300 * 1000;
 
-
   // Values for bucket layout configurations.
   public static final String OZONE_BUCKET_LAYOUT_LEGACY =
       "LEGACY";

@@ -44,8 +44,8 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import javax.management.ObjectName;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.DatanodeVersion;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;

@@ -249,16 +249,16 @@ public String getNamespace() {
         UserGroupInformation.AuthenticationMethod.KERBEROS)) {
       LOG.info("Ozone security is enabled. Attempting login for Hdds " +
           "Datanode user. Principal: {},keytab: {}", conf.get(
-              DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY),
+              HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY),
           conf.get(
-              DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY));
+              HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY));
 
       UserGroupInformation.setConfiguration(conf);
 
       SecurityUtil
           .login(conf,
-              DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
-              DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
+              HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
+              HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
               hostname);
     } else {
       throw new AuthenticationException(SecurityUtil.
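
For context on the login call above: SecurityUtil.login is stock Hadoop; it reads the keytab path and principal pattern from the two config keys and substitutes _HOST in the principal with the hostname argument. A sketch of a secure-datanode setup under that assumption (principal, keytab path, and hostname are placeholders):

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public final class DatanodeLoginSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
        "dn/_HOST@EXAMPLE.COM");
    conf.set(HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/dn.keytab");

    UserGroupInformation.setConfiguration(conf);
    // _HOST in the principal is replaced with the hostname argument,
    // yielding dn/dn1.example.com@EXAMPLE.COM.
    SecurityUtil.login(conf,
        HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
        HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
        "dn1.example.com");
  }
}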

@@ -19,7 +19,7 @@
 
 import java.io.Closeable;
 import java.util.EnumMap;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

@@ -94,7 +94,7 @@ public static ContainerMetrics create(ConfigurationSource conf) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     // Percentile measurement is off by default, by watching no intervals
     int[] intervals =
-        conf.getInts(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+        conf.getInts(HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY);
     return ms.register(STORAGE_CONTAINER_METRICS,
         "Storage Container Node Metrics",
         new ContainerMetrics(intervals));
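
getInts parses the key's value as a comma-separated list of interval lengths in seconds; when the key is unset it returns an empty array and percentile measurement stays disabled. A small sketch (the 60/300 values are illustrative):

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class PercentileIntervalSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Compute percentiles over rolling 60 s and 300 s windows.
    conf.set(HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY, "60,300");

    int[] intervals =
        conf.getInts(HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY);
    System.out.println(intervals.length); // 2
  }
}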

@@ -19,7 +19,7 @@
 
 import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
 
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 

@@ -32,7 +32,7 @@
     protocolVersion = 1)
 @KerberosInfo(
     serverPrincipal = OZONE_RECON_KERBEROS_PRINCIPAL_KEY,
-    clientPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    clientPrincipal = HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface ReconDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolPB {
 }

@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocolPB;
 
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.ipc.ProtocolInfo;

@@ -33,7 +33,7 @@
     protocolVersion = 1)
 @KerberosInfo(
     serverPrincipal = ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
-    clientPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    clientPrincipal = HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {
 }

@@ -34,7 +34,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;

@@ -99,7 +98,7 @@ public void setUp() throws IOException {
     conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);
 
     String volumeDir = testDir + OZONE_URI_DELIMITER + "disk1";
-    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
   }
 
   @ParameterizedTest

@@ -47,7 +47,6 @@
 import java.util.List;
 import java.util.concurrent.Callable;
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

@@ -98,7 +97,7 @@ public static void setUp() throws Exception {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
     String volumeDir = testDir + "/disk1";
-    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
 
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
     conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,

@@ -34,7 +34,6 @@
 import java.util.List;
 import java.util.UUID;
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;

@@ -77,7 +76,7 @@ public void setup() throws Exception {
     String dataDirKey = volume1 + "," + volume2;
     volumes.add(volume1);
     volumes.add(volume2);
-    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, dataDirKey);
     conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         dataDirKey);
     initializeVolumeSet();