diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index a3957adcd40f..b15aa64f6292 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -42,6 +42,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.WEBUI_PORTS_IN_DATANODEDETAILS;
 import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
 
@@ -805,7 +806,11 @@ public static final class Port {
     public enum Name {
       STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER,
       @BelongsToHDDSLayoutVersion(RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS)
-      RATIS_DATASTREAM;
+      RATIS_DATASTREAM,
+      @BelongsToHDDSLayoutVersion(WEBUI_PORTS_IN_DATANODEDETAILS)
+      HTTP,
+      @BelongsToHDDSLayoutVersion(WEBUI_PORTS_IN_DATANODEDETAILS)
+      HTTPS;
 
       public static final Set<Name> ALL_PORTS = ImmutableSet.copyOf(
           Name.values());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
index ce5b6d32fb31..bfe3099c9661 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
@@ -37,7 +37,8 @@ public enum HDDSLayoutFeature implements LayoutFeature {
   DATANODE_SCHEMA_V3(4, "Datanode RocksDB Schema Version 3 (one rocksdb " +
       "per disk)"),
   RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS(5, "Adding the RATIS_DATASTREAM " +
-      "port to the DatanodeDetails.");
+      "port to the DatanodeDetails."),
+  WEBUI_PORTS_IN_DATANODEDETAILS(6, "Adding HTTP and HTTPS ports " +
+      "to DatanodeDetails.");
 
 //////////////////////////////
 //////////////////////////////
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index f6e756cd25ce..e06c2565a720 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest;
+import org.apache.hadoop.hdds.server.http.HttpConfig;
 import org.apache.hadoop.hdds.server.http.RatisDropwizardExports;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
@@ -68,6 +69,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.HTTP;
+import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.HTTPS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
 import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
@@ -296,6 +299,15 @@ public void start() {
     try {
       httpServer = new HddsDatanodeHttpServer(conf);
       httpServer.start();
+      HttpConfig.Policy policy = HttpConfig.getHttpPolicy(conf);
+      if (policy.isHttpEnabled()) {
+        datanodeDetails.setPort(DatanodeDetails.newPort(HTTP,
+            httpServer.getHttpAddress().getPort()));
+      }
+      if (policy.isHttpsEnabled()) {
+        datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS,
+            httpServer.getHttpsAddress().getPort()));
+      }
     } catch (Exception ex) {
       LOG.error("HttpServer failed to start.", ex);
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
index c3d70da8a233..8a2728681469 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
@@ -30,8 +30,9 @@
 import java.io.IOException;
 import java.util.UUID;
 
-import static org.junit.Assert.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 /**
  * Tests for {@link DatanodeIdYaml}.
@@ -96,4 +97,48 @@ void testWriteReadAfterRatisDatastreamPortLayoutVersion(@TempDir File dir)
         read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
   }
 
+  @Test
+  void testWriteReadBeforeWebUIPortLayoutVersion(@TempDir File dir)
+      throws IOException {
+    DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
+    File file = new File(dir, "datanode.yaml");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        UUID.randomUUID().toString(),
+        HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion());
+    layoutStorage.initialize();
+
+    DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
+    DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);
+
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.HTTP));
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.HTTPS));
+    assertNull(read.getPort(DatanodeDetails.Port.Name.HTTP));
+    assertNull(read.getPort(DatanodeDetails.Port.Name.HTTPS));
+  }
+
+  @Test
+  void testWriteReadAfterWebUIPortLayoutVersion(@TempDir File dir)
+      throws IOException {
+    DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
+    File file = new File(dir, "datanode.yaml");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        UUID.randomUUID().toString(),
+        HDDSLayoutFeature.WEBUI_PORTS_IN_DATANODEDETAILS.layoutVersion());
+    layoutStorage.initialize();
+
+    DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
+    DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);
+
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.HTTP));
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.HTTPS));
+    assertEquals(original.getPort(DatanodeDetails.Port.Name.HTTP),
+        read.getPort(DatanodeDetails.Port.Name.HTTP));
+    assertEquals(original.getPort(DatanodeDetails.Port.Name.HTTPS),
+        read.getPort(DatanodeDetails.Port.Name.HTTPS));
+  }
+
 }