diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index fc3f8ae6fb82..a3957adcd40f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -37,10 +37,12 @@
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.upgrade.BelongsToHDDSLayoutVersion;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS;
 import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
 
 /**
@@ -802,6 +804,7 @@ public static final class Port {
      */
     public enum Name {
       STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER,
+      @BelongsToHDDSLayoutVersion(RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS)
       RATIS_DATASTREAM;
 
       public static final Set<Name> ALL_PORTS = ImmutableSet.copyOf(
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/BelongsToHDDSLayoutVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/BelongsToHDDSLayoutVersion.java
new file mode 100644
index 000000000000..3bb59deb0c6a
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/BelongsToHDDSLayoutVersion.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.upgrade;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation to mark a class or a field declaration that belongs to a specific
+ * HDDS Layout Version.
+ */
+@Target({ElementType.TYPE, ElementType.FIELD})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface BelongsToHDDSLayoutVersion {
+  HDDSLayoutFeature value();
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
index d2b07ef017e5..ce5b6d32fb31 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
@@ -35,7 +35,9 @@ public enum HDDSLayoutFeature implements LayoutFeature {
   ERASURE_CODED_STORAGE_SUPPORT(3, "Ozone version with built in support for" +
       " Erasure Coded block data storage."),
   DATANODE_SCHEMA_V3(4, "Datanode RocksDB Schema Version 3 (one rocksdb " +
-      "per disk)");
+      "per disk)"),
+  RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS(5, "Adding the RATIS_DATASTREAM " +
+      "port to the DatanodeDetails.");
 
   //////////////////////////////
   //////////////////////////////
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index a20bcd20030e..0b3c1f20c88f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -440,7 +440,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails)
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
     Preconditions.checkNotNull(idFilePath);
     File idFile = new File(idFilePath);
-    ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
+    ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 11f54be8b93b..20bec126815a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -141,7 +141,8 @@ public static String getContainerDbFileName(String containerName) {
    * @throws IOException when read/write error occurs
    */
   public static synchronized void writeDatanodeDetailsTo(
-      DatanodeDetails datanodeDetails, File path) throws IOException {
+      DatanodeDetails datanodeDetails, File path, ConfigurationSource conf)
+      throws IOException {
     if (path.exists()) {
       if (!path.delete() || !path.createNewFile()) {
         throw new IOException("Unable to overwrite the datanode ID file.");
@@ -152,7 +153,7 @@ public static synchronized void writeDatanodeDetailsTo(
         throw new IOException("Unable to create datanode ID directories.");
       }
     }
-    DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path);
+    DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path, conf);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
index 29fba398976d..f8acbc7e2d69 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
+import java.lang.reflect.Field;
 import java.nio.charset.StandardCharsets;
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -30,9 +31,15 @@
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.server.YamlUtils;
+import org.apache.hadoop.hdds.upgrade.BelongsToHDDSLayoutVersion;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.yaml.snakeyaml.DumperOptions;
 import org.yaml.snakeyaml.Yaml;
@@ -41,6 +48,9 @@
  */
 public final class DatanodeIdYaml {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeIdYaml.class);
+
   private DatanodeIdYaml() {
     // static helper methods only, no state.
   }
@@ -53,7 +63,9 @@ private DatanodeIdYaml() {
    * @param path Path to datnode.id file
    */
   public static void createDatanodeIdFile(DatanodeDetails datanodeDetails,
-      File path) throws IOException {
+      File path,
+      ConfigurationSource conf)
+      throws IOException {
     DumperOptions options = new DumperOptions();
     options.setPrettyFlow(true);
     options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW);
@@ -61,7 +73,7 @@ public static void createDatanodeIdFile(DatanodeDetails datanodeDetails,
 
     try (Writer writer = new OutputStreamWriter(
         new FileOutputStream(path), StandardCharsets.UTF_8)) {
-      yaml.dump(getDatanodeDetailsYaml(datanodeDetails), writer);
+      yaml.dump(getDatanodeDetailsYaml(datanodeDetails, conf), writer);
     }
   }
 
@@ -219,11 +231,32 @@ public void setCurrentVersion(int version) {
   }
 
   private static DatanodeDetailsYaml getDatanodeDetailsYaml(
-      DatanodeDetails datanodeDetails) {
+      DatanodeDetails datanodeDetails, ConfigurationSource conf)
+      throws IOException {
+
+    DatanodeLayoutStorage datanodeLayoutStorage
+        = new DatanodeLayoutStorage(conf, datanodeDetails.getUuidString());
 
     Map<String, Integer> portDetails = new LinkedHashMap<>();
     if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) {
       for (DatanodeDetails.Port port : datanodeDetails.getPorts()) {
+        Field f = null;
+        try {
+          f = DatanodeDetails.Port.Name.class
+              .getDeclaredField(port.getName().name());
+        } catch (NoSuchFieldException e) {
+          LOG.error("There is no such field as {} in {}", port.getName().name(),
+              DatanodeDetails.Port.Name.class);
+        }
+        if (f != null
+            && f.isAnnotationPresent(BelongsToHDDSLayoutVersion.class)) {
+          HDDSLayoutFeature layoutFeature
+              = f.getAnnotation(BelongsToHDDSLayoutVersion.class).value();
+          if (layoutFeature.layoutVersion() >
+              datanodeLayoutStorage.getLayoutVersion()) {
+            continue;
+          }
+        }
         portDetails.put(port.getName().toString(), port.getValue());
       }
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
index 42e58ebc7bbe..3f10ef456045 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
@@ -108,7 +108,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails)
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
     Preconditions.checkNotNull(idFilePath);
     File idFile = new File(idFilePath);
-    ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
+    ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index cd469620e1b5..290d39f88ecd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -127,7 +127,7 @@ private void persistContainerDatanodeDetails() {
         .getDatanodeDetails();
     if (datanodeDetails != null) {
       try {
-        ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
+        ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
       } catch (IOException ex) {
         // As writing DatanodeDetails in to datanodeid file failed, which is
         // a critical thing, so shutting down the state machine.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index f2faeaa3a358..1fc56b29639f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -218,7 +218,7 @@ public void testDatanodeStateContext() throws IOException,
         DatanodeDetails.Port.Name.STANDALONE,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     datanodeDetails.setPort(port);
-    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
+    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
 
     try (DatanodeStateMachine stateMachine =
         new DatanodeStateMachine(datanodeDetails, conf, null, null, null)) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
index 953db528ee9f..e56946905a7c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -25,6 +27,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ByteStringConversion;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
@@ -49,6 +52,14 @@
  */
 public class TestContainerUtils {
 
+  private OzoneConfiguration conf;
+
+  @BeforeEach
+  void setup(@TempDir File dir) {
+    conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+  }
+
   @Test
   public void redactsDataBuffers() {
     // GIVEN
@@ -112,11 +123,11 @@ public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception {
     assertWriteRead(tempDir, id1);
   }
 
-  private static void assertWriteRead(@TempDir File tempDir,
+  private void assertWriteRead(@TempDir File tempDir,
       DatanodeDetails details) throws IOException {
     // Write a single ID to the file and read it out
     File file = new File(tempDir, "valid-values.id");
-    ContainerUtils.writeDatanodeDetailsTo(details, file);
+    ContainerUtils.writeDatanodeDetailsTo(details, file, conf);
 
     DatanodeDetails read =
         ContainerUtils.readDatanodeDetailsFrom(file);
@@ -127,7 +138,7 @@ private void createMalformedIDFile(File malformedFile)
       throws IOException {
     DatanodeDetails id = randomDatanodeDetails();
-    ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
+    ContainerUtils.writeDatanodeDetailsTo(id, malformedFile, conf);
 
     try (FileOutputStream out = new FileOutputStream(malformedFile)) {
       out.write("malformed".getBytes(StandardCharsets.UTF_8));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
index 1c69bd049f24..c3d70da8a233 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java
@@ -17,14 +17,20 @@
  */
 package org.apache.hadoop.ozone.container.common.helpers;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.UUID;
 
+import static org.junit.Assert.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
@@ -37,11 +43,57 @@ void testWriteRead(@TempDir File dir) throws IOException {
     DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
     File file = new File(dir, "datanode.yaml");
 
-    DatanodeIdYaml.createDatanodeIdFile(original, file);
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+
+    DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
     DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);
 
     assertEquals(original, read);
     assertEquals(original.toDebugString(), read.toDebugString());
   }
 
+  @Test
+  void testWriteReadBeforeRatisDatastreamPortLayoutVersion(@TempDir File dir)
+      throws IOException {
+    DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
+    File file = new File(dir, "datanode.yaml");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        UUID.randomUUID().toString(),
+        HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion());
+    layoutStorage.initialize();
+
+    DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
+    DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);
+
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
+    // if no separate admin/server/datastream port, return single Ratis one for
+    // compat
+    assertEquals(read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM),
+        read.getPort(DatanodeDetails.Port.Name.RATIS));
+  }
+
+  @Test
+  void testWriteReadAfterRatisDatastreamPortLayoutVersion(@TempDir File dir)
+      throws IOException {
+    DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
+    File file = new File(dir, "datanode.yaml");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        UUID.randomUUID().toString(),
+        HDDSLayoutFeature.RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS
+            .layoutVersion());
+    layoutStorage.initialize();
+
+    DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
+    DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);
+
+    assertNotNull(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
+    assertEquals(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM),
+        read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
+  }
+
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
index 7af39e5e7f19..1670c1ebc076 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
@@ -289,7 +289,8 @@ public void testAddHddsVolumeAfterFinalize() throws Exception {
     // Add a new HddsVolume. It should have DB created after DN restart.
     addHddsVolume();
-    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
+    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
+        false);
 
     for (StorageVolume vol:
         dsm.getContainer().getVolumeSet().getVolumesList()) {
       HddsVolume hddsVolume = (HddsVolume) vol;
@@ -323,7 +324,8 @@ public void testAddDbVolumeAfterFinalize() throws Exception {
     // Add a new DbVolume
     addDbVolume();
-    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
+    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
+        false);
 
     // HddsVolume should still use the rocksDB under it's volume
     DbVolume dbVolume = (DbVolume) dsm.getContainer().getDbVolumeSet()
@@ -352,7 +354,8 @@ public void testAddDbAndHddsVolumeAfterFinalize() throws Exception {
     addDbVolume();
     File newDataVolume = addHddsVolume();
-    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
+    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
+        false);
 
     DbVolume dbVolume = (DbVolume) dsm.getContainer().getDbVolumeSet()
         .getVolumesList().get(0);
@@ -424,7 +427,8 @@ public void testWrite(boolean enable, String expectedVersion)
     // Set SchemaV3 enable status
     conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED,
         enable);
-    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
+    restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
+        false);
 
     // Write new data
     final long containerID2 = addContainer(pipeline);
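
Note on the gating mechanism in this patch: each enum constant of DatanodeDetails.Port.Name compiles to a public static final field of the enum class, which is why DatanodeIdYaml can look the constant up with Class#getDeclaredField and read the @BelongsToHDDSLayoutVersion annotation from it. Any port whose introducing layout feature is newer than the layout version recorded in the datanode's layout storage is skipped while writing datanode.id, so a datanode that has not yet finalized RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS never persists a field that pre-upgrade software could fail to parse. The self-contained sketch below illustrates the same annotation-on-enum-constant pattern in isolation; all names in it (IntroducedIn, PortName, LayoutGateDemo) are hypothetical stand-ins, not Ozone APIs:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Field;

    public final class LayoutGateDemo {

      // Hypothetical stand-in for @BelongsToHDDSLayoutVersion.
      @Target(ElementType.FIELD)
      @Retention(RetentionPolicy.RUNTIME)
      @interface IntroducedIn {
        int layoutVersion();
      }

      // Hypothetical stand-in for DatanodeDetails.Port.Name; the constant
      // added by a newer layout feature carries the gating annotation.
      enum PortName {
        STANDALONE, RATIS,
        @IntroducedIn(layoutVersion = 5)
        RATIS_DATASTREAM
      }

      // Returns true when the port may be persisted under the given on-disk
      // layout version, mirroring the check in getDatanodeDetailsYaml above:
      // un-annotated constants are always persisted, annotated ones only once
      // the disk layout has caught up.
      static boolean isPersistable(PortName port, int diskLayoutVersion)
          throws NoSuchFieldException {
        // An enum constant is a public static final field of its enum class,
        // so the per-constant annotation is reachable via the Field object.
        Field f = PortName.class.getDeclaredField(port.name());
        IntroducedIn gate = f.getAnnotation(IntroducedIn.class);
        return gate == null || gate.layoutVersion() <= diskLayoutVersion;
      }

      public static void main(String[] args) throws Exception {
        for (PortName port : PortName.values()) {
          // At layout version 4 (pre-finalization) RATIS_DATASTREAM is
          // filtered out; STANDALONE and RATIS are kept.
          System.out.printf("%s persisted at layout v4: %b%n",
              port, isPersistable(port, 4));
        }
      }
    }

The reflective detour through the Field object is what makes this work: annotations placed on individual enum constants are attached to the generated fields, so they cannot be read from the values returned by values() directly.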