diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 5b6fb6fe9b81..885710646d62 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -371,6 +371,9 @@ public static DatanodeDetails.Builder newBuilder( builder.setPersistedOpStateExpiry( datanodeDetailsProto.getPersistedOpStateExpiry()); } + if (datanodeDetailsProto.hasCurrentVersion()) { + builder.setCurrentVersion(datanodeDetailsProto.getCurrentVersion()); + } return builder; } @@ -475,6 +478,8 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( } } + builder.setCurrentVersion(currentVersion); + return builder; } @@ -505,6 +510,7 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { } /** + * Note: Datanode initial version is not passed to the client due to no use case. See HDDS-9884 * @return the version this datanode was initially created with */ public int getInitialVersion() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index f59622cb0faf..bbaf58d36b4f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -233,7 +233,6 @@ public void start() { datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); - datanodeDetails.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); TracingUtil.initTracing( "HddsDatanodeService." 
+ datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -424,17 +423,19 @@ private DatanodeDetails initializeDatanodeDetails() String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); Preconditions.checkNotNull(idFilePath); File idFile = new File(idFilePath); + DatanodeDetails details; if (idFile.exists()) { - return ContainerUtils.readDatanodeDetailsFrom(idFile); + details = ContainerUtils.readDatanodeDetailsFrom(idFile); + // Current version is always overridden to the latest + details.setCurrentVersion(getDefaultCurrentVersion()); } else { // There is no datanode.id file, this might be the first time datanode // is started. - DatanodeDetails details = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID()).build(); - details.setInitialVersion(DatanodeVersion.CURRENT_VERSION); - details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); - return details; + details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build(); + details.setInitialVersion(getDefaultInitialVersion()); + details.setCurrentVersion(getDefaultCurrentVersion()); } + return details; } /** @@ -678,4 +679,20 @@ private String reconfigReplicationStreamsLimit(String value) { .setPoolSize(Integer.parseInt(value)); return value; } + + /** + * Returns the default initial version assigned to a datanode when none is persisted. + */ + @VisibleForTesting + public static int getDefaultInitialVersion() { + return DatanodeVersion.CURRENT_VERSION; + } + + /** + * Returns the default current version of the datanode. 
+ */ + @VisibleForTesting + public static int getDefaultCurrentVersion() { + return DatanodeVersion.CURRENT_VERSION; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java index f8acbc7e2d69..58b1c1d1d5e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java @@ -56,7 +56,7 @@ private DatanodeIdYaml() { } /** - * Creates a yaml file using DatnodeDetails. This method expects the path + * Creates a yaml file using DatanodeDetails. This method expects the path * validation to be performed by the caller. * * @param datanodeDetails {@link DatanodeDetails} diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3f346300b3ed..987bf25ad884 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -45,6 +45,7 @@ message DatanodeDetailsProto { optional string networkLocation = 7; // Network topology location optional NodeOperationalState persistedOpState = 8; // The Operational state persisted in the datanode.id file optional int64 persistedOpStateExpiry = 9; // The seconds after the epoch when the OpState should expire + optional int32 currentVersion = 10; // Current datanode wire version // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. 
} diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 7de9bcc297da..432faab48777 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -33,4 +33,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> mini-chaos-tests + + + org.mockito + mockito-inline + test + + + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index e864cae00b37..176dab7335d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -23,6 +23,7 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -321,15 +322,20 @@ abstract class Builder { protected Optional hbProcessorInterval = Optional.empty(); protected String scmId = UUID.randomUUID().toString(); protected String omId = UUID.randomUUID().toString(); - + protected Optional datanodeReservedSpace = Optional.empty(); protected boolean includeRecon = false; - protected Optional omLayoutVersion = Optional.empty(); protected Optional scmLayoutVersion = Optional.empty(); protected Optional dnLayoutVersion = Optional.empty(); + protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); + protected int dnCurrentVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); + + // Use a relatively small number of handlers for testing + protected int numOfOmHandlers = 20; + protected int numOfScmHandlers = 20; protected int numOfDatanodes = 3; protected int numDataVolumes = 1; protected boolean startDataNodes = true; @@ -412,6 
+418,30 @@ public Builder setNumDatanodes(int val) { return this; } + /** + * Set the initialVersion for all datanodes. + * + * @param val initialVersion value to be set for all datanodes. + * + * @return MiniOzoneCluster.Builder + */ + public Builder setDatanodeInitialVersion(int val) { + dnInitialVersion = val; + return this; + } + + /** + * Set the currentVersion for all datanodes. + * + * @param val currentVersion value to be set for all datanodes. + * + * @return MiniOzoneCluster.Builder + */ + public Builder setDatanodeCurrentVersion(int val) { + dnCurrentVersion = val; + return this; + } + /** * Sets the number of data volumes per datanode. * diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 400ae3ee2cc8..820562bae2c9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -102,6 +103,8 @@ import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -132,12 +135,14 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private CertificateClient caClient; private final Set clients = ConcurrentHashMap.newKeySet(); private SecretKeyClient secretKeyClient; + private static MockedStatic mockDNStatic = 
Mockito.mockStatic(HddsDatanodeService.class); /** * Creates a new MiniOzoneCluster with Recon. * * @throws IOException if there is an I/O error */ + @SuppressWarnings("checkstyle:ParameterNumber") MiniOzoneClusterImpl(OzoneConfiguration conf, SCMConfigurator scmConfigurator, OzoneManager ozoneManager, @@ -396,6 +401,16 @@ private void waitForHddsDatanodeToStop(DatanodeDetails dn) }, 1000, waitForClusterToBeReadyTimeout); } + private static void overrideDatanodeVersions(int dnInitialVersion, int dnCurrentVersion) { + // FUTURE_VERSION (-1) is not a valid version for a datanode, using it as a marker when version is not overridden + if (dnInitialVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultInitialVersion).thenReturn(dnInitialVersion); + } + if (dnCurrentVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultCurrentVersion).thenReturn(dnCurrentVersion); + } + } + @Override public void restartHddsDatanode(int i, boolean waitForDatanode) throws InterruptedException, TimeoutException { @@ -782,10 +797,14 @@ protected List createHddsDatanodes( String[] args = new String[] {}; conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); List hddsDatanodes = new ArrayList<>(); + + // Override default datanode initial and current version if necessary + overrideDatanodeVersions(dnInitialVersion, dnCurrentVersion); + for (int i = 0; i < numOfDatanodes; i++) { OzoneConfiguration dnConf = new OzoneConfiguration(conf); configureDatanodePorts(dnConf); - String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); + String datanodeBaseDir = path + "/datanode-" + i; Path metaDir = Paths.get(datanodeBaseDir, "meta"); List dataDirs = new ArrayList<>(); List reservedSpaceList = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 8bb791bb103e..2559be4ea41d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -17,9 +17,11 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -28,6 +30,7 @@ import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -45,6 +48,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -70,6 +74,7 @@ public class TestBlockDataStreamOutput { private static String volumeName; private static String bucketName; private static String keyString; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); /** * Create a MiniDFSCluster for testing. 
@@ -105,6 +110,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setDatanodeCurrentVersion(DN_OLD_VERSION) .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); @@ -270,4 +276,25 @@ public void testTotalAckDataLength() throws Exception { assertEquals(dataLength, stream.getTotalAckDataLength()); } + @Test + public void testDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java new file mode 100644 index 000000000000..5e7d8a4b0525 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.client.rpc; + +import org.apache.hadoop.hdds.DatanodeVersion; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry; +import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; +import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; +import org.apache.hadoop.ozone.container.TestHelper; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.List; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests DatanodeVersion in client stream. 
+ */ +@Timeout(120) +public class TestDatanodeVersion { + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf = new OzoneConfiguration(); + private static OzoneClient client; + private static ObjectStore objectStore; + private static int chunkSize; + private static int flushSize; + private static int maxFlushSize; + private static int blockSize; + private static String volumeName; + private static String bucketName; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); + + /** + * Create a MiniDFSCluster for testing. + *

+ * Ozone is made active by setting OZONE_ENABLED = true + */ + @BeforeAll + public static void init() throws Exception { + chunkSize = 100; + flushSize = 2 * chunkSize; + maxFlushSize = 2 * flushSize; + blockSize = 2 * maxFlushSize; + + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + conf.setFromObject(clientConfig); + + conf.setQuietMode(false); + conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .setDataStreamBufferFlushSize(maxFlushSize) + .setDataStreamMinPacketSize(chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .setDatanodeCurrentVersion(DN_OLD_VERSION) + .build(); + cluster.waitForClusterToBeReady(); + //the easiest way to create an open container is creating a key + client = OzoneClientFactory.getRpcClient(conf); + objectStore = client.getObjectStore(); + volumeName = "testblockoutputstream"; + bucketName = volumeName; + objectStore.createVolume(volumeName); + objectStore.getVolume(volumeName).createBucket(bucketName); + } + + /** + * Shutdown MiniDFSCluster. 
+ */ + @AfterAll + public static void shutdown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + static OzoneDataStreamOutput createKey(String keyName, ReplicationType type, long size) throws Exception { + return TestHelper.createStreamKey(keyName, type, size, objectStore, volumeName, bucketName); + } + + @Test + public void testStreamDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = UUID.randomUUID().toString(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + +}