Merged
@@ -37,10 +37,12 @@

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.hadoop.hdds.upgrade.BelongsToHDDSLayoutVersion;
import org.apache.hadoop.ozone.ClientVersion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS;
import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;

/**
@@ -802,6 +804,7 @@ public static final class Port {
*/
public enum Name {
STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER,
@BelongsToHDDSLayoutVersion(RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS)
RATIS_DATASTREAM;

public static final Set<Name> ALL_PORTS = ImmutableSet.copyOf(
@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.upgrade;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
* Annotation to mark a class or a field declaration that belongs to a specific
* HDDS Layout Version.
*/
@Target({ElementType.TYPE, ElementType.FIELD})
@Retention(RetentionPolicy.RUNTIME)
public @interface BelongsToHDDSLayoutVersion {
HDDSLayoutFeature value();
}
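
A minimal sketch of how this marker is meant to be consumed (the real consumer is DatanodeIdYaml, further down in this diff). Enum constants are public static fields of their enum class, so the annotation is read off the corresponding Field via reflection. The helper and its name below are illustrative only, not part of this patch:

import java.lang.reflect.Field;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.upgrade.BelongsToHDDSLayoutVersion;

/**
 * Illustrative helper (not part of this patch): a port name is usable at a
 * given metadata layout version if it carries no BelongsToHDDSLayoutVersion
 * marker, or was introduced at or before that version.
 */
public final class LayoutVersionGate {

  private LayoutVersionGate() {
  }

  public static boolean isVisibleAt(DatanodeDetails.Port.Name name,
      int layoutVersion) throws NoSuchFieldException {
    // Enum constants are declared fields of the enum type.
    Field f = DatanodeDetails.Port.Name.class.getDeclaredField(name.name());
    BelongsToHDDSLayoutVersion marker =
        f.getAnnotation(BelongsToHDDSLayoutVersion.class);
    return marker == null
        || marker.value().layoutVersion() <= layoutVersion;
  }
}

With this patch applied, isVisibleAt(Name.RATIS_DATASTREAM, 4) is false, since that port belongs to layout version 5, while every pre-existing port is visible at any version.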
@@ -35,7 +35,9 @@ public enum HDDSLayoutFeature implements LayoutFeature {
ERASURE_CODED_STORAGE_SUPPORT(3, "Ozone version with built in support for"
+ " Erasure Coded block data storage."),
DATANODE_SCHEMA_V3(4, "Datanode RocksDB Schema Version 3 (one rocksdb " +
"per disk)");
"per disk)"),
RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS(5, "Adding the RATIS_DATASTREAM " +
"port to the DatanodeDetails.");

////////////////////////////// //////////////////////////////

@@ -440,7 +440,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails)
String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
Preconditions.checkNotNull(idFilePath);
File idFile = new File(idFilePath);
ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
}

/**
@@ -141,7 +141,8 @@ public static String getContainerDbFileName(String containerName) {
* @throws IOException when read/write error occurs
*/
public static synchronized void writeDatanodeDetailsTo(
DatanodeDetails datanodeDetails, File path) throws IOException {
DatanodeDetails datanodeDetails, File path, ConfigurationSource conf)
throws IOException {
if (path.exists()) {
if (!path.delete() || !path.createNewFile()) {
throw new IOException("Unable to overwrite the datanode ID file.");
@@ -152,7 +153,7 @@ public static synchronized void writeDatanodeDetailsTo(
throw new IOException("Unable to create datanode ID directories.");
}
}
DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path);
DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path, conf);
}

/**
@@ -23,16 +23,23 @@
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.server.YamlUtils;
import org.apache.hadoop.hdds.upgrade.BelongsToHDDSLayoutVersion;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

@@ -41,6 +48,9 @@
*/
public final class DatanodeIdYaml {

private static final Logger LOG =
LoggerFactory.getLogger(DatanodeIdYaml.class);

private DatanodeIdYaml() {
// static helper methods only, no state.
}
@@ -53,15 +63,17 @@ private DatanodeIdYaml() {
* @param path Path to datanode.id file
*/
public static void createDatanodeIdFile(DatanodeDetails datanodeDetails,
File path) throws IOException {
File path,
ConfigurationSource conf)
throws IOException {
DumperOptions options = new DumperOptions();
options.setPrettyFlow(true);
options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW);
Yaml yaml = new Yaml(options);

try (Writer writer = new OutputStreamWriter(
new FileOutputStream(path), StandardCharsets.UTF_8)) {
yaml.dump(getDatanodeDetailsYaml(datanodeDetails), writer);
yaml.dump(getDatanodeDetailsYaml(datanodeDetails, conf), writer);
}
}

@@ -219,11 +231,32 @@ public void setCurrentVersion(int version) {
}

private static DatanodeDetailsYaml getDatanodeDetailsYaml(
DatanodeDetails datanodeDetails) {
DatanodeDetails datanodeDetails, ConfigurationSource conf)
throws IOException {

DatanodeLayoutStorage datanodeLayoutStorage
= new DatanodeLayoutStorage(conf, datanodeDetails.getUuidString());

Map<String, Integer> portDetails = new LinkedHashMap<>();
if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) {
for (DatanodeDetails.Port port : datanodeDetails.getPorts()) {
// Enum constants are declared fields of the enum type, so the
// BelongsToHDDSLayoutVersion marker is read via reflection.
Field f = null;
try {
f = DatanodeDetails.Port.Name.class
.getDeclaredField(port.getName().name());
} catch (NoSuchFieldException e) {
LOG.error("There is no such field as {} in {}", port.getName().name(),
DatanodeDetails.Port.Name.class);
}
// Skip ports introduced by a layout feature newer than this datanode's
// current metadata layout version, so a not-yet-finalized datanode does
// not persist a port that pre-upgrade code cannot parse.
if (f != null
&& f.isAnnotationPresent(BelongsToHDDSLayoutVersion.class)) {
HDDSLayoutFeature layoutFeature
= f.getAnnotation(BelongsToHDDSLayoutVersion.class).value();
if (layoutFeature.layoutVersion() >
datanodeLayoutStorage.getLayoutVersion()) {
continue;
}
}
portDetails.put(port.getName().toString(), port.getValue());
}
}
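
Taken together with the annotation, the write path now degrades gracefully. A usage sketch under assumed names (metaDir, idFile, and details are hypothetical; the flow mirrors the new tests below, and the method is assumed to sit in the same package as DatanodeIdYaml):

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;

// Illustrative only: persist the ID file for a datanode whose on-disk
// layout version is still 4 (DATANODE_SCHEMA_V3, i.e. the datastream-port
// feature is not yet finalized).
static void writePreUpgradeIdFile(DatanodeDetails details, File metaDir,
    File idFile) throws IOException {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
  DatanodeLayoutStorage layout = new DatanodeLayoutStorage(conf,
      UUID.randomUUID().toString(),
      HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion());
  layout.initialize();
  // RATIS_DATASTREAM is filtered out during the dump, so the resulting
  // YAML stays parseable by pre-upgrade code.
  DatanodeIdYaml.createDatanodeIdFile(details, idFile, conf);
}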
@@ -108,7 +108,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails)
String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
Preconditions.checkNotNull(idFilePath);
File idFile = new File(idFilePath);
ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
}

/**
@@ -127,7 +127,7 @@ private void persistContainerDatanodeDetails() {
.getDatanodeDetails();
if (datanodeDetails != null) {
try {
ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
} catch (IOException ex) {
// Writing DatanodeDetails to the datanode.id file failed, which is
// critical, so shut down the state machine.
@@ -218,7 +218,7 @@ public void testDatanodeStateContext() throws IOException,
DatanodeDetails.Port.Name.STANDALONE,
OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
datanodeDetails.setPort(port);
ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
try (DatanodeStateMachine stateMachine =
new DatanodeStateMachine(datanodeDetails, conf, null, null,
null)) {
@@ -18,13 +18,16 @@
package org.apache.hadoop.ozone.container.common.helpers;

import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ByteStringConversion;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

@@ -49,6 +52,14 @@
*/
public class TestContainerUtils {

private OzoneConfiguration conf;

@BeforeEach
void setup(@TempDir File dir) {
conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
}

@Test
public void redactsDataBuffers() {
// GIVEN
@@ -112,11 +123,11 @@ public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception {
assertWriteRead(tempDir, id1);
}

private static void assertWriteRead(@TempDir File tempDir,
private void assertWriteRead(@TempDir File tempDir,
DatanodeDetails details) throws IOException {
// Write a single ID to the file and read it out
File file = new File(tempDir, "valid-values.id");
ContainerUtils.writeDatanodeDetailsTo(details, file);
ContainerUtils.writeDatanodeDetailsTo(details, file, conf);

DatanodeDetails read = ContainerUtils.readDatanodeDetailsFrom(file);

@@ -127,7 +138,7 @@ private static void assertWriteRead(@TempDir File tempDir,
private void createMalformedIDFile(File malformedFile)
throws IOException {
DatanodeDetails id = randomDatanodeDetails();
ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
ContainerUtils.writeDatanodeDetailsTo(id, malformedFile, conf);

try (FileOutputStream out = new FileOutputStream(malformedFile)) {
out.write("malformed".getBytes(StandardCharsets.UTF_8));
@@ -17,14 +17,20 @@
*/
package org.apache.hadoop.ozone.container.common.helpers;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import static org.junit.Assert.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertEquals;

/**
@@ -37,11 +43,57 @@ void testWriteRead(@TempDir File dir) throws IOException {
DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
File file = new File(dir, "datanode.yaml");

DatanodeIdYaml.createDatanodeIdFile(original, file);
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());

DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);

assertEquals(original, read);
assertEquals(original.toDebugString(), read.toDebugString());
}

@Test
void testWriteReadBeforeRatisDatastreamPortLayoutVersion(@TempDir File dir)
throws IOException {
DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
File file = new File(dir, "datanode.yaml");
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
UUID.randomUUID().toString(),
HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion());
layoutStorage.initialize();

DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);

assertNotNull(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
// if no separate admin/server/datastream port, return single Ratis one for
// compat
assertEquals(read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM),
read.getPort(DatanodeDetails.Port.Name.RATIS));
}

@Test
void testWriteReadAfterRatisDatastreamPortLayoutVersion(@TempDir File dir)
throws IOException {
DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();
File file = new File(dir, "datanode.yaml");
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
UUID.randomUUID().toString(),
HDDSLayoutFeature.RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS
.layoutVersion());
layoutStorage.initialize();

DatanodeIdYaml.createDatanodeIdFile(original, file, conf);
DatanodeDetails read = DatanodeIdYaml.readDatanodeIdFile(file);

assertNotNull(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
assertEquals(original.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM),
read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM));
}

}
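
Note the compatibility assertion in testWriteReadBeforeRatisDatastreamPortLayoutVersion above: per its inline comment, DatanodeDetails.getPort is expected to answer with the plain RATIS port when no separate datastream port was persisted. A hedged sketch of that fallback (illustrative, not the actual DatanodeDetails implementation):

// Illustrative sketch of the fallback the assertion relies on: if no
// separate admin/server/datastream port is recorded, fall back to the
// single RATIS port for compatibility.
public Port getPort(Port.Name name) {
  for (Port port : ports) {
    if (port.getName() == name) {
      return port;
    }
  }
  switch (name) {
  case RATIS_ADMIN:
  case RATIS_SERVER:
  case RATIS_DATASTREAM:
    return getPort(Port.Name.RATIS);
  default:
    return null;
  }
}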
@@ -289,7 +289,8 @@ public void testAddHddsVolumeAfterFinalize() throws Exception {

// Add a new HddsVolume. It should have DB created after DN restart.
addHddsVolume();
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
false);
for (StorageVolume vol:
dsm.getContainer().getVolumeSet().getVolumesList()) {
HddsVolume hddsVolume = (HddsVolume) vol;
@@ -323,7 +324,8 @@ public void testAddDbVolumeAfterFinalize() throws Exception {

// Add a new DbVolume
addDbVolume();
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
false);

// HddsVolume should still use the rocksDB under it's volume
DbVolume dbVolume = (DbVolume) dsm.getContainer().getDbVolumeSet()
@@ -352,7 +354,8 @@ public void testAddDbAndHddsVolumeAfterFinalize() throws Exception {

addDbVolume();
File newDataVolume = addHddsVolume();
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
false);

DbVolume dbVolume = (DbVolume) dsm.getContainer().getDbVolumeSet()
.getVolumesList().get(0);
@@ -424,7 +427,8 @@ public void testWrite(boolean enable, String expectedVersion)
// Set SchemaV3 enable status
conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED,
enable);
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), true);
restartDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(),
false);

// Write new data
final long containerID2 = addContainer(pipeline);