@@ -41,7 +41,8 @@ public enum HDDSLayoutFeature implements LayoutFeature {
HADOOP_PRC_PORTS_IN_DATANODEDETAILS(7, "Adding Hadoop RPC ports " +
"to DatanodeDetails."),
HBASE_SUPPORT(8, "Datanode RocksDB Schema Version 3 has an extra table " +
"for the last chunk of blocks to support HBase.)");
"for the last chunk of blocks to support HBase.)"),
DATANODE_SCHEMA_V4(9, "Container yaml file doesn't require chunksPath and metadataPath");
Suggested change
DATANODE_SCHEMA_V4(9, "Container yaml file doesn't require chunksPath and metadataPath");
DATANODE_SCHEMA_V4(9, "Container YAML file doesn't require chunksPath and metadataPath");


////////////////////////////// //////////////////////////////

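For context, later hunks in this PR gate all V4 behavior on finalization of this layout feature. A condensed sketch of the pattern (maybeUpgradeSchema is an illustrative name, not part of the PR; the real call sites appear below in writeToContainerFile and importContainerData):

  // Until the cluster upgrade is finalized, containers keep the V3 on-disk
  // format; only afterwards are container files rewritten as V4.
  private void maybeUpgradeSchema(KeyValueContainerData containerData) {
    if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)
        && isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) {
      containerData.setSchemaVersion(OzoneConsts.SCHEMA_V4);
    }
  }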
@@ -243,6 +243,9 @@ private OzoneConsts() {
// V3: Column families definitions are close to V2,
// but have containerID as key prefixes.
public static final String SCHEMA_V3 = "3";
// V4: Column families is same as V3,
// removed chunkPath and metadataPath in .container file
Comment on lines +246 to +247
Suggested change
// V4: Column families is same as V3,
// removed chunkPath and metadataPath in .container file
/**
* Schema version 4 for Ozone container files.
* <p>
* The column families remain the same as defined in {@link #SCHEMA_V3}.
* However, the {@code chunkPath} and {@code metadataPath}
* fields have been removed in this version of the .container files.
* </p>
*/

public static final String SCHEMA_V4 = "4";

// Supported store types.
public static final String OZONE = "ozone";
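To make the difference concrete: an abbreviated sketch of the same .container file under both schema versions (keys trimmed and values invented for illustration; the authoritative field lists are KV_YAML_FIELDS and KV_YAML_FIELDS_SCHEMA_V4 in KeyValueContainerData further down):

  # Schema V3 - paths are persisted in the file
  containerID: 1
  containerType: KeyValueContainer
  containerDBType: RocksDB
  schemaVersion: '3'
  metadataPath: /data/hdds/example/metadata
  chunksPath: /data/hdds/example/chunks

  # Schema V4 - paths are derived from the .container file location at load time
  containerID: 1
  containerType: KeyValueContainer
  containerDBType: RocksDB
  schemaVersion: '4'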
@@ -316,6 +316,7 @@ private static Builder getContainerCommandRequestBuilder(long containerID,
request.setCreateContainer(
ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build());
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setPipelineID(pipeline.getId().getId().toString());

return request;
}
@@ -201,7 +201,7 @@ public static void verifyChecksum(ContainerData containerData,
String storedChecksum = containerData.getChecksum();

Yaml yaml = ContainerDataYaml.getYamlForContainerType(
containerData.getContainerType(),
containerData.getContainerType(), containerData,
Suggested change
containerData.getContainerType(), containerData,
containerData.getContainerType(),
containerData,

containerData instanceof KeyValueContainerData &&
((KeyValueContainerData)containerData).getReplicaIndex() > 0);
containerData.computeAndSetChecksum(yaml);
@@ -17,7 +17,12 @@

package org.apache.hadoop.ozone.container.common.impl;

import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4;
import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS;
import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG;

import com.google.common.base.Preconditions;
@@ -88,7 +93,7 @@ public static void createContainerFile(ContainerType containerType,
((KeyValueContainerData) containerData).getReplicaIndex() > 0;

// Create Yaml for given container type
Yaml yaml = getYamlForContainerType(containerType, withReplicaIndex);
Yaml yaml = getYamlForContainerType(containerType, containerData, withReplicaIndex);
// Compute Checksum and update ContainerData
containerData.computeAndSetChecksum(yaml);

@@ -122,9 +127,16 @@ public static ContainerData readContainerFile(File containerFile)
throws IOException {
Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
try (FileInputStream inputFileStream = new FileInputStream(containerFile)) {
return readContainer(inputFileStream);
KeyValueContainerData containerData = (KeyValueContainerData) readContainer(inputFileStream);
if (containerData.getChunksPath() == null) {
containerData.setChunksPath(containerFile.getParentFile().getParentFile().getAbsolutePath()
Please move containerFile.getParentFile().getParentFile().getAbsolutePath().concat(OZONE_URI_DELIMITER).concat(STORAGE_DIR_CHUNKS) into a separate variable.

.concat(OZONE_URI_DELIMITER).concat(STORAGE_DIR_CHUNKS));
}
if (containerData.getMetadataPath() == null) {
containerData.setMetadataPath(containerFile.getParentFile().getAbsolutePath());
}
return containerData;
}

}
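One way to apply the reviewer's request above is to pull the derived path into a named local before the null checks (a sketch; defaultChunksPath is an illustrative name):

  try (FileInputStream inputFileStream = new FileInputStream(containerFile)) {
    KeyValueContainerData containerData =
        (KeyValueContainerData) readContainer(inputFileStream);
    if (containerData.getChunksPath() == null) {
      // Pre-V4 files persisted this path; V4 derives it from the file location.
      String defaultChunksPath = containerFile.getParentFile().getParentFile()
          .getAbsolutePath().concat(OZONE_URI_DELIMITER).concat(STORAGE_DIR_CHUNKS);
      containerData.setChunksPath(defaultChunksPath);
    }
    if (containerData.getMetadataPath() == null) {
      containerData.setMetadataPath(containerFile.getParentFile().getAbsolutePath());
    }
    return containerData;
  }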

/**
@@ -183,11 +195,12 @@ public static ContainerData readContainer(InputStream input)
* the container properties.
*
* @param containerType type of container
* @param containerData container data
* @param withReplicaIndex whether to include the replica index in the container YAML
* @return Yaml representation of container properties
* @throws StorageContainerException if the type is unrecognized
*/
public static Yaml getYamlForContainerType(ContainerType containerType,
public static Yaml getYamlForContainerType(ContainerType containerType, ContainerData containerData,
boolean withReplicaIndex)
throws StorageContainerException {
PropertyUtils propertyUtils = new PropertyUtils();
@@ -201,6 +214,11 @@ public static Yaml getYamlForContainerType(ContainerType containerType,
yamlFields = new ArrayList<>(yamlFields);
yamlFields.add(REPLICA_INDEX);
}
if (((KeyValueContainerData)containerData).olderSchemaThan(SCHEMA_V4)) {
Suggested change
if (((KeyValueContainerData)containerData).olderSchemaThan(SCHEMA_V4)) {
if (((KeyValueContainerData) containerData).olderSchemaThan(SCHEMA_V4)) {

yamlFields = new ArrayList<>(yamlFields);
yamlFields.add(METADATA_PATH);
yamlFields.add(CHUNKS_PATH);
}
Representer representer = new ContainerDataRepresenter(yamlFields);
representer.setPropertyUtils(propertyUtils);
representer.addClassTag(
@@ -299,9 +317,12 @@ public Object construct(Node node) {

kvData.setContainerDBType((String)nodes.get(
OzoneConsts.CONTAINER_DB_TYPE));
kvData.setMetadataPath((String) nodes.get(
OzoneConsts.METADATA_PATH));
kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION);
kvData.setSchemaVersion(schemaVersion);
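// Only pre-V4 container files persist the path fields; V4 derives them at load time.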
if (kvData.olderSchemaThan(SCHEMA_V4)) {
kvData.setMetadataPath((String) nodes.get(OzoneConsts.METADATA_PATH));
kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
}
Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
kvData.setMetadata(meta);
kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM));
@@ -310,8 +331,6 @@ public Object construct(Node node) {
String state = (String) nodes.get(OzoneConsts.STATE);
kvData
.setState(ContainerProtos.ContainerDataProto.State.valueOf(state));
String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION);
kvData.setSchemaVersion(schemaVersion);
final Object replicaIndex = nodes.get(REPLICA_INDEX);
if (replicaIndex != null) {
kvData.setReplicaIndex(
@@ -21,6 +21,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -105,6 +106,7 @@ public DeleteBlocksCommandHandler(OzoneContainer container,
schemaHandlers.put(SCHEMA_V1, this::markBlocksForDeletionSchemaV1);
schemaHandlers.put(SCHEMA_V2, this::markBlocksForDeletionSchemaV2);
schemaHandlers.put(SCHEMA_V3, this::markBlocksForDeletionSchemaV3);
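// V4 keeps V3's column-family layout (see OzoneConsts above), so the V3 handler is reused.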
schemaHandlers.put(SCHEMA_V4, this::markBlocksForDeletionSchemaV3);

ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setNameFormat(threadNamePrefix +
@@ -27,7 +27,10 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4;
import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure;
import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -57,6 +60,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.nativeio.NativeIO;
@@ -176,7 +180,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
containerVolume, clusterId);
// Set schemaVersion before the dbFile since we have to
// choose the dbFile location based on schema version.
String schemaVersion = VersionedDatanodeFeatures.SchemaV3
String schemaVersion = VersionedDatanodeFeatures.SchemaV4
.chooseSchemaVersion(config);
containerData.setSchemaVersion(schemaVersion);

@@ -294,6 +298,10 @@ private void writeToContainerFile(File containerFile, boolean isCreate)
long containerId = containerData.getContainerID();
try {
tempContainerFile = createTempFile(containerFile);
if (containerData.hasSchema(SCHEMA_V3) && isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) {
// convert container from V3 to V4 on yaml file update
containerData.setSchemaVersion(SCHEMA_V4);
}
ContainerDataYaml.createContainerFile(
ContainerType.KeyValueContainer, containerData, tempContainerFile);

@@ -646,7 +654,7 @@ public void importContainerData(InputStream input,

// delete all other temporary data in case of any exception.
try {
if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
if (containerData.sharedDB()) {
BlockUtils.removeContainerFromDB(containerData, config);
}
FileUtils.deleteDirectory(new File(containerData.getMetadataPath()));
@@ -669,12 +677,22 @@ public void importContainerData(KeyValueContainerData originalContainerData)
containerData.setState(originalContainerData.getState());
containerData
.setContainerDBType(originalContainerData.getContainerDBType());
containerData.setSchemaVersion(originalContainerData.getSchemaVersion());
if (VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) &&
originalContainerData.hasSchema(SCHEMA_V3)) {
// migrate V3 to V4 on container import
containerData.setSchemaVersion(SCHEMA_V4);
} else if (!VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) &&
originalContainerData.hasSchema(SCHEMA_V4)) {
// if V4 is not finalized, convert V4 back to V3 on container import
containerData.setSchemaVersion(SCHEMA_V3);
} else {
containerData.setSchemaVersion(originalContainerData.getSchemaVersion());
}

//rewriting the yaml file with new checksum calculation.
update(originalContainerData.getMetadata(), true);

if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
if (containerData.sharedDB()) {
// load metadata from received dump files before we try to parse kv
BlockUtils.loadKVContainerDataFromFiles(containerData, config);
}
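VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled is not shown in this diff. Judging by the existing SchemaV3 helper it mirrors, it plausibly combines the layout-feature check with a datanode config switch, roughly (a sketch; the config key constants are assumed analogues of the V3 ones, not confirmed by this PR):

  public static class SchemaV4 {
    public static boolean isFinalizedAndEnabled(ConfigurationSource config) {
      // Finalized layout feature AND the config flag enabling V4 containers.
      return isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)
          && config.getBoolean(HDDS_DATANODE_CONTAINER_SCHEMA_V4_ENABLED,  // hypothetical key
              HDDS_DATANODE_CONTAINER_SCHEMA_V4_ENABLED_DEFAULT);          // hypothetical default
    }
  }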
@@ -702,7 +720,7 @@ public void exportContainerData(OutputStream destination,
}

try {
if (!containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
if (!containerData.sharedDB()) {
compactDB();
// Close DB (and remove from cache) to avoid concurrent modification
// while packing it.
@@ -1000,7 +1018,7 @@ private File createTempFile(File file) throws IOException {
private void packContainerToDestination(OutputStream destination,
ContainerPacker<KeyValueContainerData> packer)
throws IOException {
if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
if (containerData.sharedDB()) {
// Synchronize the dump and pack operation,
// so concurrent exports don't get dump files overwritten.
// We seldom got concurrent exports for a container,
@@ -31,7 +31,6 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -266,7 +265,7 @@ private ScanResult scanData(DataTransferThrottler throttler,
} else {
// If schema V3 and container details not in DB or
// if containerDBPath is removed
if ((onDiskContainerData.hasSchema(OzoneConsts.SCHEMA_V3) &&
if ((onDiskContainerData.sharedDB() &&
db.getStore().getMetadataTable().get(
onDiskContainerData.getBcsIdKey()) == null) ||
!new File(onDiskContainerData.getDbFile()
@@ -31,8 +31,10 @@
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_VERSION;
import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix;
import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Preconditions;
@@ -46,7 +48,9 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerDataProto;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -68,6 +72,8 @@ public class KeyValueContainerData extends ContainerData {

// Fields need to be stored in .container file.
private static final List<String> KV_YAML_FIELDS;
// Fields need to be stored in .container file for Schema V4;
Suggested change
// Fields need to be stored in .container file for Schema V4;
// Fields need to be stored in .container file for Schema V4;

private static final List<String> KV_YAML_FIELDS_SCHEMA_V4;

// Path to Container metadata Level DB/RocksDB Store and .container file.
private String metadataPath;
@@ -98,6 +104,11 @@ public class KeyValueContainerData extends ContainerData {
KV_YAML_FIELDS.add(CHUNKS_PATH);
KV_YAML_FIELDS.add(CONTAINER_DB_TYPE);
KV_YAML_FIELDS.add(SCHEMA_VERSION);

KV_YAML_FIELDS_SCHEMA_V4 = Lists.newArrayList();
KV_YAML_FIELDS_SCHEMA_V4.addAll(YAML_FIELDS);
KV_YAML_FIELDS_SCHEMA_V4.add(CONTAINER_DB_TYPE);
KV_YAML_FIELDS_SCHEMA_V4.add(SCHEMA_VERSION);
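// Note: METADATA_PATH and CHUNKS_PATH are intentionally omitted for V4;
// they are derived from the .container file location when the file is read.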
}

/**
@@ -150,7 +161,7 @@ public String getSchemaVersion() {
* @throws UnsupportedOperationException If no valid schema version is found.
*/
public String getSupportedSchemaVersionOrDefault() {
String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3};
String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3, SCHEMA_V4};

for (String version : versions) {
if (this.hasSchema(version)) {
@@ -336,7 +347,10 @@ public ContainerDataProto getProtoBufMessage() {
}

public static List<String> getYamlFields() {
return Collections.unmodifiableList(KV_YAML_FIELDS);
List<String> list = isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)
? KV_YAML_FIELDS_SCHEMA_V4
: KV_YAML_FIELDS;
return Collections.unmodifiableList(list);
}

/**
@@ -426,7 +440,7 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() {
* for other schemas just return null.
*/
public String startKeyEmpty() {
if (hasSchema(SCHEMA_V3)) {
if (sharedDB()) {
return getContainerKeyPrefix(getContainerID());
}
return null;
@@ -437,7 +451,7 @@ public String startKeyEmpty() {
* for other schemas just return null.
*/
public String containerPrefix() {
if (hasSchema(SCHEMA_V3)) {
if (sharedDB()) {
return getContainerKeyPrefix(getContainerID());
}
return "";
@@ -451,7 +465,7 @@ public String containerPrefix() {
* @return formatted key
*/
private String formatKey(String key) {
if (hasSchema(SCHEMA_V3)) {
if (sharedDB()) {
key = getContainerKeyPrefix(getContainerID()) + key;
}
return key;
@@ -461,4 +475,17 @@ public boolean hasSchema(String version) {
return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, version);
}

public boolean sharedDB() {
return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V3) ||
KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V4);
}

/**
 * Returns true if this container's schema version is strictly lower
 * than the given version.
 */
public boolean olderSchemaThan(String version) {
String target = version != null ? version : SCHEMA_V1;
String self = schemaVersion != null ? schemaVersion : SCHEMA_V1;
return Integer.parseInt(self) < Integer.parseInt(target);
}
}
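The two new helpers above are easy to pin down with assertions; a minimal usage sketch (hypothetical, not part of the PR; data is any KeyValueContainerData instance with the schema version set as shown):

  data.setSchemaVersion(SCHEMA_V3);
  assert data.sharedDB();                  // V3 uses the shared per-volume RocksDB
  assert data.olderSchemaThan(SCHEMA_V4);  // 3 < 4

  data.setSchemaVersion(SCHEMA_V4);
  assert data.sharedDB();                  // V4 keeps the same DB layout
  assert !data.olderSchemaThan(SCHEMA_V4); // not older than itself

  data.setSchemaVersion(SCHEMA_V1);
  assert !data.sharedDB();                 // V1/V2 use per-container DBs

Note that olderSchemaThan relies on schema versions being plain numeric strings ("1" through "4"), and that a null version falls back to SCHEMA_V1, matching containers created before schema versioning.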
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.keyvalue;

import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
@@ -292,7 +293,7 @@ static ObjectNode getAggregateValues(DatanodeStore store,
(DatanodeStoreSchemaTwoImpl) store;
pendingDelete =
countPendingDeletesSchemaV2(schemaTwoStore, containerData);
} else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) {
} else if (isSharedDBVersion(schemaVersion)) {
DatanodeStoreSchemaThreeImpl schemaThreeStore =
(DatanodeStoreSchemaThreeImpl) store;
pendingDelete =
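The newly imported KeyValueContainerUtil.isSharedDBVersion is not shown in this diff; presumably it is the static, version-string counterpart of KeyValueContainerData#sharedDB above, along the lines of (a sketch):

  public static boolean isSharedDBVersion(String schemaVersion) {
    return isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)
        || isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V4);
  }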