diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java index 02e68515f389..7b0689214c8a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java @@ -41,7 +41,8 @@ public enum HDDSLayoutFeature implements LayoutFeature { HADOOP_PRC_PORTS_IN_DATANODEDETAILS(7, "Adding Hadoop RPC ports " + "to DatanodeDetails."), HBASE_SUPPORT(8, "Datanode RocksDB Schema Version 3 has an extra table " + - "for the last chunk of blocks to support HBase.)"); + "for the last chunk of blocks to support HBase.)"), + DATANODE_SCHEMA_V4(9, "Container yaml file doesn't require chunksPath and metadataPath"); ////////////////////////////// ////////////////////////////// diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index d03cc2a22fe5..949a964bd85c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -243,6 +243,9 @@ private OzoneConsts() { // V3: Column families definitions are close to V2, // but have containerID as key prefixes. public static final String SCHEMA_V3 = "3"; + // V4: Column family definitions are the same as V3, + // but chunksPath and metadataPath are removed from the .container file + public static final String SCHEMA_V4 = "4"; // Supported store types. public static final String OZONE = "ozone"; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 6277a3584c6b..a13a58dd8537 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -316,6 +316,7 @@ private static Builder getContainerCommandRequestBuilder(long containerID, request.setCreateContainer( ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build()); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); + request.setPipelineID(pipeline.getId().getId().toString()); return request; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index bbc012d3cb98..bbe6bc694165 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -201,7 +201,7 @@ public static void verifyChecksum(ContainerData containerData, String storedChecksum = containerData.getChecksum(); Yaml yaml = ContainerDataYaml.getYamlForContainerType( - containerData.getContainerType(), + containerData.getContainerType(), containerData, containerData instanceof KeyValueContainerData && ((KeyValueContainerData)containerData).getReplicaIndex() > 0); containerData.computeAndSetChecksum(yaml); diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index ac57be2e2638..779852c32e65 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -17,7 +17,12 @@ package org.apache.hadoop.ozone.container.common.impl; +import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG; import com.google.common.base.Preconditions; @@ -88,7 +93,7 @@ public static void createContainerFile(ContainerType containerType, ((KeyValueContainerData) containerData).getReplicaIndex() > 0; // Create Yaml for given container type - Yaml yaml = getYamlForContainerType(containerType, withReplicaIndex); + Yaml yaml = getYamlForContainerType(containerType, containerData, withReplicaIndex); // Compute Checksum and update ContainerData containerData.computeAndSetChecksum(yaml); @@ -122,9 +127,16 @@ public static ContainerData readContainerFile(File containerFile) throws IOException { Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); try (FileInputStream inputFileStream = new FileInputStream(containerFile)) { - return readContainer(inputFileStream); + KeyValueContainerData containerData = (KeyValueContainerData) readContainer(inputFileStream); + if (containerData.getChunksPath() == null) { + containerData.setChunksPath(containerFile.getParentFile().getParentFile().getAbsolutePath() + .concat(OZONE_URI_DELIMITER).concat(STORAGE_DIR_CHUNKS)); + } + if (containerData.getMetadataPath() == null) { + containerData.setMetadataPath(containerFile.getParentFile().getAbsolutePath()); + } + return containerData; } - } /** @@ -183,11 +195,12 @@ public static ContainerData readContainer(InputStream input) * the container properties. 
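+ * For containers older than Schema V4, metadataPath and chunksPath are also serialized; from V4 on they are omitted.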
* * @param containerType type of container + * @param containerData container data * @param withReplicaIndex in the container yaml * @return Yaml representation of container properties * @throws StorageContainerException if the type is unrecognized */ - public static Yaml getYamlForContainerType(ContainerType containerType, + public static Yaml getYamlForContainerType(ContainerType containerType, ContainerData containerData, boolean withReplicaIndex) throws StorageContainerException { PropertyUtils propertyUtils = new PropertyUtils(); @@ -201,6 +214,11 @@ public static Yaml getYamlForContainerType(ContainerType containerType, yamlFields = new ArrayList<>(yamlFields); yamlFields.add(REPLICA_INDEX); } + if (((KeyValueContainerData)containerData).olderSchemaThan(SCHEMA_V4)) { + yamlFields = new ArrayList<>(yamlFields); + yamlFields.add(METADATA_PATH); + yamlFields.add(CHUNKS_PATH); + } Representer representer = new ContainerDataRepresenter(yamlFields); representer.setPropertyUtils(propertyUtils); representer.addClassTag( @@ -299,9 +317,12 @@ public Object construct(Node node) { kvData.setContainerDBType((String)nodes.get( OzoneConsts.CONTAINER_DB_TYPE)); - kvData.setMetadataPath((String) nodes.get( - OzoneConsts.METADATA_PATH)); - kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); + String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION); + kvData.setSchemaVersion(schemaVersion); + if (kvData.olderSchemaThan(SCHEMA_V4)) { + kvData.setMetadataPath((String) nodes.get(OzoneConsts.METADATA_PATH)); + kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); + } Map meta = (Map) nodes.get(OzoneConsts.METADATA); kvData.setMetadata(meta); kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM)); @@ -310,8 +331,6 @@ public Object construct(Node node) { String state = (String) nodes.get(OzoneConsts.STATE); kvData .setState(ContainerProtos.ContainerDataProto.State.valueOf(state)); - String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION); - kvData.setSchemaVersion(schemaVersion); final Object replicaIndex = nodes.get(REPLICA_INDEX); if (replicaIndex != null) { kvData.setReplicaIndex( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 80c078c5087d..774305092add 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -105,6 +106,7 @@ public DeleteBlocksCommandHandler(OzoneContainer container, schemaHandlers.put(SCHEMA_V1, this::markBlocksForDeletionSchemaV1); schemaHandlers.put(SCHEMA_V2, this::markBlocksForDeletionSchemaV2); schemaHandlers.put(SCHEMA_V3, this::markBlocksForDeletionSchemaV3); + schemaHandlers.put(SCHEMA_V4, this::markBlocksForDeletionSchemaV3);
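+ // Note: SCHEMA_V4 deliberately reuses the V3 handler. V4 only changes what is persisted in the .container yaml file; its RocksDB layout is identical to V3, so block deletion works the same way for both.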
ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index c5af0c7d9ed2..c538a23b3db3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -27,7 +27,10 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; +import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -57,6 +60,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.nativeio.NativeIO; @@ -176,7 +180,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy containerVolume, clusterId); // Set schemaVersion before the dbFile since we have to // choose the dbFile location based on schema version. - String schemaVersion = VersionedDatanodeFeatures.SchemaV3 + String schemaVersion = VersionedDatanodeFeatures.SchemaV4 .chooseSchemaVersion(config); containerData.setSchemaVersion(schemaVersion); @@ -294,6 +298,10 @@ private void writeToContainerFile(File containerFile, boolean isCreate) long containerId = containerData.getContainerID(); try { tempContainerFile = createTempFile(containerFile); + if (containerData.hasSchema(SCHEMA_V3) && isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) { + // convert container from V3 to V4 on yaml file update + containerData.setSchemaVersion(SCHEMA_V4); + } ContainerDataYaml.createContainerFile( ContainerType.KeyValueContainer, containerData, tempContainerFile); @@ -646,7 +654,7 @@ public void importContainerData(InputStream input, // delete all other temporary data in case of any exception. 
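+ // For shared-DB schemas (V3 and V4) the container's entries live in the per-volume RocksDB, so cleanup removes them from that DB rather than deleting a per-container DB directory.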
try { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { BlockUtils.removeContainerFromDB(containerData, config); } FileUtils.deleteDirectory(new File(containerData.getMetadataPath())); @@ -669,12 +677,22 @@ public void importContainerData(KeyValueContainerData originalContainerData) containerData.setState(originalContainerData.getState()); containerData .setContainerDBType(originalContainerData.getContainerDBType()); - containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); + if (VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) && + originalContainerData.hasSchema(SCHEMA_V3)) { + // migrate V3 to V4 on container import + containerData.setSchemaVersion(SCHEMA_V4); + } else if (!VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) && + originalContainerData.hasSchema(SCHEMA_V4)) { + // if V4 is not finalized, convert V4 back to V3 on container import + containerData.setSchemaVersion(SCHEMA_V3); + } else { + containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); + } //rewriting the yaml file with new checksum calculation. update(originalContainerData.getMetadata(), true); - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // load metadata from received dump files before we try to parse kv BlockUtils.loadKVContainerDataFromFiles(containerData, config); } @@ -702,7 +720,7 @@ public void exportContainerData(OutputStream destination, } try { - if (!containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (!containerData.sharedDB()) { compactDB(); // Close DB (and remove from cache) to avoid concurrent modification // while packing it. @@ -1000,7 +1018,7 @@ private File createTempFile(File file) throws IOException { private void packContainerToDestination(OutputStream destination, ContainerPacker packer) throws IOException { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // Synchronize the dump and pack operation, // so concurrent exports don't get dump files overwritten.
// We seldom got concurrent exports for a container, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 7b392896b5f2..fa9d61221021 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.container.common.helpers.BlockData; @@ -266,7 +265,7 @@ private ScanResult scanData(DataTransferThrottler throttler, } else { // If schema V3 and container details not in DB or // if containerDBPath is removed - if ((onDiskContainerData.hasSchema(OzoneConsts.SCHEMA_V3) && + if ((onDiskContainerData.sharedDB() && db.getStore().getMetadataTable().get( onDiskContainerData.getBcsIdKey()) == null) || !new File(onDiskContainerData.getDbFile() diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index f0e350c2fb31..e91b0bd84197 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -31,8 +31,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_VERSION; import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix; +import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized; import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.common.base.Preconditions; @@ -46,7 +48,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .ContainerDataProto; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; @@ -68,6 +72,8 @@ public class KeyValueContainerData extends ContainerData { // Fields need to be stored in .container file. private static final List KV_YAML_FIELDS; + // Fields need to be stored in .container file for Schema V4. + private static final List KV_YAML_FIELDS_SCHEMA_V4; // Path to Container metadata Level DB/RocksDB Store and .container file.
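+ // Since Schema V4, these paths are no longer serialized into the .container file; they are re-derived from the container file's location when the yaml is read back.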
private String metadataPath; @@ -98,6 +104,11 @@ public class KeyValueContainerData extends ContainerData { KV_YAML_FIELDS.add(CHUNKS_PATH); KV_YAML_FIELDS.add(CONTAINER_DB_TYPE); KV_YAML_FIELDS.add(SCHEMA_VERSION); + + KV_YAML_FIELDS_SCHEMA_V4 = Lists.newArrayList(); + KV_YAML_FIELDS_SCHEMA_V4.addAll(YAML_FIELDS); + KV_YAML_FIELDS_SCHEMA_V4.add(CONTAINER_DB_TYPE); + KV_YAML_FIELDS_SCHEMA_V4.add(SCHEMA_VERSION); } /** @@ -150,7 +161,7 @@ public String getSchemaVersion() { * @throws UnsupportedOperationException If no valid schema version is found. */ public String getSupportedSchemaVersionOrDefault() { - String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3}; + String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3, SCHEMA_V4}; for (String version : versions) { if (this.hasSchema(version)) { @@ -336,7 +347,10 @@ public ContainerDataProto getProtoBufMessage() { } public static List getYamlFields() { - return Collections.unmodifiableList(KV_YAML_FIELDS); + List list = isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) + ? KV_YAML_FIELDS_SCHEMA_V4 + : KV_YAML_FIELDS; + return Collections.unmodifiableList(list); } /** @@ -426,7 +440,7 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { * for other schemas just return null. */ public String startKeyEmpty() { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { return getContainerKeyPrefix(getContainerID()); } return null; @@ -437,7 +451,7 @@ public String startKeyEmpty() { * for other schemas just return null. */ public String containerPrefix() { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { return getContainerKeyPrefix(getContainerID()); } return ""; @@ -451,7 +465,7 @@ public String containerPrefix() { * @return formatted key */ private String formatKey(String key) { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { key = getContainerKeyPrefix(getContainerID()) + key; } return key; @@ -461,4 +475,17 @@ public boolean hasSchema(String version) { return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, version); } + public boolean sharedDB() { + return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V3) || + KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V4); + } + + /** + * Whether this container's schema version is older than the given version. + * A null version on either side is treated as Schema V1; for example, a V3 + * container's olderSchemaThan(SCHEMA_V4) returns true. + */ + public boolean olderSchemaThan(String version) { + String target = version != null ? version : SCHEMA_V1; + String self = schemaVersion != null ?
schemaVersion : SCHEMA_V1; + return Integer.parseInt(self) < Integer.parseInt(target); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java index a4ebabe3c31f..546d412011c5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; @@ -292,7 +293,7 @@ static ObjectNode getAggregateValues(DatanodeStore store, (DatanodeStoreSchemaTwoImpl) store; pendingDelete = countPendingDeletesSchemaV2(schemaTwoStore, containerData); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + } else if (isSharedDBVersion(schemaVersion)) { DatanodeStoreSchemaThreeImpl schemaThreeStore = (DatanodeStoreSchemaThreeImpl) store; pendingDelete = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java index 5d3c001eaf73..f9ce3ef1aff1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java @@ -19,7 +19,6 @@ import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import com.google.common.annotations.VisibleForTesting; import java.io.BufferedOutputStream; @@ -185,7 +184,7 @@ public byte[] unpackContainerDescriptor(InputStream input) } public static Path getDbPath(KeyValueContainerData containerData) { - if (containerData.hasSchema(SCHEMA_V3)) { + if (containerData.sharedDB()) { return DatanodeStoreSchemaThreeImpl.getDumpDir( new File(containerData.getMetadataPath())).toPath(); } else { @@ -203,7 +202,7 @@ public static Path getDbPath(Path baseDir, Path dbPath = Paths.get(containerData.getDbFile().getPath()); Path relativePath = containerPath.relativize(dbPath); - if (containerData.hasSchema(SCHEMA_V3)) { + if (containerData.sharedDB()) { Path metadataDir = KeyValueContainerLocationUtil.getContainerMetaDataPath( baseDir.toString()).toPath(); return DatanodeStoreSchemaThreeImpl.getDumpDir(metadataDir.toFile()) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 730689539f94..be8bd2cb3833 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import com.google.common.base.Preconditions; import java.io.File; @@ -79,7 +80,7 @@ public static DatanodeStore getUncachedDatanodeStore( store = new DatanodeStoreSchemaOneImpl(conf, containerDBPath, readOnly); } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, containerDBPath, readOnly); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + } else if (isSharedDBVersion(schemaVersion)) { store = new DatanodeStoreSchemaThreeImpl(conf, containerDBPath, readOnly); } else { @@ -124,7 +125,7 @@ public static DBHandle getDB(KeyValueContainerData containerData, String containerDBPath = containerData.getDbFile().getAbsolutePath(); try { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { DatanodeStoreCache cache = DatanodeStoreCache.getInstance(); Preconditions.checkNotNull(cache); return cache.getDB(containerDBPath, conf); @@ -154,6 +155,7 @@ public static void removeDB(KeyValueContainerData container, Preconditions.checkNotNull(container); Preconditions.checkNotNull(container.getDbFile()); Preconditions.checkState(!container.hasSchema(OzoneConsts.SCHEMA_V3)); + Preconditions.checkState(!container.hasSchema(OzoneConsts.SCHEMA_V4)); ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); @@ -180,7 +182,7 @@ public static void shutdownCache(ConfigurationSource config) { */ public static void addDB(DatanodeStore store, String containerDBPath, ConfigurationSource conf, String schemaVersion) { - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { DatanodeStoreCache cache = DatanodeStoreCache.getInstance(); Preconditions.checkNotNull(cache); cache.addDB(containerDBPath, new RawDB(store, containerDBPath)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index d2882780a794..c978629cb959 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -122,7 +122,7 @@ private static String getContainerSubDirectory(long containerId) { * Return containerDB File. 
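+ * For shared-DB schemas (V3/V4) this is the per-volume RocksDB under the volume's dbParentDir; older schemas use a per-container DB directory.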
*/ public static File getContainerDBFile(KeyValueContainerData containerData) { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index ab87875dbdc3..904dc7be6d17 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.container.keyvalue.helpers; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import com.google.common.base.Preconditions; import java.io.File; @@ -104,8 +106,8 @@ public static void createContainerMetaData( } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, dbFile.getAbsolutePath(), false); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { - // We don't create per-container store for schema v3 containers, + } else if (isSharedDBVersion(schemaVersion)) { + // We don't create per-container store for schema v3/v4 containers, // they should use per-volume db store. return; } else { @@ -142,7 +144,7 @@ public static void removeContainer( public static void removeContainerDB( KeyValueContainerData containerData, ConfigurationSource conf) throws IOException { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // DB failure is catastrophic, the disk needs to be replaced. // In case of an exception, LOG the message and rethrow the exception. try { @@ -230,7 +232,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, config.getObject(DatanodeConfiguration.class); boolean bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir(); - if (kvContainerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (kvContainerData.sharedDB()) { try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) { populateContainerMetadata(kvContainerData, db.getStore(), bCheckChunksFilePath); @@ -434,6 +436,12 @@ public static boolean isSameSchemaVersion(String schema, String other) { return effective1.equals(effective2); } + public static boolean isSharedDBVersion(String schema) { + String effective = schema != null ? 
schema : SCHEMA_V1; + return effective.equals(SCHEMA_V3) || effective.equals(SCHEMA_V4); + } + + + /** * Moves container directory to a new location * under "volume/hdds/cluster-id/tmp/deleted-containers" diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java index 38fe872f30e4..4ca4c093d56d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import java.io.File; import java.io.IOException; @@ -147,7 +146,7 @@ private ContainerBackgroundTaskResult handleDeleteTask() throws Exception { crr = deleteViaSchema1(meta, container, dataDir, startTime); } else if (containerData.hasSchema(SCHEMA_V2)) { crr = deleteViaSchema2(meta, container, dataDir, startTime); - } else if (containerData.hasSchema(SCHEMA_V3)) { + } else if (containerData.sharedDB()) { crr = deleteViaSchema3(meta, container, dataDir, startTime); } else { throw new UnsupportedOperationException( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java index ffd038afdcc2..8f6a15bfd1ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java @@ -148,4 +148,26 @@ public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { return false; } } + + /** + * Utilities for container Schema V4 layout feature. + * Compared to Schema V3, this schema doesn't save chunksPath and metadataPath in the container yaml file.
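+ * New containers are created as V4 only once DATANODE_SCHEMA_V4 is finalized and schema V3 is enabled; otherwise chooseSchemaVersion falls back to the V3 logic.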
+ */ + public static class SchemaV4 { + public static String chooseSchemaVersion(ConfigurationSource conf) { + if (isFinalizedAndEnabled(conf)) { + return OzoneConsts.SCHEMA_V4; + } else { + return SchemaV3.chooseSchemaVersion(conf); + } + } + + public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { + DatanodeConfiguration dcf = conf.getObject(DatanodeConfiguration.class); + if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) && dcf.getContainerSchemaV3Enabled()) { + return true; + } + return false; + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 34a5553f311c..abf761ee7a4c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -22,13 +22,13 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -189,7 +189,7 @@ private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, containerID, numOfChunksPerBlock, buffer, chunkManager, container); } else if (isSameSchemaVersion(schemaVersion, SCHEMA_V2) - || isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { + || isSharedDBVersion(schemaVersion)) { createPendingDeleteBlocksViaTxn(numOfBlocksPerContainer, txnID, containerID, numOfChunksPerBlock, buffer, chunkManager, container, data); @@ -274,7 +274,7 @@ private void createTxn(KeyValueContainerData data, List containerBlocks, .initBatchOperation()) { DatanodeStore ds = metadata.getStore(); - if (isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = (DatanodeStoreSchemaThreeImpl) ds; dnStoreThreeImpl.getDeleteTransactionTable() @@ -381,7 +381,7 @@ private int getUnderDeletionBlocksCount(DBHandle meta, } } return pendingBlocks; - } else if (data.hasSchema(SCHEMA_V3)) { + } else if (data.sharedDB()) { int pendingBlocks = 0; DatanodeStore ds = meta.getStore(); DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = @@ -983,7 +983,7 @@ public void testContainerMaxLockHoldingTime( (containerData.get(0).getBytesUsed() == 0), 100, 3000); if (schemaVersion 
!= null && ( - schemaVersion.equals(SCHEMA_V2) || schemaVersion.equals(SCHEMA_V3))) { + schemaVersion.equals(SCHEMA_V2) || isSharedDBVersion(schemaVersion))) { // Since MaxLockHoldingTime is -1, every "deletion transaction" triggers // a timeout except the last one, where a "deletion transaction" diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index aa3ec32280fc..ad0867a90b6f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -605,7 +605,7 @@ private KeyValueContainerData newKvData() throws IOException { // Changing the paths above affects the checksum, so it was also removed // from the container file and calculated at run time. Yaml yaml = ContainerDataYaml.getYamlForContainerType( - kvData.getContainerType(), + kvData.getContainerType(), kvData, kvData.getReplicaIndex() > 0); kvData.computeAndSetChecksum(yaml); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index db6ca37fa652..42082962fa76 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData; import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -258,7 +259,7 @@ public void testAddingBlockToDeletedContainer( initSchemaAndVersionInfo(versionInfo); // With schema v3, we don't have a container dedicated db, // so skip check the behaviors related to it. 
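+ // (Schema V4 also uses the shared per-volume DB, hence the isSharedDBVersion check.)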
- assumeFalse(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)); + assumeFalse(isSharedDBVersion(schemaVersion)); long testContainerID = getTestContainerID(); Thread.sleep(100); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 5b9a5abe0d85..f8ef7b70b6a4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler.DeleteBlockTransactionExecutionResult; @@ -135,10 +136,12 @@ private void setup() throws Exception { TestSchemaHandler testSchemaHandler1 = spy(new TestSchemaHandler()); TestSchemaHandler testSchemaHandler2 = spy(new TestSchemaHandler()); TestSchemaHandler testSchemaHandler3 = spy(new TestSchemaHandler()); + TestSchemaHandler testSchemaHandler4 = spy(new TestSchemaHandler()); handler.getSchemaHandlers().put(SCHEMA_V1, testSchemaHandler1); handler.getSchemaHandlers().put(SCHEMA_V2, testSchemaHandler2); handler.getSchemaHandlers().put(SCHEMA_V3, testSchemaHandler3); + handler.getSchemaHandlers().put(SCHEMA_V4, testSchemaHandler4); } @AfterEach diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java index bb336482c3dc..b96c914dc1ee 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.container.keyvalue; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -55,6 +55,7 @@ public class ContainerTestVersionInfo { OzoneConsts.SCHEMA_V1, OzoneConsts.SCHEMA_V2, OzoneConsts.SCHEMA_V3, + OzoneConsts.SCHEMA_V4 }; private final String schemaVersion; @@ -93,7 +94,7 @@ public static List getLayoutList() { } public static void setTestSchemaVersion(String schemaVersion, OzoneConfiguration conf) { - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { ContainerTestUtils.enableSchemaV3(conf); } else { 
ContainerTestUtils.disableSchemaV3(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 083afa4b0560..4e6e0af798ba 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -19,9 +19,9 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -582,7 +582,7 @@ public void testDeleteContainer(ContainerTestVersionInfo versionInfo) assertFalse(keyValueContainer.getContainerFile().exists(), "Container File still exists"); - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { assertTrue(keyValueContainer.getContainerDBFile().exists()); } else { assertFalse(keyValueContainer.getContainerDBFile().exists(), @@ -766,7 +766,7 @@ public void testDBProfileAffectsDBOptions( } // DBOtions should be different, except SCHEMA-V3 - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { assertEquals( outProfile1.getDBOptions().compactionReadaheadSize(), outProfile2.getDBOptions().compactionReadaheadSize()); @@ -810,7 +810,7 @@ public void testKeyValueDataProtoBufMsg(ContainerTestVersionInfo versionInfo) void testAutoCompactionSmallSstFile( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)); + assumeTrue(isSharedDBVersion(schemaVersion)); // Create a new HDDS volume String volumeDirPath = Files.createDirectory(folder.toPath().resolve("volumeDir")).toFile() @@ -1059,7 +1059,8 @@ private void testMixedSchemaImport(String dir, // verify container schema if (schemaV3Enabled) { - assertEquals(SCHEMA_V3, + // After HDDS-6611, it's V4 when schemaV3Enabled is true + assertEquals(SCHEMA_V4, container.getContainerData().getSchemaVersion()); } else { assertEquals(SCHEMA_V2, @@ -1092,7 +1093,8 @@ private void testMixedSchemaImport(String dir, importedContainer.importContainerData(fio, packer); } - assertEquals(schemaV3Enabled ? SCHEMA_V3 : SCHEMA_V2, + // After HDDS-6611, it's V4 when schemaV3Enabled is true + assertEquals(schemaV3Enabled ? 
SCHEMA_V4 : SCHEMA_V2, importedContainer.getContainerData().getSchemaVersion()); assertEquals(pendingDeleteBlockCount, importedContainer.getContainerData().getNumPendingDeletionBlocks()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 87a15d58af7b..3ceda8ded73c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -186,6 +186,8 @@ void testKeyValueContainerCheckDeleted(ContainerTestVersionInfo versionInfo) .thenReturn(containerData.getContainerDBType()); when(mockContainerData.getSchemaVersion()) .thenReturn(containerData.getSchemaVersion()); + when(mockContainerData.sharedDB()) + .thenReturn(containerData.sharedDB()); // Mimic the scenario where scanning starts just before // blocks are marked for deletion. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index c873a4b6f0e5..0881c230eefa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; @@ -84,10 +83,6 @@ private void initTests(Boolean enable) throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); - conf.setBoolean( - OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); - conf.setBoolean( - OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } @@ -413,7 +408,8 @@ public void testWriteWithV3Enabled(boolean schemaV3Enabled) throws Exception { public void testWriteWithV3Disabled(boolean schemaV3Enabled) throws Exception { initTests(schemaV3Enabled); - testWrite(true, OzoneConsts.SCHEMA_V3); + // After HDDS-6611, it's V4 + testWrite(true, OzoneConsts.SCHEMA_V4); } public void testWrite(boolean enable, String expectedVersion) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java new file mode 100644 index 000000000000..0d16ea6d9639 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java @@ -0,0 +1,384 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.upgrade; + +import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; +import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.attribute.FileTime; +import java.util.Collections; +import java.util.stream.Stream; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.ozone.container.common.ScmTestMock; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +/** + * Tests upgrading a single datanode to container Schema V4.
+ */ +public class TestDatanodeUpgradeToSchemaV4 { + @TempDir + private File tempFolder; + + private DatanodeStateMachine dsm; + private OzoneConfiguration conf; + private static final String CLUSTER_ID = "clusterID"; + + private RPC.Server scmRpcServer; + private InetSocketAddress address; + + private void initTests(Boolean enable) throws Exception { + boolean schemaV3Enabled = enable; + conf = new OzoneConfiguration(); + conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); + setup(); + } + + private void setup() throws Exception { + address = SCMTestUtils.getReuseableAddress(); + conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFolder.toString()); + } + + @AfterEach + public void teardown() throws Exception { + if (scmRpcServer != null) { + scmRpcServer.stop(); + } + + if (dsm != null) { + dsm.close(); + } + } + + public static Stream parameters() { + return Stream.of( + arguments(true, false), + arguments(true, true), + arguments(false, false), + arguments(false, true) + ); + } + + /** + * a. A new container will be schema V2/V3 before DATANODE_SCHEMA_V4 is finalized, + * depending on whether V3 is enabled or not. + * b. A new container will be schema V2/V4 after DATANODE_SCHEMA_V4 is finalized, + * depending on whether V3 is enabled or not. + */ + @ParameterizedTest(name = "schema V3 enabled :{0}, SchemaV4 finalized: {1}") + @MethodSource("parameters") + public void testContainerSchemaV4(boolean schemaV3Enabled, boolean finalize) throws Exception { + initTests(schemaV3Enabled); + // start DN and SCM + ScmTestMock scmTestMock = new ScmTestMock(CLUSTER_ID); + scmRpcServer = SCMTestUtils.startScmRpcServer(conf, scmTestMock, address, 10); + UpgradeTestHelper.addHddsVolume(conf, tempFolder.toPath()); + dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder.toPath(), dsm, address, + HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion()); + ContainerDispatcher dispatcher = dsm.getContainer().getDispatcher(); + dispatcher.setClusterId(CLUSTER_ID); + if (finalize) { + dsm.finalizeUpgrade(); + } + + final Pipeline pipeline = MockPipeline.createPipeline( + Collections.singletonList(dsm.getDatanodeDetails())); + // Create a container to write data. + final long containerID1 = UpgradeTestHelper.addContainer(dispatcher, pipeline); + UpgradeTestHelper.putBlock(dispatcher, containerID1, pipeline); + UpgradeTestHelper.closeContainer(dispatcher, containerID1, pipeline); + KeyValueContainer container = (KeyValueContainer) + dsm.getContainer().getContainerSet().getContainer(containerID1); + File yamlFile = container.getContainerFile(); + String content = + FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); + if (finalize) { + if (schemaV3Enabled) { + assertThat(content).doesNotContain(METADATA_PATH); + assertThat(content).doesNotContain(CHUNKS_PATH); + // V3 is converted to V4 on container yaml file update during container close.
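+ // (writeToContainerFile upgrades the in-memory schema version from V3 to V4 once DATANODE_SCHEMA_V4 is finalized, so the closed container reports V4.)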
+ assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V4)); + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V2)); + } + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + if (schemaV3Enabled) { + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V3)); + } else { + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V2)); + } + } + assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), + container.getContainerData().getChunksPath()); + assertEquals(yamlFile.getParentFile().getAbsolutePath(), container.getContainerData().getMetadataPath()); + File containerDir = new File(container.getContainerData().getContainerPath()); + assertTrue(containerDir.exists() && containerDir.isDirectory()); + FileTime creationTime1 = (FileTime) Files.getAttribute(containerDir.toPath(), "creationTime"); + + // export the container + File folderToExport = Files.createFile( + tempFolder.toPath().resolve("export-testContainerSchemaV4.tar")).toFile(); + TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); + + //export the container + try (FileOutputStream fos = new FileOutputStream(folderToExport)) { + container.exportContainerData(fos, packer); + } + + //delete the original one + KeyValueContainerUtil.removeContainer(container.getContainerData(), conf); + container.delete(); + assertFalse(new File(container.getContainerData().getContainerPath()).exists()); + if (schemaV3Enabled) { + assertThat(container.getContainerData().getDbFile()).exists(); + } + + //create a new one + KeyValueContainerData oldContainerData = container.getContainerData(); + KeyValueContainerData newContainerData = + new KeyValueContainerData(containerID1, + oldContainerData.getLayoutVersion(), + oldContainerData.getMaxSize(), pipeline.getId().getId().toString(), + dsm.getDatanodeDetails().getUuidString()); + newContainerData.setSchemaVersion(oldContainerData.getSchemaVersion()); + KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf); + newContainer.populatePathFields(scmTestMock.getClusterId(), oldContainerData.getVolume()); + + // verify yaml file checksum + try (FileInputStream fis = new FileInputStream(folderToExport)) { + byte[] containerDescriptorYaml = packer.unpackContainerDescriptor(fis); + KeyValueContainerData data = (KeyValueContainerData) ContainerDataYaml + .readContainer(containerDescriptorYaml); + ContainerUtils.verifyChecksum(data, conf); + } + + // sleep 1s to make sure creationTime will have different value. 
+    Thread.sleep(1000);
+    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+      newContainer.importContainerData(fis, packer);
+    }
+
+    assertTrue(isContainerEqual(newContainerData, oldContainerData));
+    assertThat(new File(newContainerData.getContainerPath())).exists();
+    assertThat(new File(newContainerData.getChunksPath())).exists();
+    assertThat(new File(newContainerData.getMetadataPath())).exists();
+    if (schemaV3Enabled) {
+      assertThat(newContainerData.getDbFile()).exists();
+      assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile());
+    }
+    yamlFile = newContainer.getContainerFile();
+    content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset());
+    if (finalize) {
+      if (schemaV3Enabled) {
+        assertThat(content).doesNotContain(METADATA_PATH);
+        assertThat(content).doesNotContain(CHUNKS_PATH);
+      } else {
+        assertThat(content).contains(METADATA_PATH);
+        assertThat(content).contains(CHUNKS_PATH);
+      }
+    } else {
+      assertThat(content).contains(METADATA_PATH);
+      assertThat(content).contains(CHUNKS_PATH);
+    }
+    assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(),
+        newContainer.getContainerData().getChunksPath());
+    assertEquals(yamlFile.getParentFile().getAbsolutePath(), newContainer.getContainerData().getMetadataPath());
+    FileTime creationTime2 = (FileTime) Files.getAttribute(
+        Paths.get(newContainer.getContainerData().getContainerPath()), "creationTime");
+    assertNotEquals(creationTime1.toInstant(), creationTime2.toInstant());
+  }
+
+  /**
+   * Test that a container created before finalization is still accessible afterwards.
+   * A V3 container is automatically migrated to V4 whenever its container yaml file is updated on disk.
+   */
+  @ParameterizedTest(name = "schema V3 enabled: {0}, export container before finalization: {1}")
+  @MethodSource("parameters")
+  public void testContainerBeforeFinalization(
+      boolean schemaV3Enabled, boolean exportBeforeFinalization) throws Exception {
+    initTests(schemaV3Enabled);
+    // start DN and SCM
+    ScmTestMock scmTestMock = new ScmTestMock(CLUSTER_ID);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf, scmTestMock, address, 10);
+    UpgradeTestHelper.addHddsVolume(conf, tempFolder.toPath());
+    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder.toPath(), dsm, address,
+        HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion());
+    ContainerDispatcher dispatcher = dsm.getContainer().getDispatcher();
+    dispatcher.setClusterId(CLUSTER_ID);
+
+    final Pipeline pipeline = MockPipeline.createPipeline(
+        Collections.singletonList(dsm.getDatanodeDetails()));
+    // Create a container, write a block to it and close it.
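+    // This container is created before finalization, so it starts on schema V2 or V3.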
+    final long containerID1 = UpgradeTestHelper.addContainer(dispatcher, pipeline);
+    UpgradeTestHelper.putBlock(dispatcher, containerID1, pipeline);
+    UpgradeTestHelper.closeContainer(dispatcher, containerID1, pipeline);
+    KeyValueContainer container = (KeyValueContainer)
+        dsm.getContainer().getContainerSet().getContainer(containerID1);
+    File yamlFile = container.getContainerFile();
+    String content =
+        FileUtils.readFileToString(yamlFile, Charset.defaultCharset());
+    // before finalization the yaml file still contains chunksPath and metadataPath
+    assertThat(content).contains(METADATA_PATH);
+    assertThat(content).contains(CHUNKS_PATH);
+    assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(),
+        container.getContainerData().getChunksPath());
+    assertEquals(yamlFile.getParentFile().getAbsolutePath(), container.getContainerData().getMetadataPath());
+    File containerDir = new File(container.getContainerData().getContainerPath());
+    assertTrue(containerDir.exists() && containerDir.isDirectory());
+    FileTime creationTime1 = (FileTime) Files.getAttribute(containerDir.toPath(), "creationTime");
+
+    File folderToExport = Files.createFile(
+        tempFolder.toPath().resolve("export-testContainerBeforeFinalization.tar")).toFile();
+    TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
+    if (exportBeforeFinalization) {
+      // export the container
+      try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+        container.exportContainerData(fos, packer);
+      }
+    }
+
+    dsm.finalizeUpgrade();
+
+    if (!exportBeforeFinalization) {
+      // export the container
+      try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+        container.exportContainerData(fos, packer);
+      }
+    }
+
+    // delete the original one
+    KeyValueContainerUtil.removeContainer(container.getContainerData(), conf);
+    container.delete();
+    assertFalse(new File(container.getContainerData().getContainerPath()).exists());
+    if (schemaV3Enabled) {
+      assertThat(container.getContainerData().getDbFile()).exists();
+    }
+
+    // create a new one
+    KeyValueContainerData oldContainerData = container.getContainerData();
+    KeyValueContainerData newContainerData =
+        new KeyValueContainerData(containerID1,
+            oldContainerData.getLayoutVersion(),
+            oldContainerData.getMaxSize(), pipeline.getId().getId().toString(),
+            dsm.getDatanodeDetails().getUuidString());
+    newContainerData.setSchemaVersion(oldContainerData.getSchemaVersion());
+    KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf);
+    newContainer.populatePathFields(scmTestMock.getClusterId(), oldContainerData.getVolume());
+
+    // verify yaml file checksum
+    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+      byte[] containerDescriptorYaml = packer.unpackContainerDescriptor(fis);
+      KeyValueContainerData data = (KeyValueContainerData) ContainerDataYaml
+          .readContainer(containerDescriptorYaml);
+      ContainerUtils.verifyChecksum(data, conf);
+    }
+
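+    // The import below rewrites the container yaml on disk; as the javadoc above
+    // notes, that update is what migrates a V3 container to V4 (asserted further down).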
+    // sleep 1s to make sure creationTime has a different value.
+    Thread.sleep(1000);
+    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+      newContainer.importContainerData(fis, packer);
+    }
+
+    assertTrue(isContainerEqual(newContainerData, oldContainerData));
+    assertThat(new File(newContainerData.getContainerPath())).exists();
+    assertThat(new File(newContainerData.getChunksPath())).exists();
+    assertThat(new File(newContainerData.getMetadataPath())).exists();
+    if (schemaV3Enabled) {
+      assertThat(newContainerData.getDbFile()).exists();
+      assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile());
+    }
+    yamlFile = newContainer.getContainerFile();
+    content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset());
+    if (schemaV3Enabled) {
+      assertThat(content).doesNotContain(METADATA_PATH);
+      assertThat(content).doesNotContain(CHUNKS_PATH);
+      // V3 is migrated to V4 automatically.
+      assertTrue(newContainer.getContainerData().hasSchema(SCHEMA_V4));
+    } else {
+      assertThat(content).contains(METADATA_PATH);
+      assertThat(content).contains(CHUNKS_PATH);
+      assertTrue(newContainer.getContainerData().hasSchema(SCHEMA_V2));
+    }
+
+    assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(),
+        newContainer.getContainerData().getChunksPath());
+    assertEquals(yamlFile.getParentFile().getAbsolutePath(), newContainer.getContainerData().getMetadataPath());
+    FileTime creationTime2 = (FileTime) Files.getAttribute(
+        Paths.get(newContainer.getContainerData().getContainerPath()), "creationTime");
+    assertNotEquals(creationTime1.toInstant(), creationTime2.toInstant());
+  }
+
+  private boolean isContainerEqual(KeyValueContainerData containerData1, KeyValueContainerData containerData2) {
+    return new EqualsBuilder()
+        .append(containerData1.getContainerID(), containerData2.getContainerID())
+        .append(containerData1.getContainerDBType(), containerData2.getContainerDBType())
+        .append(containerData1.getState(), containerData2.getState())
+        .append(containerData1.getLayoutVersion(), containerData2.getLayoutVersion())
+        .append(containerData1.getBlockCount(), containerData2.getBlockCount())
+        .append(containerData1.getBytesUsed(), containerData2.getBytesUsed())
+        .append(containerData1.getMetadataPath(), containerData2.getMetadataPath())
+        .append(containerData1.getContainerPath(), containerData2.getContainerPath())
+        .append(containerData1.getChunksPath(), containerData2.getChunksPath())
+        .isEquals();
+  }
+}
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
index 282aa8f168fe..09fd28e0da3c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
@@ -26,4 +26,4 @@ Create a container and check container schema version
     ${output} =           Execute    ozone admin container create
     Should not contain    ${output}    Failed
     ${output} =           Execute    ozone debug datanode container list
-    Should contain        ${output}    \"schemaVersion\" : \"3\"
+    Should contain        ${output}    \"schemaVersion\" : \"4\"
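Reviewer note: the sketch below is not part of the patch. It illustrates how the V4 behavior exercised by the tests above can be checked against a single on-disk descriptor, using only APIs that appear in this diff (`ContainerDataYaml.readContainerFile`, `ContainerUtils.verifyChecksum`, `KeyValueContainerData#getSchemaVersion`). The descriptor path and the class name `SchemaVersionCheck` are made up for illustration.

```java
import java.io.File;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

public final class SchemaVersionCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical path to a container descriptor on a datanode volume;
    // pass a real path as the first argument.
    File containerFile = new File(args.length > 0 ? args[0]
        : "/data/hdds/example-volume/1/metadata/1.container");

    // readContainerFile fills in chunksPath/metadataPath when the yaml
    // omits them, i.e. for schema V4 descriptors.
    KeyValueContainerData data = (KeyValueContainerData)
        ContainerDataYaml.readContainerFile(containerFile);

    System.out.println("schemaVersion = " + data.getSchemaVersion());
    System.out.println("chunksPath    = " + data.getChunksPath());
    System.out.println("metadataPath  = " + data.getMetadataPath());

    // The stored checksum must still verify after the paths are reconstructed.
    ContainerUtils.verifyChecksum(data, new OzoneConfiguration());
  }
}
```

For a V4 descriptor the two path fields should print non-null values even though they are absent from the yaml file, which is exactly what the tests above assert after close, export and re-import.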