diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index dbbfa9923e8a..593764ee09cf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -145,10 +145,6 @@ public final class ScmConfigKeys {
public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT =
"32KB";
- public static final String OZONE_CHUNK_LIST_INCREMENTAL =
- "ozone.incremental.chunk.list";
- public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = true;
-
public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY =
"ozone.scm.container.layout";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8be6526c194f..ce990d43a8eb 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -851,17 +851,6 @@
The default read threshold to use memory mapped buffers.
-
- ozone.incremental.chunk.list
- true
- OZONE, CLIENT, DATANODE, PERFORMANCE
-
- By default, a writer client sends full chunk list of a block when it
- sends PutBlock requests. Changing this configuration to true will send
- only incremental chunk list which reduces metadata overhead and improves
- hsync performance.
-
-
ozone.scm.container.layout
FILE_PER_BLOCK
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 740dfa4e4135..5d309d0e633e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -173,6 +173,7 @@ private boolean canIgnoreException(Result result) {
case CONTAINER_UNHEALTHY:
case CLOSED_CONTAINER_IO:
case DELETE_ON_OPEN_CONTAINER:
+ case UNSUPPORTED_REQUEST: // Client-side fault: the client sent a request this datanode does not support.
return true;
default:
return false;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index c83b93e412a3..402e1be4cd0f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.hdds.scm.ByteStringConversion;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.FaultInjector;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -101,6 +102,7 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockDataResponse;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockLengthResponse;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getEchoResponse;
@@ -590,6 +592,11 @@ ContainerCommandResponseProto handleFinalizeBlock(
ContainerProtos.BlockData responseData;
try {
+ if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+ throw new StorageContainerException("DataNode has not finalized " +
+ "upgrading to a version that supports block finalization.", UNSUPPORTED_REQUEST);
+ }
+
checkContainerOpen(kvContainer);
BlockID blockID = BlockID.getFromProtobuf(
request.getFinalizeBlock().getBlockID());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 9ca0aa89843d..7adc95a7e627 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -41,8 +41,7 @@
import com.google.common.base.Preconditions;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -65,7 +64,6 @@ public class BlockManagerImpl implements BlockManager {
// Default Read Buffer capacity when Checksum is not present
private final int defaultReadBufferCapacity;
private final int readMappedBufferThreshold;
- private boolean incrementalEnabled;
/**
* Constructs a Block Manager.
@@ -81,15 +79,6 @@ public BlockManagerImpl(ConfigurationSource conf) {
this.readMappedBufferThreshold = config.getBufferSize(
ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY,
ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT);
- incrementalEnabled =
- config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL,
- OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT);
- if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized(
- HDDSLayoutFeature.HBASE_SUPPORT)) {
- LOG.warn("DataNode has not finalized upgrading to a version that " +
- "supports incremental chunk list. Fallback to full chunk list");
- incrementalEnabled = false;
- }
}
@Override
@@ -162,6 +151,14 @@ public long persistPutBlock(KeyValueContainer container,
}
}
+ boolean incrementalEnabled = true;
+ if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+ if (isPartialChunkList(data)) {
+ throw new StorageContainerException("DataNode has not finalized " +
+ "upgrading to a version that supports incremental chunk list.", UNSUPPORTED_REQUEST);
+ }
+ incrementalEnabled = false;
+ }
db.getStore().putBlockByID(batch, incrementalEnabled, localID, data,
containerData, endOfBlock);
if (bcsId != 0) {
@@ -258,7 +255,7 @@ private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db,
if (blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) {
BlockData emptyBlockData = new BlockData(blockId);
emptyBlockData.addMetadata(INCREMENTAL_CHUNK_LIST, "");
- db.getStore().putBlockByID(batch, incrementalEnabled, localID,
+ db.getStore().putBlockByID(batch, true, localID,
emptyBlockData, kvContainer.getContainerData(), true);
}
}
@@ -368,4 +365,8 @@ private BlockData getBlockByID(DBHandle db, BlockID blockID,
String blockKey = containerData.getBlockKey(blockID.getLocalID());
return db.getStore().getBlockByID(blockID, blockKey);
}
+
+ private static boolean isPartialChunkList(BlockData data) {
+ return data.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST);
+ }
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
index 26d959e88600..38a01e46900d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
@@ -45,7 +45,6 @@
import java.util.List;
import java.util.UUID;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
@@ -84,7 +83,6 @@ private void initTest(ContainerTestVersionInfo versionInfo)
this.schemaVersion = versionInfo.getSchemaVersion();
this.config = new OzoneConfiguration();
ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, config);
- config.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
initilaze();
}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
index 1014b943a2a2..5f2b80bdef6c 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
@@ -44,7 +44,6 @@
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -74,8 +73,6 @@ private void init(boolean incrementalChunkList) throws IOException {
((InMemoryConfiguration) config).setBoolean(
OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
- ((InMemoryConfiguration) config).setBoolean(
- OZONE_CHUNK_LIST_INCREMENTAL, incrementalChunkList);
RpcClient rpcClient = new RpcClient(config, null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 56d38f9f0264..98d7388310b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -110,7 +110,6 @@
import org.slf4j.event.Level;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
@@ -176,7 +175,6 @@ public static void init() throws Exception {
CONF.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
CONF.setBoolean("ozone.client.incremental.chunk.list", true);
CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true);
- CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
CONF.setTimeDuration(OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL,
SERVICE_INTERVAL, TimeUnit.MILLISECONDS);
CONF.setTimeDuration(OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD,