@@ -145,10 +145,6 @@ public final class ScmConfigKeys {
   public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT =
       "32KB";
 
-  public static final String OZONE_CHUNK_LIST_INCREMENTAL =
-      "ozone.incremental.chunk.list";
-  public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = true;
-
   public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY =
       "ozone.scm.container.layout";
hadoop-hdds/common/src/main/resources/ozone-default.xml (0 additions & 11 deletions)
@@ -851,17 +851,6 @@
       The default read threshold to use memory mapped buffers.
     </description>
   </property>
-  <property>
-    <name>ozone.incremental.chunk.list</name>
-    <value>true</value>
-    <tag>OZONE, CLIENT, DATANODE, PERFORMANCE</tag>
-    <description>
-      By default, a writer client sends full chunk list of a block when it
-      sends PutBlock requests. Changing this configuration to true will send
-      only incremental chunk list which reduces metadata overhead and improves
-      hsync performance.
-    </description>
-  </property>
   <property>
     <name>ozone.scm.container.layout</name>
     <value>FILE_PER_BLOCK</value>
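With the datanode-side key removed, incremental chunk lists are controlled purely from the client. A minimal sketch of the client-side setup, assuming the "ozone.client.incremental.chunk.list" key that the tests further down in this diff already set:

    // Sketch: client-side opt-in to incremental chunk lists. The key
    // "ozone.client.incremental.chunk.list" is the one exercised by the
    // tests later in this diff; the removed datanode-side key no longer exists.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean("ozone.client.incremental.chunk.list", true);
    conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); // hsync is the main consumer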
@@ -173,6 +173,7 @@ private boolean canIgnoreException(Result result) {
     case CONTAINER_UNHEALTHY:
     case CLOSED_CONTAINER_IO:
     case DELETE_ON_OPEN_CONTAINER:
+    case UNSUPPORTED_REQUEST: // Blame client for sending unsupported request.
       return true;
     default:
       return false;
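For context, a rough sketch of how a dispatcher could use this predicate to keep client-caused failures out of the datanode's error accounting; the logFailure helper below is illustrative only, not the actual dispatcher code:

    // Illustrative only: UNSUPPORTED_REQUEST now counts as a client-side
    // problem, so it is not surfaced as a datanode failure.
    private void logFailure(ContainerProtos.ContainerCommandRequestProto msg,
        StorageContainerException sce) {
      if (canIgnoreException(sce.getResult())) {
        // e.g. a client ahead of this datanode's finalized layout version;
        // nothing is wrong with the container itself.
        LOG.debug("Ignoring {} failure: {}", msg.getCmdType(), sce.getMessage());
      } else {
        LOG.error("Container command {} failed", msg.getCmdType(), sce);
      }
    }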
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hdds.scm.ByteStringConversion;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.FaultInjector;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -101,6 +102,7 @@
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockDataResponse;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockLengthResponse;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getEchoResponse;
@@ -590,6 +592,11 @@ ContainerCommandResponseProto handleFinalizeBlock(
     ContainerProtos.BlockData responseData;
 
     try {
+      if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+        throw new StorageContainerException("DataNode has not finalized " +
+            "upgrading to a version that supports block finalization.", UNSUPPORTED_REQUEST);
+      }
+
       checkContainerOpen(kvContainer);
       BlockID blockID = BlockID.getFromProtobuf(
           request.getFinalizeBlock().getBlockID());
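The net effect is that a FinalizeBlock request reaching a datanode that has not finalized HBASE_SUPPORT fails fast with a result code the dispatcher attributes to the client. A hypothetical test-style sketch of the expected response (the dispatcher and finalizeBlockRequest variables are assumed, not taken from this PR):

    // Hypothetical assertion: pre-finalization, FinalizeBlock is rejected
    // up front with UNSUPPORTED_REQUEST rather than failing mid-operation.
    ContainerProtos.ContainerCommandResponseProto response =
        dispatcher.dispatch(finalizeBlockRequest, null);
    assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, response.getResult());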
@@ -41,8 +41,7 @@
 import com.google.common.base.Preconditions;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,7 +64,6 @@ public class BlockManagerImpl implements BlockManager {
   // Default Read Buffer capacity when Checksum is not present
   private final int defaultReadBufferCapacity;
   private final int readMappedBufferThreshold;
-  private boolean incrementalEnabled;
 
   /**
    * Constructs a Block Manager.
@@ -81,15 +79,6 @@ public BlockManagerImpl(ConfigurationSource conf) {
     this.readMappedBufferThreshold = config.getBufferSize(
         ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY,
         ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT);
-    incrementalEnabled =
-        config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL,
-            OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT);
-    if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized(
-        HDDSLayoutFeature.HBASE_SUPPORT)) {
-      LOG.warn("DataNode has not finalized upgrading to a version that " +
-          "supports incremental chunk list. Fallback to full chunk list");
-      incrementalEnabled = false;
-    }
   }
 
   @Override
@@ -162,6 +151,14 @@ public long persistPutBlock(KeyValueContainer container,
       }
     }
 
+    boolean incrementalEnabled = true;
+    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+      if (isPartialChunkList(data)) {
+        throw new StorageContainerException("DataNode has not finalized " +
+            "upgrading to a version that supports incremental chunk list.", UNSUPPORTED_REQUEST);
+      }
+      incrementalEnabled = false;
+    }
     db.getStore().putBlockByID(batch, incrementalEnabled, localID, data,
         containerData, endOfBlock);
     if (bcsId != 0) {
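Instead of a process-wide config read at startup, persistPutBlock now derives the flag from the layout state on every call and rejects only blocks that actually carry a partial chunk list. The marker it keys on is the INCREMENTAL_CHUNK_LIST metadata entry, as this client-perspective sketch (mirroring the mergeLastChunkForBlockFinalization hunk below) illustrates:

    // Sketch: a block is a partial (incremental) chunk list iff it carries
    // the INCREMENTAL_CHUNK_LIST metadata entry; presence, not value, is the
    // flag. This is exactly what the new isPartialChunkList() helper checks.
    BlockData data = new BlockData(blockID);
    data.addMetadata(INCREMENTAL_CHUNK_LIST, "");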
@@ -258,7 +255,7 @@ private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db,
     if (blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) {
       BlockData emptyBlockData = new BlockData(blockId);
       emptyBlockData.addMetadata(INCREMENTAL_CHUNK_LIST, "");
-      db.getStore().putBlockByID(batch, incrementalEnabled, localID,
+      db.getStore().putBlockByID(batch, true, localID,
           emptyBlockData, kvContainer.getContainerData(), true);
     }
   }
@@ -368,4 +365,8 @@ private BlockData getBlockByID(DBHandle db, BlockID blockID,
     String blockKey = containerData.getBlockKey(blockID.getLocalID());
     return db.getStore().getBlockByID(blockID, blockKey);
   }
+
+  private static boolean isPartialChunkList(BlockData data) {
+    return data.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST);
+  }
 }
@@ -45,7 +45,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
@@ -84,7 +83,6 @@ private void initTest(ContainerTestVersionInfo versionInfo)
     this.schemaVersion = versionInfo.getSchemaVersion();
     this.config = new OzoneConfiguration();
     ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, config);
-    config.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
     initilaze();
   }
@@ -44,7 +44,6 @@
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -74,8 +73,6 @@ private void init(boolean incrementalChunkList) throws IOException {

     ((InMemoryConfiguration) config).setBoolean(
         OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
-    ((InMemoryConfiguration) config).setBoolean(
-        OZONE_CHUNK_LIST_INCREMENTAL, incrementalChunkList);
 
     RpcClient rpcClient = new RpcClient(config, null) {
@@ -110,7 +110,6 @@
 import org.slf4j.event.Level;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
@@ -176,7 +175,6 @@ public static void init() throws Exception {
     CONF.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
     CONF.setBoolean("ozone.client.incremental.chunk.list", true);
     CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true);
-    CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
     CONF.setTimeDuration(OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL,
         SERVICE_INTERVAL, TimeUnit.MILLISECONDS);
     CONF.setTimeDuration(OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD,