@@ -145,10 +145,6 @@ public final class ScmConfigKeys {
   public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT =
       "32KB";
 
-  public static final String OZONE_CHUNK_LIST_INCREMENTAL =
-      "ozone.incremental.chunk.list";
-  public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = true;
-
   public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY =
       "ozone.scm.container.layout";
hadoop-hdds/common/src/main/resources/ozone-default.xml (0 additions, 11 deletions)
@@ -851,17 +851,6 @@
       The default read threshold to use memory mapped buffers.
     </description>
   </property>
-  <property>
-    <name>ozone.incremental.chunk.list</name>
-    <value>true</value>
-    <tag>OZONE, CLIENT, DATANODE, PERFORMANCE</tag>
-    <description>
-      By default, a writer client sends full chunk list of a block when it
-      sends PutBlock requests. Changing this configuration to true will send
-      only incremental chunk list which reduces metadata overhead and improves
-      hsync performance.
-    </description>
-  </property>
   <property>
     <name>ozone.scm.container.layout</name>
     <value>FILE_PER_BLOCK</value>
@@ -41,8 +41,6 @@
 import com.google.common.base.Preconditions;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,7 +63,6 @@ public class BlockManagerImpl implements BlockManager {
   // Default Read Buffer capacity when Checksum is not present
   private final int defaultReadBufferCapacity;
   private final int readMappedBufferThreshold;
-  private boolean incrementalEnabled;
 
   /**
    * Constructs a Block Manager.
@@ -81,15 +78,6 @@ public BlockManagerImpl(ConfigurationSource conf) {
     this.readMappedBufferThreshold = config.getBufferSize(
         ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY,
         ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT);
-    incrementalEnabled =
-        config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL,
-            OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT);
-    if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized(
-        HDDSLayoutFeature.HBASE_SUPPORT)) {
-      LOG.warn("DataNode has not finalized upgrading to a version that " +
-          "supports incremental chunk list. Fallback to full chunk list");
-      incrementalEnabled = false;
-    }
   }
 
   @Override
@@ -162,6 +150,14 @@ public long persistPutBlock(KeyValueContainer container,
       }
     }
 
+    boolean incrementalEnabled = true;
+    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+      if (isPartialChunkList(data)) {
+        throw new UnsupportedOperationException("DataNode has not finalized " +
+            "upgrading to a version that supports incremental chunk list.");
+      }
+      incrementalEnabled = false;
+    }
     db.getStore().putBlockByID(batch, incrementalEnabled, localID, data,
         containerData, endOfBlock);
     if (bcsId != 0) {

Contributor (review comment on the new UnsupportedOperationException):
I'm not sure the exception is correct here. On the client side we use the unchecked UnsupportedOperationException because it's OK to crash the client at that point. On the server we want to throw an exception type and result code that will be translated back to the client. I think this would be StorageContainerException with a new result code similar to NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION, which exists on the OM.

Note that there are no tests in the PR. Per the mailing thread it looks like we are going to do those after the merge, which is OK, but we should try to make sure this is working correctly here, because I think this is the first time we have rejected a request based on a layout version on the datanode.
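
A hedged sketch of that suggestion, not the merged change; the result code name below is hypothetical and would need to be added to ContainerProtos.Result:

// Sketch of the reviewer's suggestion, not the merged change. The result
// code UNFINALIZED_LAYOUT_VERSION is hypothetical: it would need to be
// added to ContainerProtos.Result, mirroring the OM's
// NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION.
if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)
    && isPartialChunkList(data)) {
  // StorageContainerException is an IOException subtype carrying a result
  // code, so the dispatcher can translate it into an error response for
  // the client instead of surfacing an unchecked exception on the server.
  throw new StorageContainerException("DataNode has not finalized " +
      "upgrading to a version that supports incremental chunk list.",
      ContainerProtos.Result.UNFINALIZED_LAYOUT_VERSION);
}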

Contributor Author:
Yeah, agreed. I think there is DataNode layout upgrade test code somewhere. Let me find out.

Contributor Author:
Opening a new jira to track adding the test code to validate layout version upgrade behavior. https://issues.apache.org/jira/browse/HDDS-11270
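
A minimal sketch of what such a layout-gating test might look like, under stated assumptions: blockManager, container, and blockID are test fixtures, the harness pins the datanode layout version below HBASE_SUPPORT, and the persistPutBlock argument list is inferred from the hunk above; none of this is code from the PR or from HDDS-11270.

// Sketch only. blockManager, container, and blockID are assumed test
// fixtures; the harness is assumed to keep the datanode layout version
// below HBASE_SUPPORT so the pre-finalization path is exercised.
@Test
public void testPartialChunkListRejectedBeforeFinalization() {
  BlockData data = new BlockData(blockID);
  // The INCREMENTAL_CHUNK_LIST metadata key is what isPartialChunkList()
  // inspects, so this block data represents a partial chunk list.
  data.addMetadata(INCREMENTAL_CHUNK_LIST, "");
  assertThrows(UnsupportedOperationException.class,
      () -> blockManager.persistPutBlock(container, data, true));
}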

"upgrading to a version that supports incremental chunk list.");
}
incrementalEnabled = false;
}
db.getStore().putBlockByID(batch, incrementalEnabled, localID, data,
containerData, endOfBlock);
if (bcsId != 0) {
@@ -226,6 +222,10 @@ public void finalizeBlock(Container container, BlockID blockId)
         "be null for finalizeBlock operation.");
     Preconditions.checkState(blockId.getContainerID() >= 0,
         "Container Id cannot be negative");
+    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+      throw new UnsupportedOperationException("DataNode has not finalized " +
+          "upgrading to a version that supports block finalization.");
+    }
 
     KeyValueContainer kvContainer = (KeyValueContainer)container;
     long localID = blockId.getLocalID();
@@ -258,7 +258,7 @@ private void mergeLastChunkForBlockFinalization(BlockID blockId, DBHandle db,
     if (blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) {
       BlockData emptyBlockData = new BlockData(blockId);
       emptyBlockData.addMetadata(INCREMENTAL_CHUNK_LIST, "");
-      db.getStore().putBlockByID(batch, incrementalEnabled, localID,
+      db.getStore().putBlockByID(batch, true, localID,
           emptyBlockData, kvContainer.getContainerData(), true);
     }
   }
@@ -368,4 +368,8 @@ private BlockData getBlockByID(DBHandle db, BlockID blockID,
     String blockKey = containerData.getBlockKey(blockID.getLocalID());
     return db.getStore().getBlockByID(blockID, blockKey);
   }
+
+  private static boolean isPartialChunkList(BlockData data) {
+    return data.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST);
+  }
 }
@@ -45,7 +45,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
@@ -84,7 +83,6 @@ private void initTest(ContainerTestVersionInfo versionInfo)
     this.schemaVersion = versionInfo.getSchemaVersion();
     this.config = new OzoneConfiguration();
     ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, config);
-    config.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
     initilaze();
   }
@@ -44,7 +44,6 @@
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 
 
@@ -74,8 +73,6 @@ private void init(boolean incrementalChunkList) throws IOException {
 
     ((InMemoryConfiguration) config).setBoolean(
         OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
-    ((InMemoryConfiguration) config).setBoolean(
-        OZONE_CHUNK_LIST_INCREMENTAL, incrementalChunkList);
 
     RpcClient rpcClient = new RpcClient(config, null) {
 
@@ -110,7 +110,6 @@
 import org.slf4j.event.Level;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
@@ -176,7 +175,6 @@ public static void init() throws Exception {
     CONF.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
     CONF.setBoolean("ozone.client.incremental.chunk.list", true);
     CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true);
-    CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
     CONF.setTimeDuration(OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL,
         SERVICE_INTERVAL, TimeUnit.MILLISECONDS);
     CONF.setTimeDuration(OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD,