@@ -17,7 +17,6 @@

package org.apache.hadoop.hdds.scm;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -37,7 +36,7 @@ public class OzoneClientConfig {

private static final Logger LOG = LoggerFactory.getLogger(OzoneClientConfig.class);

@Config(key = "stream.buffer.flush.size",
@Config(key = "ozone.client.stream.buffer.flush.size",
defaultValue = "16MB",
type = ConfigType.SIZE,
description = "Size which determines at what buffer position a partial "
@@ -46,45 +45,45 @@ public class OzoneClientConfig {
tags = ConfigTag.CLIENT)
private long streamBufferFlushSize = 16 * 1024 * 1024;

@Config(key = "stream.buffer.size",
@Config(key = "ozone.client.stream.buffer.size",
defaultValue = "4MB",
type = ConfigType.SIZE,
description = "The size of chunks the client will send to the server",
tags = ConfigTag.CLIENT)
private int streamBufferSize = 4 * 1024 * 1024;

@Config(key = "datastream.buffer.flush.size",
@Config(key = "ozone.client.datastream.buffer.flush.size",
defaultValue = "16MB",
type = ConfigType.SIZE,
description = "The boundary at which putBlock is executed",
tags = ConfigTag.CLIENT)
private long dataStreamBufferFlushSize = 16 * 1024 * 1024;

@Config(key = "datastream.min.packet.size",
@Config(key = "ozone.client.datastream.min.packet.size",
defaultValue = "1MB",
type = ConfigType.SIZE,
description = "The maximum size of the ByteBuffer "
+ "(used via ratis streaming)",
tags = ConfigTag.CLIENT)
private int dataStreamMinPacketSize = 1024 * 1024;

@Config(key = "datastream.window.size",
@Config(key = "ozone.client.datastream.window.size",
defaultValue = "64MB",
type = ConfigType.SIZE,
description = "Maximum size of BufferList(used for retry) size per " +
"BlockDataStreamOutput instance",
tags = ConfigTag.CLIENT)
private long streamWindowSize = 64 * 1024 * 1024;

@Config(key = "datastream.pipeline.mode",
@Config(key = "ozone.client.datastream.pipeline.mode",
defaultValue = "true",
description = "Streaming write support both pipeline mode(datanode1->" +
"datanode2->datanode3) and star mode(datanode1->datanode2, " +
"datanode1->datanode3). By default we use pipeline mode.",
tags = ConfigTag.CLIENT)
private boolean datastreamPipelineMode = true;

@Config(key = "stream.buffer.increment",
@Config(key = "ozone.client.stream.buffer.increment",
defaultValue = "0B",
type = ConfigType.SIZE,
description = "Buffer (defined by ozone.client.stream.buffer.size) "
@@ -96,7 +95,7 @@ public class OzoneClientConfig {
tags = ConfigTag.CLIENT)
private int bufferIncrement = 0;

@Config(key = "stream.buffer.flush.delay",
@Config(key = "ozone.client.stream.buffer.flush.delay",
defaultValue = "true",
description = "Default true, when call flush() and determine whether "
+ "the data in the current buffer is greater than ozone.client"
@@ -105,7 +104,7 @@
+ "to false.", tags = ConfigTag.CLIENT)
private boolean streamBufferFlushDelay = true;

@Config(key = "stream.buffer.max.size",
@Config(key = "ozone.client.stream.buffer.max.size",
defaultValue = "32MB",
type = ConfigType.SIZE,
description = "Size which determines at what buffer position write call"
@@ -114,14 +113,14 @@
tags = ConfigTag.CLIENT)
private long streamBufferMaxSize = 32 * 1024 * 1024;

@Config(key = "max.retries",
@Config(key = "ozone.client.max.retries",
defaultValue = "5",
description = "Maximum number of retries by Ozone Client on "
+ "encountering exception while writing a key",
tags = ConfigTag.CLIENT)
private int maxRetryCount = 5;

@Config(key = "retry.interval",
@Config(key = "ozone.client.retry.interval",
defaultValue = "0",
description =
"Indicates the time duration a client will wait before retrying a "
@@ -130,14 +129,14 @@
tags = ConfigTag.CLIENT)
private int retryInterval = 0;

@Config(key = "read.max.retries",
@Config(key = "ozone.client.read.max.retries",
defaultValue = "3",
description = "Maximum number of retries by Ozone Client on "
+ "encountering connectivity exception when reading a key.",
tags = ConfigTag.CLIENT)
private int maxReadRetryCount = 3;

@Config(key = "read.retry.interval",
@Config(key = "ozone.client.read.retry.interval",
defaultValue = "1",
description =
"Indicates the time duration in seconds a client will wait "
@@ -147,15 +146,15 @@
tags = ConfigTag.CLIENT)
private int readRetryInterval = 1;

@Config(key = "checksum.type",
@Config(key = "ozone.client.checksum.type",
defaultValue = "CRC32",
description = "The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] "
+ "determines which algorithm would be used to compute checksum for "
+ "chunk data. Default checksum type is CRC32.",
tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE })
private String checksumType = ChecksumType.CRC32.name();

@Config(key = "bytes.per.checksum",
@Config(key = "ozone.client.bytes.per.checksum",
defaultValue = "16KB",
type = ConfigType.SIZE,
description = "Checksum will be computed for every bytes per checksum "
@@ -164,36 +163,36 @@
tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE })
private int bytesPerChecksum = 16 * 1024;

@Config(key = "verify.checksum",
@Config(key = "ozone.client.verify.checksum",
defaultValue = "true",
description = "Ozone client to verify checksum of the checksum "
+ "blocksize data.",
tags = ConfigTag.CLIENT)
private boolean checksumVerify = true;

@Config(key = "max.ec.stripe.write.retries",
@Config(key = "ozone.client.max.ec.stripe.write.retries",
defaultValue = "10",
description = "Ozone EC client to retry stripe to new block group on" +
" failures.",
tags = ConfigTag.CLIENT)
private int maxECStripeWriteRetries = 10;

@Config(key = "ec.stripe.queue.size",
@Config(key = "ozone.client.ec.stripe.queue.size",
defaultValue = "2",
description = "The max number of EC stripes can be buffered in client " +
" before flushing into datanodes.",
tags = ConfigTag.CLIENT)
private int ecStripeQueueSize = 2;

@Config(key = "exclude.nodes.expiry.time",
@Config(key = "ozone.client.exclude.nodes.expiry.time",
defaultValue = "600000",
description = "Time after which an excluded node is reconsidered for" +
" writes. If the value is zero, the node is excluded for the" +
" life of the client",
tags = ConfigTag.CLIENT)
private long excludeNodesExpiryTime = 10 * 60 * 1000;

@Config(key = "ec.reconstruct.stripe.read.pool.limit",
@Config(key = "ozone.client.ec.reconstruct.stripe.read.pool.limit",
defaultValue = "30",
description = "Thread pool max size for parallelly read" +
" available ec chunks to reconstruct the whole stripe.",
@@ -204,14 +203,14 @@
// 3 concurrent stripe read should be enough.
private int ecReconstructStripeReadPoolLimit = 10 * 3;

@Config(key = "ec.reconstruct.stripe.write.pool.limit",
@Config(key = "ozone.client.ec.reconstruct.stripe.write.pool.limit",
defaultValue = "30",
description = "Thread pool max size for parallelly write" +
" available ec chunks to reconstruct the whole stripe.",
tags = ConfigTag.CLIENT)
private int ecReconstructStripeWritePoolLimit = 10 * 3;

@Config(key = "checksum.combine.mode",
@Config(key = "ozone.client.checksum.combine.mode",
defaultValue = "COMPOSITE_CRC",
description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] "
+ "determines which algorithm would be used to compute file checksum."
@@ -225,16 +224,15 @@
private String checksumCombineMode =
ChecksumCombineMode.COMPOSITE_CRC.name();

@Config(key = "fs.default.bucket.layout",
@Config(key = "ozone.client.fs.default.bucket.layout",
defaultValue = "FILE_SYSTEM_OPTIMIZED",
type = ConfigType.STRING,
description = "The bucket layout used by buckets created using OFS. " +
"Valid values include FILE_SYSTEM_OPTIMIZED and LEGACY",
tags = ConfigTag.CLIENT)
private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED";

// ozone.client.hbase.enhancements.allowed
@Config(key = "hbase.enhancements.allowed",
@Config(key = "ozone.client.hbase.enhancements.allowed",
defaultValue = "false",
description = "When set to false, client-side HBase enhancement-related Ozone (experimental) features " +
"are disabled (not allowed to be enabled) regardless of whether those configs are set.\n" +
@@ -249,8 +247,7 @@
tags = ConfigTag.CLIENT)
private boolean hbaseEnhancementsAllowed = false;

// ozone.client.incremental.chunk.list
@Config(key = "incremental.chunk.list",
@Config(key = "ozone.client.incremental.chunk.list",
defaultValue = "false",
type = ConfigType.BOOLEAN,
description = "Client PutBlock request can choose incremental chunk " +
@@ -260,17 +257,15 @@
tags = ConfigTag.CLIENT)
private boolean incrementalChunkList = false;

// ozone.client.stream.putblock.piggybacking
@Config(key = "stream.putblock.piggybacking",
@Config(key = "ozone.client.stream.putblock.piggybacking",
defaultValue = "false",
type = ConfigType.BOOLEAN,
description = "Allow PutBlock to be piggybacked in WriteChunk requests if the chunk is small. " +
"Can be enabled only when ozone.client.hbase.enhancements.allowed = true",
tags = ConfigTag.CLIENT)
private boolean enablePutblockPiggybacking = false;

// ozone.client.key.write.concurrency
@Config(key = "key.write.concurrency",
@Config(key = "ozone.client.key.write.concurrency",
defaultValue = "1",
description = "Maximum concurrent writes allowed on each key. " +
"Defaults to 1 which matches the behavior before HDDS-9844. " +
@@ -339,7 +334,6 @@ public long getStreamBufferFlushSize() {
return streamBufferFlushSize;
}

@VisibleForTesting
public void setStreamBufferFlushSize(long streamBufferFlushSize) {
this.streamBufferFlushSize = streamBufferFlushSize;
}
@@ -348,7 +342,6 @@ public int getStreamBufferSize() {
return streamBufferSize;
}

@VisibleForTesting
public void setStreamBufferSize(int streamBufferSize) {
this.streamBufferSize = streamBufferSize;
}
@@ -357,7 +350,6 @@ public boolean isStreamBufferFlushDelay() {
return streamBufferFlushDelay;
}

@VisibleForTesting
public void setStreamBufferFlushDelay(boolean streamBufferFlushDelay) {
this.streamBufferFlushDelay = streamBufferFlushDelay;
}
@@ -366,7 +358,6 @@ public long getStreamBufferMaxSize() {
return streamBufferMaxSize;
}

@VisibleForTesting
public void setStreamBufferMaxSize(long streamBufferMaxSize) {
this.streamBufferMaxSize = streamBufferMaxSize;
}
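The hunks above do two things: every `@Config` key in `OzoneClientConfig` is rewritten from a relative name (previously joined with the `@ConfigGroup` prefix at code-generation time) to its fully qualified `ozone.client.*` form, and `@VisibleForTesting` is dropped from the buffer-related setters. A minimal sketch of how the typed config is consumed, assuming the standard `OzoneConfiguration#getObject` injection flow; the override value here is illustrative only:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;

public class ClientConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // After this change, the key set here matches the @Config annotation
    // verbatim instead of being assembled from prefix plus suffix.
    conf.set("ozone.client.stream.buffer.flush.size", "8MB");

    // The config framework injects parsed values into the annotated fields.
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    System.out.println(clientConfig.getStreamBufferFlushSize()); // 8388608
  }
}
```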
@@ -74,7 +74,7 @@ public class ScmConfig extends ReconfigurableConfig {
+ " : chooses a pipeline in a round robin fashion. Intended for troubleshooting and testing purposes only.";

// hdds.scm.pipeline.choose.policy.impl
@Config(key = "pipeline.choose.policy.impl",
@Config(key = "hdds.scm.pipeline.choose.policy.impl",
type = ConfigType.STRING,
defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy",
tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
@@ -88,7 +88,7 @@ public class ScmConfig extends ReconfigurableConfig {
private String pipelineChoosePolicyName;

// hdds.scm.ec.pipeline.choose.policy.impl
@Config(key = "ec.pipeline.choose.policy.impl",
@Config(key = "hdds.scm.ec.pipeline.choose.policy.impl",
type = ConfigType.STRING,
defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy",
tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
@@ -101,7 +101,7 @@
)
private String ecPipelineChoosePolicyName;

@Config(key = "block.deletion.per-interval.max",
@Config(key = "hdds.scm.block.deletion.per-interval.max",
type = ConfigType.INT,
defaultValue = "100000",
reconfigurable = true,
@@ -115,7 +115,7 @@
)
private int blockDeletionLimit;

@Config(key = "block.deleting.service.interval",
@Config(key = "hdds.scm.block.deleting.service.interval",
defaultValue = "60s",
type = ConfigType.TIME,
tags = { ConfigTag.SCM, ConfigTag.DELETION },
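The same rewrite is applied to `ScmConfig`: the keys gain the `hdds.scm.` prefix that the surrounding comments already spelled out. Since `ScmConfig` extends `ReconfigurableConfig` and `block.deletion.per-interval.max` is marked `reconfigurable = true`, the fully qualified key is also what an operator would target at runtime. A hedged sketch along the same lines as above; the `getBlockDeletionLimit` accessor name is an assumption, as the getter is not shown in this diff:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfig;

public class ScmConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Fully qualified key, exactly as written in the @Config annotation above.
    conf.setInt("hdds.scm.block.deletion.per-interval.max", 50000);

    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
    // Hypothetical accessor; the real getter name may differ.
    System.out.println(scmConfig.getBlockDeletionLimit()); // 50000
  }
}
```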
@@ -17,6 +17,8 @@

package org.apache.hadoop.hdds.conf;

import static org.apache.hadoop.hdds.conf.ConfigurationReflectionUtil.getFullKey;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
@@ -133,17 +135,7 @@ private void writeConfigAnnotations(ConfigGroup configGroup,

Config configAnnotation = element.getAnnotation(Config.class);

if (configAnnotation.key().startsWith(configGroup.prefix())) {
String msg = String.format(
"@%s(key = \"%s\") should not duplicate prefix from @%s(\"%s\")",
Config.class.getSimpleName(), configAnnotation.key(),
ConfigGroup.class.getSimpleName(), configGroup.prefix());
processingEnv.getMessager().printMessage(Kind.ERROR, msg, element);
continue;
}

String key = configGroup.prefix() + "."
+ configAnnotation.key();
String key = getFullKey(configGroup, configAnnotation);

appender.addConfig(key,
configAnnotation.defaultValue(),
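The annotation processor previously rejected any key that already carried the group prefix and then unconditionally concatenated `configGroup.prefix() + "." + configAnnotation.key()`; both branches are replaced by a single call to `ConfigurationReflectionUtil.getFullKey`, which is what permits the fully qualified keys introduced in the other files. The helper itself is not part of this diff; one plausible implementation consistent with the change:

```java
// Plausible sketch only; the real ConfigurationReflectionUtil.getFullKey
// in org.apache.hadoop.hdds.conf may differ in details.
static String getFullKey(ConfigGroup configGroup, Config configAnnotation) {
  String key = configAnnotation.key();
  String prefix = configGroup.prefix();
  if (key.startsWith(prefix + ".")) {
    // Already fully qualified, e.g. "ozone.client.max.retries": use as-is.
    return key;
  }
  // Relative key: keep the old behavior of joining with the group prefix.
  return prefix + "." + key;
}
```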