diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 6e2e1522a705..5defd821a56b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -167,8 +167,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } // Add credential context to the client call @@ -282,6 +282,7 @@ public ContainerCommandResponseProto sendCommand( } for (DatanodeDetails dn : datanodeList) { try { + request = reconstructRequestIfNeeded(request, dn); futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse()); } catch (InterruptedException e) { LOG.error("Command execution was interrupted."); @@ -313,6 +314,29 @@ public ContainerCommandResponseProto sendCommand( return responseProtoHashMap; } + /** + * In case of getBlock requests for EC keys, the replicaIndex must be set + * on every request to match the replica index of the datanode the request + * is sent to. This method unpacks the proto and rebuilds the request with + * the replicaIndex field set; requests that do not need this are returned + * unchanged. + * @param request the original container command request + * @param dn the datanode the request is sent to + * @return the updated request + */ + private ContainerCommandRequestProto reconstructRequestIfNeeded( + ContainerCommandRequestProto request, DatanodeDetails dn) { + boolean isEcRequest = pipeline.getReplicationConfig() + .getReplicationType() == HddsProtos.ReplicationType.EC; + if (request.hasGetBlock() && isEcRequest) { + ContainerProtos.GetBlockRequestProto gbr = request.getGetBlock(); + request = request.toBuilder().setGetBlock(gbr.toBuilder().setBlockID( + gbr.getBlockID().toBuilder().setReplicaIndex( + pipeline.getReplicaIndex(dn)).build()).build()).build(); + } + return request; + } + @Override public ContainerCommandResponseProto sendCommand( ContainerCommandRequestProto request, List validators) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a79..58a2153352a4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,8 +83,8 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new SecurityConfig(ozoneConf), trustManager); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index
10c2189f5649..6a87c69b5351 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.ratis.server.RaftServerConfigKeys; import static java.util.Collections.unmodifiableSortedSet; @@ -332,7 +333,67 @@ private static void addDeprecatedKeys() { new DeprecationDelta("ozone.scm.chunk.layout", ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY), new DeprecationDelta("hdds.datanode.replication.work.dir", - OZONE_CONTAINER_COPY_WORKDIR) + OZONE_CONTAINER_COPY_WORKDIR), + new DeprecationDelta("dfs.container.chunk.write.sync", + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + new DeprecationDelta("dfs.container.ipc", + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT), + new DeprecationDelta("dfs.container.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.admin.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT), + new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + new DeprecationDelta("dfs.container.ratis.datastream.enabled", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED), + new DeprecationDelta("dfs.container.ratis.datastream.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT), + new DeprecationDelta("dfs.container.ratis.datastream.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.enabled", + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY), + new DeprecationDelta("dfs.container.ratis.ipc", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT), + new DeprecationDelta("dfs.container.ratis.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.log.purge.gap", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP), + new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.num.container.op.executors", + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + new DeprecationDelta("dfs.container.ratis.replication.level", + ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + new DeprecationDelta("dfs.container.ratis.rpc.type", + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + new 
DeprecationDelta("dfs.container.ratis.segment.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.server.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.snapshot.threshold", + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 4d172ef6ab97..0b79273e5118 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -228,8 +228,8 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } public static BiFunction newRaftClient( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 007dc3dfaef8..52e2a26f6431 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,95 +41,95 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = "hdds.container.ratis.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = "hdds.container.ratis.rpc.type"; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME - = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + = "hdds.container.ratis.num.write.chunk.threads.per.volume"; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + 
HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = "hdds.container.ratis.replication.level"; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = "hdds.container.ratis.num.container.op.executors"; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = + "hdds.container.ratis.segment.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = + "hdds.container.ratis.segment.preallocated.size"; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + "hdds.container.ratis.statemachinedata.sync.retries"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = + "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.queue.num-elements"; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.queue.byte-limit"; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.appender.queue.num-elements"; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - "dfs.container.ratis.leader.pending.bytes.limit"; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + "hdds.container.ratis.leader.pending.bytes.limit"; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + "hdds.ratis.snapshot.threshold"; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 21c89cc3c8d4..9508d9125311 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -36,9 +36,9 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { - public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; + public static final String HDDS_CONTAINER_IPC_PORT = + "hdds.container.ipc.port"; + public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -56,52 +56,52 @@ public final class OzoneConfigKeys { * so that a mini cluster is able to launch multiple containers on a node. * * When set to false (default), the container port will be specified as - * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified - * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. + * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified + * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}. 
*/ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_IPC_RANDOM_PORT = + "hdds.container.ipc.random.port"; + public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = - "dfs.container.ratis.datastream.random.port"; + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = + "hdds.container.ratis.datastream.random.port"; public static final boolean - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; + public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY = + "hdds.container.chunk.write.sync"; + public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; + public static final String HDDS_CONTAINER_RATIS_IPC_PORT = + "hdds.container.ratis.ipc.port"; + public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ - public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = - "dfs.container.ratis.admin.port"; - public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; + public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT = + "hdds.container.ratis.admin.port"; + public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. */ - public static final String DFS_CONTAINER_RATIS_SERVER_PORT = - "dfs.container.ratis.server.port"; - public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; + public static final String HDDS_CONTAINER_RATIS_SERVER_PORT = + "hdds.container.ratis.server.port"; + public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ - public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED - = "dfs.container.ratis.datastream.enabled"; - public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED + = "hdds.container.ratis.datastream.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT - = "dfs.container.ratis.datastream.port"; - public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT + = "hdds.container.ratis.datastream.port"; + public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; /** @@ -133,9 +133,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. 
*/ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT = + "hdds.container.ratis.ipc.random.port"; + public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_TRACE_ENABLED_KEY = "ozone.trace.enabled"; @@ -328,97 +328,97 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. 
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; + public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + "hdds.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; public 
static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; + public static final String 
HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; public static final String HDDS_DATANODE_PLUGINS_KEY = "hdds.datanode.plugins"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index cd3a01aabc23..4cc32eb336c3 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -46,26 +46,26 @@ - dfs.container.ipc + hdds.container.ipc.port 9859 OZONE, CONTAINER, MANAGEMENT The ipc port number of container. - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. - dfs.container.ratis.datastream.random.port + hdds.container.ratis.datastream.random.port false OZONE, CONTAINER, RATIS, DATASTREAM Allocates a random free port for ozone container datastream. @@ -73,7 +73,7 @@ - dfs.container.ipc.random.port + hdds.container.ipc.random.port false OZONE, DEBUG, CONTAINER Allocates a random free port for ozone container. This is used @@ -82,7 +82,7 @@ - dfs.container.chunk.write.sync + hdds.container.chunk.write.sync false OZONE, CONTAINER, MANAGEMENT Determines whether the chunk writes in the container happen as @@ -90,19 +90,19 @@ - dfs.container.ratis.statemachinedata.sync.timeout + hdds.container.ratis.statemachinedata.sync.timeout 10s OZONE, DEBUG, CONTAINER, RATIS Timeout for StateMachine data writes by Ratis. - dfs.container.ratis.statemachinedata.sync.retries + hdds.container.ratis.statemachinedata.sync.retries OZONE, DEBUG, CONTAINER, RATIS Number of times the WriteStateMachineData op will be tried before failing. 
If the value is not configured, it will default - to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout), + to (hdds.ratis.rpc.slowness.timeout / hdds.container.ratis.statemachinedata.sync.timeout), which means that the WriteStatMachineData will be retried for every sync timeout until the configured slowness timeout is hit, after which the StateMachine will close down the pipeline. @@ -112,21 +112,22 @@ - dfs.container.ratis.log.queue.num-elements + hdds.container.ratis.log.queue.num-elements 1024 OZONE, DEBUG, CONTAINER, RATIS Limit for the number of operations in Ratis Log Worker. - dfs.container.ratis.log.queue.byte-limit + hdds.container.ratis.log.queue.byte-limit 4GB OZONE, DEBUG, CONTAINER, RATIS Byte limit for Ratis Log Worker queue. - dfs.container.ratis.log.appender.queue.num-elements + hdds.container.ratis.log.appender.queue.num-elements + 1 OZONE, DEBUG, CONTAINER, RATIS Limit for number of append entries in ratis leader's @@ -134,14 +135,16 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 32MB OZONE, DEBUG, CONTAINER, RATIS Byte limit for ratis leader's log appender queue. - dfs.container.ratis.log.purge.gap + hdds.container.ratis.log.purge.gap + 1000000 OZONE, DEBUG, CONTAINER, RATIS Purge gap between the last purged commit index @@ -149,7 +152,7 @@ - dfs.container.ratis.datanode.storage.dir + hdds.container.ratis.datanode.storage.dir OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS This directory is used for storing Ratis metadata like logs. If @@ -223,7 +226,7 @@ - dfs.container.ratis.enabled + hdds.container.ratis.enabled false OZONE, MANAGEMENT, PIPELINE, RATIS Ozone supports different kinds of replication pipelines. Ratis @@ -232,25 +235,26 @@ - dfs.container.ratis.ipc + hdds.container.ratis.ipc.port 9858 OZONE, CONTAINER, PIPELINE, RATIS The ipc port number of container for clients. - dfs.container.ratis.admin.port + hdds.container.ratis.admin.port 9857 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for admin requests. - dfs.container.ratis.server.port + hdds.container.ratis.server.port 9856 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for server-server communication. - dfs.container.ratis.ipc.random.port + hdds.container.ratis.ipc.random.port + false OZONE,DEBUG Allocates a random free port for ozone ratis port for the @@ -259,7 +263,7 @@ - dfs.container.ratis.rpc.type + hdds.container.ratis.rpc.type GRPC OZONE, RATIS, MANAGEMENT Ratis supports different kinds of transports like netty, GRPC, @@ -268,7 +272,7 @@ - dfs.ratis.snapshot.threshold + hdds.ratis.snapshot.threshold 10000 OZONE, RATIS Number of transactions after which a ratis snapshot should be @@ -276,16 +280,16 @@ - dfs.container.ratis.statemachine.max.pending.apply-transactions + hdds.container.ratis.statemachine.max.pending.apply-transactions 10000 OZONE, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. + hdds.ratis.snapshot.threshold. 
- dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode @@ -295,7 +299,8 @@ - dfs.container.ratis.leader.pending.bytes.limit + hdds.container.ratis.leader.pending.bytes.limit + 1GB OZONE, RATIS, PERFORMANCE Limit on the total bytes of pending requests after which @@ -303,7 +308,7 @@ - dfs.container.ratis.replication.level + hdds.container.ratis.replication.level MAJORITY OZONE, RATIS Replication level to be used by datanode for submitting a @@ -312,7 +317,7 @@ - dfs.container.ratis.num.container.op.executors + hdds.container.ratis.num.container.op.executors 10 OZONE, RATIS, PERFORMANCE Number of executors that will be used by Ratis to execute @@ -320,7 +325,7 @@ - dfs.container.ratis.segment.size + hdds.container.ratis.segment.size 64MB OZONE, RATIS, PERFORMANCE The size of the raft segment file used @@ -328,7 +333,7 @@ - dfs.container.ratis.segment.preallocated.size + hdds.container.ratis.segment.preallocated.size 4MB OZONE, RATIS, PERFORMANCE The pre-allocated file size for raft segment used @@ -336,13 +341,13 @@ - dfs.ratis.server.retry-cache.timeout.duration + hdds.ratis.server.retry-cache.timeout.duration 600000ms OZONE, RATIS, MANAGEMENT Retry Cache entry timeout for ratis server. - dfs.ratis.leader.election.minimum.timeout.duration + hdds.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. @@ -707,7 +712,7 @@ For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. + hdds.container.ratis.datanode.storage.dir be configured separately. @@ -4234,15 +4239,6 @@ - - ozone.om.snapshot.sst_dumptool.pool.size - 1 - OZONE, OM - - Threadpool size for SST Dumptool which would be used for computing snapdiff when native library is enabled. - - - ozone.om.snapshot.load.native.lib true @@ -4252,15 +4248,6 @@ - - ozone.om.snapshot.sst_dumptool.buffer.size - 8KB - OZONE, OM - - Buffer size for SST Dumptool Pipe which would be used for computing snapdiff when native library is enabled. 
- - - ozone.om.snapshot.diff.max.allowed.keys.changed.per.job 10000000 diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index a0f2735c6008..1b60dad694b9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -202,7 +202,6 @@ private ContainerCommandResponseProto dispatchRequest( long startTime = Time.monotonicNow(); Type cmdType = msg.getCmdType(); long containerID = msg.getContainerID(); - metrics.incContainerOpsMetrics(cmdType); Container container = getContainer(containerID); boolean isWriteStage = (cmdType == Type.WriteChunk && dispatcherContext != null @@ -214,6 +213,16 @@ && dispatcherContext.getStage() == DispatcherContext.WriteChunkStage.COMMIT_DATA); + if (dispatcherContext == null) { + // increment metrics for all ops that do not go through Ratis + metrics.incContainerOpsMetrics(cmdType); + } else if (isWriteStage) { + // increment WriteChunk metrics only in the WRITE_DATA stage + metrics.incContainerOpsMetrics(cmdType); + } else if (cmdType != Type.WriteChunk) { + metrics.incContainerOpsMetrics(cmdType); + } + try { if (DispatcherContext.op(dispatcherContext).validateToken()) { validateToken(msg); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d2..346b05ebb4c1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,11 +99,11 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { + if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 31e0c603aeed..56210eab7e4d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -219,8 +219,8 @@ public ContainerStateMachine(RaftGroupId gid, this.writeChunkFutureMap = new ConcurrentHashMap<>(); applyTransactionCompletionMap = new ConcurrentHashMap<>(); long
pendingRequestsBytesLimit = (long)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower @@ -238,13 +238,13 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index e6cbe262f6a6..58c8655454bd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -111,12 +111,12 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -172,8 +172,8 @@ private XceiverServerRatis(DatanodeDetails dd, ratisServerConfig = 
conf.getObject(DatanodeRatisServerConfig.class); assignPorts(); this.streamEnable = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; @@ -200,17 +200,17 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) { adminPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); serverPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); } else { adminPort = clientPort; serverPort = clientPort; @@ -219,8 +219,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -232,14 +232,14 @@ private ContainerStateMachine getStateMachine(RaftGroupId gid) { private void setUpRatisStream(RaftProperties properties) { // set the datastream config if (conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { dataStreamPort = 0; } else { dataStreamPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); } RatisHelper.enableNettyStreaming(properties); NettyConfigKeys.DataStream.setPort(properties, dataStreamPort); @@ -310,8 +310,8 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); + conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, + OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); RaftServerConfigKeys.Snapshot. 
@@ -321,11 +321,11 @@ public RaftProperties newRaftProperties() { setPendingRequestsLimits(properties); int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); final long logQueueByteLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setQueueElementLimit( properties, logQueueNumElements); @@ -336,8 +336,8 @@ public RaftProperties newRaftProperties() { false); int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); //Set the number of Snapshots Retained. @@ -358,12 +358,12 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { long duration; TimeUnit leaderElectionMinTimeoutUnit = OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); @@ -379,11 +379,11 @@ private void setTimeoutForRetryCache(RaftProperties properties) { TimeUnit timeUnit; long duration; timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getDuration(), timeUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -393,8 +393,8 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); @@ -403,23 +403,23 @@ private long setRaftSegmentPreallocatedSize(RaftProperties properties) { private void 
setRaftSegmentAndWriteBufferSize(RaftProperties properties) { final int logAppenderQueueNumElements = conf.getInt( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; assertTrue(raftSegmentBufferSize <= raftSegmentSize, - () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -437,11 +437,11 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); TimeUnit timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); final TimeDuration dataSyncTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -462,7 +462,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, syncTimeoutRetryDefault); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numSyncRetries); @@ -490,8 +490,8 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); return rpc; @@ -500,8 +500,8 @@ private RpcType setRpcType(RaftProperties properties) { private void setPendingRequestsLimits(RaftProperties properties) { long pendingRequestsBytesLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); final int pendingRequestsMegaBytesLimit = HddsUtils.roundupMb(pendingRequestsBytesLimit); @@ -967,9 +967,9 @@ private static List createChunkExecutors( // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index c462649c68c7..16da798c807e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -165,9 +165,9 @@ public KeyValueHandler(ConfigurationSource config, // Requests. 
final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); containerCreationLocks = Striped.lazyWeakLock( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 1267ed786892..288a2d3e3312 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -51,8 +51,8 @@ private ChunkManagerFactory() { public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { boolean sync = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); + conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY, + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA, HDDS_CONTAINER_PERSISTDATA_DEFAULT); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 735ad6033fb8..3bba28fe20aa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -336,7 +336,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 91935e0d04b1..36ffbca6584b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -168,8 +168,8 @@ public static HddsProtos.ReplicationFactor getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 914057f70a34..6c7694556baa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -80,12 +80,12 @@ public void setUp() throws Exception { conf = SCMTestUtils.getConf(); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); @@ -216,7 +216,7 @@ public void testDatanodeStateContext() throws IOException, DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = @@ -343,7 +343,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index abfa2c76147a..8946f8baac49 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -179,7 +179,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 85165548142e..b25162757b35 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index d10e678b572d..b95765d62f6a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -77,7 +77,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 832fbcac03ab..c52b2bed4d11 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -230,7 +230,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index b22f3a68516e..25d45c53503b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -323,7 +323,7 @@ public void testMultipleContainerReader() throws Exception { BlockUtils.shutdownCache(conf); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1384530ff772..a3d2b67b6738 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -178,7 +178,7 @@ public void testBuildContainerMap() throws Exception { public void testBuildNodeReport() throws Exception { String path = folder.getRoot() .getAbsolutePath(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 7669a577cf7e..8c8107b4c598 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -105,10 +105,10 @@ public TestDatanodeUpgradeToSchemaV3(Boolean enable) { conf = new OzoneConfiguration(); conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, this.schemaV3Enabled); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); } @Before diff --git a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md index 5f55afebc3c8..e48a95c8bb9c 100644 --- a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md +++ b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md @@ -43,7 +43,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - To enable the Streaming Write Pipeline feature, set the following property to true. ```XML - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. @@ -52,7 +52,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - Datanodes listen to the following port for the streaming traffic. ```XML - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b31..4fae3686c93c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. 
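The documentation hunk above carries the same rename into the streaming write pipeline guide: the properties are now hdds.container.ratis.datastream.enabled and hdds.container.ratis.datastream.port. A hedged sketch of setting them in code, in the way the tests elsewhere in this patch do (the wrapper class is invented for illustration; 9855 is the documented default port, not something this patch changes):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

// Sketch: turning on the streaming write pipeline with the renamed keys.
final class DatastreamConfigExample {
  private DatastreamConfigExample() { }

  static OzoneConfiguration enableDatastream() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
    // Documented default from the guide above; override as needed.
    conf.setInt("hdds.container.ratis.datastream.port", 9855);
    return conf;
  }
}
```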
*/ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java new file mode 100644 index 000000000000..46fbeb412a84 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.rocksdb.util.Environment; + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Class to write the rocksdb lib name to a file. + * This would be used to build native ozone_rocksdb_tools library. + */ +public final class JniLibNamePropertyWriter { + + private JniLibNamePropertyWriter() { + } + + public static void main(String[] args) { + String filePath = args[0]; + try (Writer writer = new OutputStreamWriter( + Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) { + writer.write("rocksdbLibName=" + + Environment.getJniLibraryFileName("rocksdb")); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 1b29bfcbd6af..6e92f57b6c18 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -57,13 +57,16 @@ mockito-inline test + + org.assertj + assertj-core + false 8 8 - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -120,79 +123,80 @@ - com.googlecode.maven-download-plugin - download-maven-plugin + org.codehaus.mojo + exec-maven-plugin - rocksdb source download - generate-sources + set-property + initialize - wget + java - https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz - rocksdb-v${rocksdb.version}.tar.gz - ${project.build.directory}/rocksdb - - - - zlib source download - generate-sources - - wget - - - ${zlib.url} - zlib-${zlib.version}.tar.gz - ${project.build.directory}/zlib - - - - bzip2 source download - generate-sources - - wget - - - https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz - bzip2-v${bzip2.version}.tar.gz - ${project.build.directory}/bzip2 + org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter + + ${project.build.directory}/propertyFile.txt + + + + + org.codehaus.mojo + properties-maven-plugin + - lz4 source download - generate-sources + read-property-from-file + initialize - wget + read-project-properties - https://github.com/lz4/lz4/archive/refs/tags/v${lz4.version}.tar.gz - lz4-v${lz4.version}.tar.gz - ${project.build.directory}/lz4 + + ${project.build.directory}/propertyFile.txt + + + + + org.apache.maven.plugins + maven-dependency-plugin + - snappy source download - generate-sources + unpack-dependency + initialize - wget + unpack - https://github.com/google/snappy/archive/refs/tags/${snappy.version}.tar.gz - snappy-v${snappy.version}.tar.gz - ${project.build.directory}/snappy + + + org.rocksdb + rocksdbjni + jar + false + ${project.build.directory}/rocksdbjni + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + - zstd source download + rocksdb source download generate-sources wget - https://github.com/facebook/zstd/archive/refs/tags/v${zstd.version}.tar.gz - zstd-v${zstd.version}.tar.gz - ${project.build.directory}/zstd + https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz + rocksdb-v${rocksdb.version}.tar.gz + ${project.build.directory}/rocksdb @@ -226,88 +230,6 @@ - - - - - - - - - 
run - - - - build-zlib - process-sources - - - - - - - - - - - - run - - - - build-bzip2 - process-sources - - - - - - - - - run - - - - build-lz4 - process-sources - - - - - - - - - run - - - - build-zstd - process-sources - - - - - - - - - run - - - - build-snappy - process-sources - - - - - - - - - @@ -326,11 +248,10 @@ - - - + + - + @@ -353,14 +274,11 @@ - - - - - - + + - + @@ -430,8 +348,8 @@ ${env.JAVA_HOME}/bin/javah - org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool - org.apache.hadoop.hdds.utils.db.managed.PipeInputStream + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator ${project.build.directory}/native/javah @@ -486,8 +404,8 @@ ${project.build.outputDirectory}:${project.build.directory}/dependency/* -h ${project.build.directory}/native/javah - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReaderIterator.java diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt index 051660777493..4639e2a8c927 100644 --- a/hadoop-hdds/rocks-native/src/CMakeLists.txt +++ b/hadoop-hdds/rocks-native/src/CMakeLists.txt @@ -21,6 +21,7 @@ # cmake_minimum_required(VERSION 2.8) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") project(ozone_native) @@ -36,43 +37,18 @@ endif() include_directories(${GENERATED_JAVAH}) if(${SST_DUMP_INCLUDE}) include_directories(${ROCKSDB_HEADERS}) - set(SOURCE_FILES ${NATIVE_DIR}/SSTDumpTool.cpp ${NATIVE_DIR}/PipeInputStream.cpp ${NATIVE_DIR}/Pipe.h ${NATIVE_DIR}/Pipe.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) - ADD_LIBRARY(rocksdb STATIC IMPORTED) + set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) + ADD_LIBRARY(rocksdb SHARED IMPORTED) set_target_properties( rocksdb PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb.a) + IMPORTED_LOCATION ${ROCKSDB_LIB}) ADD_LIBRARY(rocks_tools STATIC IMPORTED) set_target_properties( rocks_tools PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb_tools.a) - ADD_LIBRARY(bz2 STATIC IMPORTED) - set_target_properties( - bz2 - PROPERTIES - IMPORTED_LOCATION ${BZIP2_LIB}/libbz2.a) - ADD_LIBRARY(zlib STATIC IMPORTED) - set_target_properties( - zlib - PROPERTIES - IMPORTED_LOCATION ${ZLIB_LIB}/libz.a) - ADD_LIBRARY(lz4 STATIC IMPORTED) - set_target_properties( - lz4 - PROPERTIES - IMPORTED_LOCATION ${LZ4_LIB}/liblz4.a) - ADD_LIBRARY(snappy STATIC IMPORTED) - set_target_properties( - snappy - PROPERTIES - IMPORTED_LOCATION ${SNAPPY_LIB}/libsnappy.a) - ADD_LIBRARY(zstd STATIC IMPORTED) - set_target_properties( - zstd - PROPERTIES - IMPORTED_LOCATION ${ZSTD_LIB}/libzstd.a) - set(linked_libraries ${linked_libraries} bz2 zlib rocks_tools rocksdb lz4 snappy zstd) + IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a) + set(linked_libraries ${linked_libraries} rocks_tools rocksdb) endif() add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES}) target_link_libraries(ozone_rocksdb_tools ${linked_libraries}) diff --git 
a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java index d3121144d37a..8937f0803a18 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java @@ -26,6 +26,5 @@ public final class NativeConstants { private NativeConstants() { } - public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME - = "ozone_rocksdb_tools"; + public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools"; } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..02125951c1fe --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import com.google.common.primitives.UnsignedLong; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.util.ClosableIterator; + +import java.util.Arrays; +import java.util.NoSuchElementException; +import java.util.function.Function; + +/** + * Iterator for SSTFileReader which would read all entries including tombstones. + */ +public class ManagedRawSSTFileIterator implements ClosableIterator { + // Native address of pointer to the object. + private final long nativeHandle; + private final Function transformer; + + ManagedRawSSTFileIterator(long nativeHandle, Function transformer) { + this.nativeHandle = nativeHandle; + this.transformer = transformer; + } + + private native boolean hasNext(long handle); + private native void next(long handle); + private native byte[] getKey(long handle); + private native byte[] getValue(long handle); + private native long getSequenceNumber(long handle); + private native int getType(long handle); + + @Override + public boolean hasNext() { + return this.hasNext(nativeHandle); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + KeyValue keyValue = new KeyValue(this.getKey(nativeHandle), + UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), + this.getType(nativeHandle), + this.getValue(nativeHandle)); + this.next(nativeHandle); + return this.transformer.apply(keyValue); + } + + private native void closeInternal(long handle); + + @Override + public void close() { + this.closeInternal(this.nativeHandle); + } + + /** + * Class containing Parsed KeyValue Record from RawSstReader output. + */ + public static final class KeyValue { + + private final byte[] key; + private final UnsignedLong sequence; + private final Integer type; + private final byte[] value; + + private KeyValue(byte[] key, UnsignedLong sequence, Integer type, + byte[] value) { + this.key = key; + this.sequence = sequence; + this.type = type; + this.value = value; + } + + public byte[] getKey() { + return Arrays.copyOf(key, key.length); + } + + public UnsignedLong getSequence() { + return sequence; + } + + public Integer getType() { + return type; + } + + public byte[] getValue() { + return Arrays.copyOf(value, value.length); + } + + @Override + public String toString() { + return "KeyValue{" + + "key=" + StringUtils.bytes2String(key) + + ", sequence=" + sequence + + ", type=" + type + + ", value=" + StringUtils.bytes2String(value) + + '}'; + } + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java new file mode 100644 index 000000000000..7c8783b43948 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
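The new iterator above hands each raw entry (key, sequence number, type, value) to a caller-supplied transformer before returning it. A small illustrative transformer; the debug-string format is arbitrary and the holder class is invented:

```java
import java.util.function.Function;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;

// Illustrative transformer from the raw KeyValue into a debug string.
final class KeyValueFormats {

  static final Function<ManagedRawSSTFileIterator.KeyValue, String> TO_DEBUG_STRING =
      kv -> StringUtils.bytes2String(kv.getKey())
          + " seq=" + kv.getSequence()
          + " type=" + kv.getType();

  private KeyValueFormats() { }
}
```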
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.function.Function; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + +/** + * JNI for RocksDB RawSSTFileReader. + */ +public class ManagedRawSSTFileReader implements Closeable { + + public static boolean loadLibrary() throws NativeLibraryNotLoadedException { + ManagedRocksObjectUtils.loadRocksDBLibrary(); + if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { + throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + return true; + } + + private final String fileName; + // Native address of pointer to the object. + private final long nativeHandle; + private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); + + public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) { + this.fileName = fileName; + this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); + } + + public ManagedRawSSTFileIterator newIterator( + Function transformerFunction, + ManagedSlice fromSlice, ManagedSlice toSlice) { + long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); + long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); + LOG.info("Iterating SST file: {} with native lib. " + + "LowerBound: {}, UpperBound: {}", fileName, fromSlice, toSlice); + return new ManagedRawSSTFileIterator<>( + newIterator(this.nativeHandle, fromSlice != null, + fromNativeHandle, toSlice != null, toNativeHandle), + transformerFunction); + } + + private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize); + + + private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo, + long toSliceHandle); + + private native void disposeInternal(long handle); + + @Override + public void close() { + disposeInternal(nativeHandle); + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java deleted file mode 100644 index d8844eaacbcd..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.collect.Maps; -import com.google.common.primitives.UnsignedLong; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.util.ClosableIterator; -import org.eclipse.jetty.io.RuntimeIOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -/** - * Iterator to Parse output of RocksDBSSTDumpTool. - */ -public abstract class ManagedSSTDumpIterator implements ClosableIterator { - - private static final Logger LOG = - LoggerFactory.getLogger(ManagedSSTDumpIterator.class); - // Since we don't have any restriction on the key & value, we are prepending - // the length of the pattern in the sst dump tool output. - // The first token in the pattern is the key. - // The second tells the sequence number of the key. - // The third token gives the type of key in the sst file. - // The fourth token - private InputStream processOutput; - private Optional currentKey; - private byte[] intBuffer; - private Optional nextKey; - - private ManagedSSTDumpTool.SSTDumpToolTask sstDumpToolTask; - private AtomicBoolean open; - private StackTraceElement[] stackTrace; - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options) - throws IOException { - this(sstDumpTool, sstFilePath, options, null, null); - } - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options, - ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) - throws IOException { - File sstFile = new File(sstFilePath); - if (!sstFile.exists()) { - throw new IOException(String.format("File in path : %s doesn't exist", - sstFile.getAbsolutePath())); - } - if (!sstFile.isFile()) { - throw new IOException(String.format("Path given: %s is not a file", - sstFile.getAbsolutePath())); - } - init(sstDumpTool, sstFile, options, lowerKeyBound, upperKeyBound); - this.stackTrace = Thread.currentThread().getStackTrace(); - } - - /** - * Parses next occuring number in the stream. 
- * - * @return Optional of the integer empty if no integer exists - */ - private Optional getNextNumberInStream() throws IOException { - int n = processOutput.read(intBuffer, 0, 4); - if (n == 4) { - return Optional.of(ByteBuffer.wrap(intBuffer).getInt()); - } else if (n >= 0) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.empty(); - } - - private Optional getNextByteArray() throws IOException { - Optional size = getNextNumberInStream(); - if (size.isPresent()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Allocating byte array, size: {}", size.get()); - } - byte[] b = new byte[size.get()]; - int n = processOutput.read(b); - if (n >= 0 && n != size.get()) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.of(b); - } - return Optional.empty(); - } - - private Optional getNextUnsignedLong() throws IOException { - long val = 0; - for (int i = 0; i < 8; i++) { - val = val << 8; - int nextByte = processOutput.read(); - if (nextByte < 0) { - if (i == 0) { - return Optional.empty(); - } - throw new IllegalStateException(String.format("Long expects " + - "8 bytes to be read from the stream, but read only %d bytes", i)); - } - val += nextByte; - } - return Optional.of(UnsignedLong.fromLongBits(val)); - } - - private void init(ManagedSSTDumpTool sstDumpTool, File sstFile, - ManagedOptions options, ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) { - Map argMap = Maps.newHashMap(); - argMap.put("file", sstFile.getAbsolutePath()); - argMap.put("silent", null); - argMap.put("command", "scan"); - // strings containing '\0' do not have the same value when encode UTF-8 on - // java which is 0. But in jni the utf-8 encoded value for '\0' - // becomes -64 -128. Thus the value becomes different. - // In order to support this, changes have been made on the rocks-tools - // to pass the address of the ManagedSlice and the jni can use the object - // of slice directly from there. - if (Objects.nonNull(lowerKeyBound)) { - argMap.put("from", String.valueOf(lowerKeyBound.getNativeHandle())); - } - if (Objects.nonNull(upperKeyBound)) { - argMap.put("to", String.valueOf(upperKeyBound.getNativeHandle())); - } - this.sstDumpToolTask = sstDumpTool.run(argMap, options); - processOutput = sstDumpToolTask.getPipedOutput(); - intBuffer = new byte[4]; - open = new AtomicBoolean(true); - currentKey = Optional.empty(); - nextKey = Optional.empty(); - next(); - } - - /** - * Throws Runtime exception in the case iterator is closed or - * the native Dumptool exited with non zero exit value. - */ - private void checkSanityOfProcess() { - if (!this.open.get()) { - throw new RuntimeException("Iterator has been closed"); - } - if (sstDumpToolTask.getFuture().isDone() && - sstDumpToolTask.exitValue() != 0) { - throw new RuntimeException("Process Terminated with non zero " + - String.format("exit value %d", sstDumpToolTask.exitValue())); - } - } - - /** - * Checks the status of the process & sees if there is another record. - * - * @return True if next exists & false otherwise - * Throws Runtime Exception in case of SST File read failure - */ - - @Override - public boolean hasNext() { - checkSanityOfProcess(); - return nextKey.isPresent(); - } - - /** - * Transforms Key to a certain value. 
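In contrast to the length-prefixed pipe parsing being deleted here, the replacement reader and iterator added earlier in this patch pull entries straight from the native iterator over JNI. A hedged usage sketch: the generic type parameters are not visible in the rendered diff and are assumed, and the file path, read-ahead size, and wrapper class are illustrative only.

```java
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;

// Sketch: scan every entry (including tombstones) of one SST file.
final class RawSstScanExample {
  private RawSstScanExample() { }

  static void scan(String sstFile) throws Exception {
    ManagedRawSSTFileReader.loadLibrary();   // loads rocksdb + ozone_rocksdb_tools
    try (ManagedOptions options = new ManagedOptions();
         ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader =
             new ManagedRawSSTFileReader<>(options, sstFile, 2 * 1024 * 1024);
         ManagedRawSSTFileIterator<ManagedRawSSTFileIterator.KeyValue> it =
             reader.newIterator(kv -> kv, null, null)) {   // no bounds: full scan
      while (it.hasNext()) {
        ManagedRawSSTFileIterator.KeyValue kv = it.next();
        System.out.println(kv);   // KeyValue.toString() prints key/sequence/type/value
      }
    }
  }
}
```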
- * - * @param value - * @return transformed Value - */ - protected abstract T getTransformedValue(Optional value); - - /** - * Returns the next record from SSTDumpTool. - * - * @return next Key - * Throws Runtime Exception incase of failure. - */ - @Override - public T next() { - checkSanityOfProcess(); - currentKey = nextKey; - nextKey = Optional.empty(); - try { - Optional key = getNextByteArray(); - if (!key.isPresent()) { - return getTransformedValue(currentKey); - } - UnsignedLong sequenceNumber = getNextUnsignedLong() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number" + - " for key %s", StringUtils.bytes2String(key.get())))); - - Integer type = getNextNumberInStream() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString()))); - byte[] val = getNextByteArray().orElseThrow(() -> - new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s of type %d", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString(), type))); - nextKey = Optional.of(new KeyValue(key.get(), sequenceNumber, type, val)); - } catch (IOException e) { - // TODO [SNAPSHOT] Throw custom snapshot exception - throw new RuntimeIOException(e); - } - return getTransformedValue(currentKey); - } - - @Override - public synchronized void close() throws UncheckedIOException { - if (this.sstDumpToolTask != null) { - if (!this.sstDumpToolTask.getFuture().isDone()) { - this.sstDumpToolTask.getFuture().cancel(true); - } - try { - this.processOutput.close(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - open.compareAndSet(true, false); - } - - @Override - protected void finalize() throws Throwable { - if (open.get()) { - LOG.warn("{} is not closed properly." + - " StackTrace for unclosed instance: {}", - this.getClass().getName(), - Arrays.stream(stackTrace) - .map(StackTraceElement::toString).collect( - Collectors.joining("\n"))); - } - this.close(); - super.finalize(); - } - - /** - * Class containing Parsed KeyValue Record from Sst Dumptool output. 
- */ - public static final class KeyValue { - - private final byte[] key; - private final UnsignedLong sequence; - private final Integer type; - private final byte[] value; - - private KeyValue(byte[] key, UnsignedLong sequence, Integer type, - byte[] value) { - this.key = key; - this.sequence = sequence; - this.type = type; - this.value = value; - } - - public byte[] getKey() { - return key; - } - - public UnsignedLong getSequence() { - return sequence; - } - - public Integer getType() { - return type; - } - - public byte[] getValue() { - return value; - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + StringUtils.bytes2String(key) + - ", sequence=" + sequence + - ", type=" + type + - ", value=" + StringUtils.bytes2String(value) + - '}'; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java deleted file mode 100644 index 5d965d7398e0..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; - -import java.io.InputStream; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -/** - * JNI for RocksDB SSTDumpTool. 
Pipes the output to an output stream - */ -public class ManagedSSTDumpTool { - - private int bufferCapacity; - private ExecutorService executorService; - - public ManagedSSTDumpTool(ExecutorService executorService, - int bufferCapacity) - throws NativeLibraryNotLoadedException { - if (!NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { - throw new NativeLibraryNotLoadedException( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - this.bufferCapacity = bufferCapacity; - this.executorService = executorService; - } - - public SSTDumpToolTask run(String[] args, ManagedOptions options) { - PipeInputStream pipeInputStream = new PipeInputStream(bufferCapacity); - return new SSTDumpToolTask(this.executorService.submit(() -> - this.runInternal(args, options.getNativeHandle(), - pipeInputStream.getNativeHandle())), pipeInputStream); - } - - public SSTDumpToolTask run(Map args, ManagedOptions options) { - return this.run(args.entrySet().stream().map(e -> "--" - + (e.getValue() == null || e.getValue().isEmpty() ? e.getKey() : - e.getKey() + "=" + e.getValue())).toArray(String[]::new), options); - } - - private native int runInternal(String[] args, long optionsHandle, - long pipeHandle); - - /** - * Class holding piped output of SST Dumptool & future of command. - */ - static class SSTDumpToolTask { - private Future future; - private InputStream pipedOutput; - - SSTDumpToolTask(Future future, InputStream pipedOutput) { - this.future = future; - this.pipedOutput = pipedOutput; - } - - public Future getFuture() { - return future; - } - - public InputStream getPipedOutput() { - return pipedOutput; - } - - public int exitValue() { - if (this.future.isDone()) { - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - return 1; - } - } - return 0; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java deleted file mode 100644 index df4f613f98e2..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import java.io.InputStream; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * JNI for reading data from pipe. 
- */ -public class PipeInputStream extends InputStream { - - private byte[] byteBuffer; - private long nativeHandle; - private int numberOfBytesLeftToRead; - private int index = 0; - private int capacity; - - private AtomicBoolean cleanup; - - PipeInputStream(int capacity) { - this.byteBuffer = new byte[capacity]; - this.numberOfBytesLeftToRead = 0; - this.capacity = capacity; - this.nativeHandle = newPipe(); - this.cleanup = new AtomicBoolean(false); - } - - long getNativeHandle() { - return nativeHandle; - } - - @Override - public int read() { - if (numberOfBytesLeftToRead < 0) { - this.close(); - return -1; - } - while (numberOfBytesLeftToRead == 0) { - numberOfBytesLeftToRead = readInternal(byteBuffer, capacity, - nativeHandle); - index = 0; - if (numberOfBytesLeftToRead != 0) { - return read(); - } - } - numberOfBytesLeftToRead--; - int ret = byteBuffer[index] & 0xFF; - index += 1; - return ret; - } - - private native long newPipe(); - - private native int readInternal(byte[] buff, int numberOfBytes, - long pipeHandle); - - private native void closeInternal(long pipeHandle); - - @Override - public void close() { - if (this.cleanup.compareAndSet(false, true)) { - closeInternal(this.nativeHandle); - } - } - - @Override - protected void finalize() throws Throwable { - close(); - super.finalize(); - } -} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp new file mode 100644 index 000000000000..1cf222528379 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, + jlong native_handle) { + return static_cast(reinterpret_cast(native_handle)->Valid()); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, + jlong native_handle) { + reinterpret_cast(native_handle)->Next(); +} + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint64_t sequence_number = + reinterpret_cast(native_handle)->sequenceNumber(); + jlong result; + std::memcpy(&result, &sequence_number, sizeof(jlong)); + return result; +} + + +jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint32_t type = reinterpret_cast(native_handle)->type(); + return static_cast(type); +} + + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, + jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp new file mode 100644 index 000000000000..f3b8dc02639d --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_sst_file_reader.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, + jlong options_handle, + jstring jfilename, + jint readahead_size) { + ROCKSDB_NAMESPACE::Options *options = reinterpret_cast(options_handle); + const char *file_path = env->GetStringUTFChars(jfilename, nullptr); + size_t read_ahead_size_value = static_cast(readahead_size); + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true); + env->ReleaseStringUTFChars(jfilename, file_path); + return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, + jlong native_handle, + jboolean jhas_from, + jlong from_slice_handle, + jboolean jhas_to, + jlong to_slice_handle) { + ROCKSDB_NAMESPACE::Slice* from_slice = nullptr; + ROCKSDB_NAMESPACE::Slice* to_slice = nullptr; + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + reinterpret_cast(native_handle); + bool has_from = static_cast(jhas_from); + bool has_to = static_cast(jhas_to); + if (has_from) { + from_slice = reinterpret_cast(from_slice_handle); + } + if (has_to) { + to_slice = reinterpret_cast(to_slice_handle); + } + ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice); + return GET_CPLUSPLUS_POINTER(iterator); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp b/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp deleted file mode 100644 index f1dd54438700..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
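The native newIterator above takes optional lower and upper bound slices, matching the hasFrom/hasTo flags passed from the Java side. A hedged sketch of a bounded scan: the byte[] constructor for ManagedSlice is assumed to follow the existing managed-rocksdb wrappers, and the key range and class name are illustrative.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice;

// Sketch: visit only keys within [from, to) of one SST file.
final class BoundedRawSstScanExample {
  private BoundedRawSstScanExample() { }

  static void scan(ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader) {
    byte[] from = "/vol1/bucket1/a".getBytes(StandardCharsets.UTF_8);
    byte[] to = "/vol1/bucket1/z".getBytes(StandardCharsets.UTF_8);
    try (ManagedSlice lower = new ManagedSlice(from);   // byte[] constructor assumed
         ManagedSlice upper = new ManagedSlice(to);
         ManagedRawSSTFileIterator<ManagedRawSSTFileIterator.KeyValue> it =
             reader.newIterator(kv -> kv, lower, upper)) {
      while (it.hasNext()) {
        ManagedRawSSTFileIterator.KeyValue kv = it.next();
        // process kv ...
      }
    }
  }
}
```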
- */ - -#include "Pipe.h" -#include - -const int Pipe::READ_FILE_DESCRIPTOR_IDX = 0; -const int Pipe::WRITE_FILE_DESCRIPTOR_IDX = 1; - -Pipe::Pipe() { - pipe(p); - open = true; -} - -Pipe::~Pipe() { - ::close(p[Pipe::READ_FILE_DESCRIPTOR_IDX]); - ::close(p[Pipe::WRITE_FILE_DESCRIPTOR_IDX]); -} - -void Pipe::close() { - open = false; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.h b/hadoop-hdds/rocks-native/src/main/native/Pipe.h deleted file mode 100644 index aa75c6311cbc..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ROCKS_NATIVE_PIPE_H -#define ROCKS_NATIVE_PIPE_H - -#include - -class Pipe { - public: - static const int READ_FILE_DESCRIPTOR_IDX; - static const int WRITE_FILE_DESCRIPTOR_IDX; - Pipe(); - ~Pipe(); - void close(); - int getReadFd() { - return getPipeFileDescriptorIndex(READ_FILE_DESCRIPTOR_IDX); - } - - int getWriteFd() { - return getPipeFileDescriptorIndex(WRITE_FILE_DESCRIPTOR_IDX); - } - - int getPipeFileDescriptorIndex(int idx) { - return p[idx]; - } - - bool isOpen() { - return open; - } - - - private: - int p[2]; - FILE* wr; - bool open; - -}; - -#endif //ROCKS_NATIVE_PIPE_H diff --git a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp b/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp deleted file mode 100644 index 53f60cdd65af..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include "Pipe.h" -#include "cplusplus_to_java_convert.h" -#include "org_apache_hadoop_hdds_utils_db_managed_PipeInputStream.h" - - -jlong Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_newPipe(JNIEnv *, jobject) { - Pipe *pipe = new Pipe(); - return GET_CPLUSPLUS_POINTER(pipe); -} - -jint Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_readInternal(JNIEnv *env, jobject object, jbyteArray jbyteArray, jint capacity, jlong nativeHandle) { - int cap_int = capacity; - Pipe *pipe = reinterpret_cast(nativeHandle); - jbyte *b = (env)->GetByteArrayElements(jbyteArray, JNI_FALSE); - cap_int = read(pipe->getReadFd(), b, cap_int); - if (cap_int == 0) { - if (!pipe->isOpen()) { - cap_int = -1; - } - } - env->ReleaseByteArrayElements(jbyteArray, b, 0); - return cap_int; -} - -void Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_closeInternal(JNIEnv *env, jobject object, jlong nativeHandle) { - delete reinterpret_cast(nativeHandle); -} - diff --git a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp b/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp deleted file mode 100644 index 285c5906c2d8..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool.h" -#include "rocksdb/options.h" -#include "rocksdb/sst_dump_tool.h" -#include -#include "cplusplus_to_java_convert.h" -#include "Pipe.h" -#include - -jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool_runInternal(JNIEnv *env, jobject obj, - jobjectArray argsArray, jlong optionsHandle, jlong pipeHandle) { - ROCKSDB_NAMESPACE::SSTDumpTool dumpTool; - ROCKSDB_NAMESPACE::Options options; - Pipe *pipe = reinterpret_cast(pipeHandle); - int length = env->GetArrayLength(argsArray); - char *args[length + 1]; - for (int i = 0; i < length; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)i); - char *utf_str = (char *)env->GetStringUTFChars(str_val, JNI_FALSE); - args[i + 1] = utf_str; - } - FILE *wr = fdopen(pipe->getWriteFd(), "w"); - int ret = dumpTool.Run(length + 1, args, options, wr); - for (int i = 1; i < length + 1; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)(i - 1)); - env->ReleaseStringUTFChars(str_val, args[i]); - } - fclose(wr); - pipe->close(); - return ret; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h index efe9d4a5be24..4862ea12a1b9 100644 --- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h +++ b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h @@ -16,7 +16,7 @@ * limitations under the License. */ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch index 841c2533b863..12dc74614a45 100644 --- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch +++ b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch @@ -16,592 +16,531 @@ * limitations under the License. 
*/ -diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h -index 9261ba47d..1e62b88a3 100644 ---- a/include/rocksdb/sst_dump_tool.h -+++ b/include/rocksdb/sst_dump_tool.h -@@ -11,7 +11,8 @@ namespace ROCKSDB_NAMESPACE { - - class SSTDumpTool { - public: -- int Run(int argc, char const* const* argv, Options options = Options()); -+ int Run(int argc, char const* const* argv, Options options = Options(), -+ FILE* out = stdout, FILE* err = stderr); - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc -index eefbaaeee..734a2f0dd 100644 ---- a/table/sst_file_dumper.cc -+++ b/table/sst_file_dumper.cc -@@ -45,7 +45,7 @@ SstFileDumper::SstFileDumper(const Options& options, - Temperature file_temp, size_t readahead_size, - bool verify_checksum, bool output_hex, - bool decode_blob_index, const EnvOptions& soptions, -- bool silent) -+ bool silent, FILE* out, FILE* err) - : file_name_(file_path), - read_num_(0), - file_temp_(file_temp), -@@ -57,10 +57,13 @@ SstFileDumper::SstFileDumper(const Options& options, - ioptions_(options_), - moptions_(ColumnFamilyOptions(options_)), - read_options_(verify_checksum, false), -- internal_comparator_(BytewiseComparator()) { -+ internal_comparator_(BytewiseComparator()), -+ out_(out), -+ err_(err) -+ { - read_options_.readahead_size = readahead_size; - if (!silent_) { -- fprintf(stdout, "Process %s\n", file_path.c_str()); -+ fprintf(out_, "Process %s\n", file_path.c_str()); - } - init_result_ = GetTableReader(file_name_); - } -@@ -253,17 +256,17 @@ Status SstFileDumper::ShowAllCompressionSizes( - int32_t compress_level_from, int32_t compress_level_to, - uint32_t max_dict_bytes, uint32_t zstd_max_train_bytes, - uint64_t max_dict_buffer_bytes, bool use_zstd_dict_trainer) { -- fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); -+ fprintf(out_, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); - for (auto& i : compression_types) { - if (CompressionTypeSupported(i.first)) { -- fprintf(stdout, "Compression: %-24s\n", i.second); -+ fprintf(out_, "Compression: %-24s\n", i.second); - CompressionOptions compress_opt; - compress_opt.max_dict_bytes = max_dict_bytes; - compress_opt.zstd_max_train_bytes = zstd_max_train_bytes; - compress_opt.max_dict_buffer_bytes = max_dict_buffer_bytes; - compress_opt.use_zstd_dict_trainer = use_zstd_dict_trainer; - for (int32_t j = compress_level_from; j <= compress_level_to; j++) { -- fprintf(stdout, "Compression level: %d", j); -+ fprintf(out_, "Compression level: %d", j); - compress_opt.level = j; - Status s = ShowCompressionSize(block_size, i.first, compress_opt); - if (!s.ok()) { -@@ -271,7 +274,7 @@ Status SstFileDumper::ShowAllCompressionSizes( - } - } - } else { -- fprintf(stdout, "Unsupported compression type: %s.\n", i.second); -+ fprintf(err_, "Unsupported compression type: %s.\n", i.second); - } - } - return Status::OK(); -@@ -307,9 +310,9 @@ Status SstFileDumper::ShowCompressionSize( - } - - std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); -- fprintf(stdout, " Size: %10" PRIu64, file_size); -- fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks); -- fprintf(stdout, " Time Taken: %10s microsecs", -+ fprintf(out_, " Size: %10" PRIu64, file_size); -+ fprintf(out_, " Blocks: %6" PRIu64, num_data_blocks); -+ fprintf(out_, " Time Taken: %10s microsecs", - std::to_string( - std::chrono::duration_cast(end - start) - .count()) -@@ -342,11 +345,11 @@ Status SstFileDumper::ShowCompressionSize( - : 
((static_cast(not_compressed_blocks) / - static_cast(num_data_blocks)) * - 100.0); -- fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, -+ fprintf(out_, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, - compressed_pcnt); -- fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", -+ fprintf(out_, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", - ratio_not_compressed_blocks, ratio_not_compressed_pcnt); -- fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", -+ fprintf(out_, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", - not_compressed_blocks, not_compressed_pcnt); - return Status::OK(); - } -@@ -362,7 +365,7 @@ Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number, - /* memory_allocator= */ nullptr, prefetch_buffer); - if (!s.ok()) { - if (!silent_) { -- fprintf(stdout, "Not able to read table properties\n"); -+ fprintf(err_, "Not able to read table properties\n"); - } - } - return s; -@@ -410,7 +413,7 @@ Status SstFileDumper::SetTableOptionsByMagicNumber( - - options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); - if (!silent_) { -- fprintf(stdout, "Sst file format: plain table\n"); -+ fprintf(out_, "Sst file format: plain table\n"); - } - } else { - char error_msg_buffer[80]; -@@ -427,15 +430,56 @@ Status SstFileDumper::SetOldTableOptions() { - assert(table_properties_ == nullptr); - options_.table_factory = std::make_shared(); - if (!silent_) { -- fprintf(stdout, "Sst file format: block-based(old version)\n"); -+ fprintf(out_, "Sst file format: block-based(old version)\n"); - } - - return Status::OK(); - } - -+void write(int value, FILE* file) { -+ char b[4]; -+ b[3] = value & 0x000000ff; -+ b[2] = (value & 0x0000ff00) >> 8; -+ b[1] = (value & 0x00ff0000) >> 16; -+ b[0] = (value & 0xff000000) >> 24; -+ std::fwrite(b, 4, 1, file); +diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h +new file mode 100644 +index 000000000..21242ed15 +--- /dev/null ++++ b/include/rocksdb/raw_iterator.h +@@ -0,0 +1,25 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE ++ ++ ++#include "rocksdb/advanced_options.h" ++namespace ROCKSDB_NAMESPACE { ++ ++class RawIterator { ++ public: ++ virtual ~RawIterator() {} ++ virtual bool Valid() const = 0; ++ virtual Slice key() const = 0; ++ virtual Slice value() const = 0; ++ virtual uint64_t sequenceNumber() const = 0; ++ virtual uint32_t type() const = 0; ++ virtual void Next() = 0; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h +new file mode 100644 +index 000000000..09e748208 +--- /dev/null ++++ b/include/rocksdb/raw_sst_file_reader.h +@@ -0,0 +1,62 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++#pragma once ++#ifndef ROCKSDB_LITE ++ ++#include ++#include ++ ++#include "rocksdb/raw_iterator.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/options.h" ++ ++ ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileReader { ++ public: ++ ++ RawSstFileReader(const Options& options, const std::string& file_name, ++ size_t readahead_size, bool verify_checksum, ++ bool silent = false); ++ ~RawSstFileReader(); ++ ++ RawIterator* newIterator(bool has_from, Slice* from, ++ bool has_to, Slice *to); ++ Status getStatus() { return init_result_; } ++ ++ private: ++ // Get the TableReader implementation for the sst file ++ Status GetTableReader(const std::string& file_path); ++ Status ReadTableProperties(uint64_t table_magic_number, ++ uint64_t file_size); ++ ++ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number); ++ Status SetOldTableOptions(); ++ ++ // Helper function to call the factory with settings specific to the ++ // factory implementation ++ Status NewTableReader(uint64_t file_size); ++ ++ std::string file_name_; ++ Temperature file_temp_; ++ ++ // less verbose in stdout/stderr ++ bool silent_; ++ ++ // options_ and internal_comparator_ will also be used in ++ // ReadSequential internally (specifically, seek-related operations) ++ Options options_; ++ ++ Status init_result_; ++ ++ struct Rep; ++ std::unique_ptr rep_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/src.mk b/src.mk +index b94bc43ca..c13e5cde6 100644 +--- a/src.mk ++++ b/src.mk +@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\ + utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc + + TOOL_LIB_SOURCES = \ +- tools/io_tracer_parser_tool.cc \ +- tools/ldb_cmd.cc \ +- tools/ldb_tool.cc \ +- tools/sst_dump_tool.cc \ +- utilities/blob_db/blob_dump_tool.cc \ ++ tools/raw_sst_file_reader.cc \ ++ tools/raw_sst_file_iterator.cc \ + + ANALYZER_LIB_SOURCES = \ + tools/block_cache_analyzer/block_cache_trace_analyzer.cc \ +diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc +new file mode 100644 +index 000000000..3051637a3 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.cc +@@ -0,0 +1,76 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++// ++#ifndef ROCKSDB_LITE ++ ++ ++#include ++#include ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "rocksdb/status.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "tools/raw_sst_file_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, Slice* from_key, ++ bool has_to, Slice* to_key) ++ : iter_(iterator), ++ ikey(new ParsedInternalKey()), ++ has_to_(has_to), ++ to_key_(to_key) { ++ if (has_from) { ++ InternalKey k; ++ k.SetMinPossibleForUserKey(*from_key); ++ iter_->Seek(k.Encode()); ++ } else { ++ iter_->SeekToFirst(); ++ } ++ initKey(); ++} ++ ++bool RawSstFileIterator::Valid() const { ++ return iter_->Valid() && (!has_to_ || ++ BytewiseComparator()->Compare( ++ key(), *to_key_) < 0); ++} ++ ++void RawSstFileIterator::initKey() { ++ if (iter_->Valid()) { ++ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */); ++ } +} ++void RawSstFileIterator::Next() { ++ iter_->Next(); ++ initKey(); ++ ++} ++ ++Slice RawSstFileIterator::key() const { ++ return ikey->user_key; ++} ++ ++uint64_t RawSstFileIterator::sequenceNumber() const { ++ return ikey->sequence; ++} ++ ++uint32_t RawSstFileIterator::type() const { ++ return static_cast(ikey->type); ++} ++ ++Slice RawSstFileIterator::value() const { ++ return iter_->value(); ++} ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h +new file mode 100644 +index 000000000..58e34b260 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.h +@@ -0,0 +1,45 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE + -+void write(const char* value, int length, FILE* file) { -+ write(length, file); -+ fwrite(value, length, 1, file); ++#include ++#include ++#include "file/writable_file_writer.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/raw_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileIterator : public RawIterator { ++ public: ++ explicit RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, ++ Slice* from_key, ++ bool has_to, ++ Slice* to_key); ++ ++ bool Valid() const override; ++ Slice key() const override; ++ Slice value() const override; ++ uint64_t sequenceNumber() const override; ++ uint32_t type() const override; ++ void Next() final override; ++ ++ ~RawSstFileIterator(){ ++ delete iter_; ++ } ++ ++ private: ++ void initKey(); ++ InternalIterator* iter_; ++ ParsedInternalKey* ikey; ++ bool has_to_; ++ Slice* to_key_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc +new file mode 100644 +index 000000000..5ba8a82ee +--- /dev/null ++++ b/tools/raw_sst_file_reader.cc +@@ -0,0 +1,272 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++// ++#ifndef ROCKSDB_LITE ++ ++#include "rocksdb/raw_sst_file_reader.h" ++ ++#include ++#include ++#include ++#include ++ ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "options/cf_options.h" ++#include "rocksdb/env.h" ++#include "rocksdb/slice_transform.h" ++#include "rocksdb/status.h" ++#include "rocksdb/table_properties.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/format.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "table/table_reader.h" ++#include "tools/raw_sst_file_iterator.h" ++#include "db/dbformat.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++struct RawSstFileReader::Rep { ++ Options options; ++ EnvOptions soptions_; ++ ReadOptions read_options_; ++ ImmutableOptions ioptions_; ++ MutableCFOptions moptions_; ++ InternalKeyComparator internal_comparator_; ++ std::unique_ptr table_properties_; ++ std::unique_ptr table_reader_; ++ std::unique_ptr file_; ++ ++ Rep(const Options& opts, bool verify_checksum, size_t readahead_size) ++ : options(opts), ++ soptions_(EnvOptions()), ++ read_options_(verify_checksum, false), ++ ioptions_(options), ++ moptions_(ColumnFamilyOptions(options)), ++ internal_comparator_(InternalKeyComparator(BytewiseComparator())) { ++ read_options_.readahead_size = readahead_size; ++ } ++}; ++ ++RawSstFileReader::RawSstFileReader(const Options& options, ++ const std::string& file_name, ++ size_t readahead_size, ++ bool verify_checksum, ++ bool silent) :rep_(new Rep(options, ++ verify_checksum, ++ readahead_size)) { ++ file_name_ = file_name; ++ silent_ = silent; ++ options_ = options; ++ file_temp_ = Temperature::kUnknown; ++ init_result_ = GetTableReader(file_name_); +} + -+void write(const std::string& value, FILE* file) { -+ write(value.data(), (int)value.length(), file); ++RawSstFileReader::~RawSstFileReader() {} ++ ++ ++ ++extern const uint64_t kBlockBasedTableMagicNumber; ++extern const uint64_t kLegacyBlockBasedTableMagicNumber; ++extern const uint64_t kPlainTableMagicNumber; ++extern const uint64_t kLegacyPlainTableMagicNumber; ++ ++Status RawSstFileReader::GetTableReader(const std::string& file_path) { ++ // Warning about 'magic_number' being uninitialized shows up only in UBsan ++ // builds. Though access is guarded by 's.ok()' checks, fix the issue to ++ // avoid any warnings. 
++ uint64_t magic_number = Footer::kNullTableMagicNumber; ++ ++ // read table magic number ++ Footer footer; ++ ++ const auto& fs = options_.env->GetFileSystem(); ++ std::unique_ptr file; ++ uint64_t file_size = 0; ++ FileOptions fopts = rep_->soptions_; ++ fopts.temperature = file_temp_; ++ Status s = fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ if (s.ok()) { ++ s = fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr); ++ } ++ ++ // check empty file ++ // if true, skip further processing of this file ++ if (file_size == 0) { ++ return Status::Aborted(file_path, "Empty file"); ++ } ++ ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ ++ FilePrefetchBuffer prefetch_buffer( ++ 0 /* readahead_size */, 0 /* max_readahead_size */, true /* enable */, ++ false /* track_min_offset */); ++ if (s.ok()) { ++ const uint64_t kSstDumpTailPrefetchSize = 512 * 1024; ++ uint64_t prefetch_size = (file_size > kSstDumpTailPrefetchSize) ++ ? kSstDumpTailPrefetchSize ++ : file_size; ++ uint64_t prefetch_off = file_size - prefetch_size; ++ IOOptions opts; ++ s = prefetch_buffer.Prefetch(opts, rep_->file_.get(), prefetch_off, ++ static_cast(prefetch_size), ++ Env::IO_TOTAL /* rate_limiter_priority */); ++ ++ s = ReadFooterFromFile(opts, rep_->file_.get(), &prefetch_buffer, file_size, ++ &footer); ++ } ++ if (s.ok()) { ++ magic_number = footer.table_magic_number(); ++ } ++ ++ if (s.ok()) { ++ if (magic_number == kPlainTableMagicNumber || ++ magic_number == kLegacyPlainTableMagicNumber) { ++ rep_->soptions_.use_mmap_reads = true; ++ ++ fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ } ++ ++ s = ROCKSDB_NAMESPACE::ReadTableProperties( ++ rep_->file_.get(), file_size, magic_number, rep_->ioptions_, &(rep_->table_properties_), ++ /* memory_allocator= */ nullptr, (magic_number == kBlockBasedTableMagicNumber) ++ ? &prefetch_buffer ++ : nullptr); ++ // For old sst format, ReadTableProperties might fail but file can be read ++ if (s.ok()) { ++ s = SetTableOptionsByMagicNumber(magic_number); ++ if (s.ok()) { ++ if (rep_->table_properties_ && !rep_->table_properties_->comparator_name.empty()) { ++ ConfigOptions config_options; ++ const Comparator* user_comparator = nullptr; ++ s = Comparator::CreateFromString(config_options, ++ rep_->table_properties_->comparator_name, ++ &user_comparator); ++ if (s.ok()) { ++ assert(user_comparator); ++ rep_->internal_comparator_ = InternalKeyComparator(user_comparator); ++ } ++ } ++ } ++ } else { ++ if (!silent_) { ++ fprintf(stderr, "Not able to read table properties\n"); ++ } ++ s = SetOldTableOptions(); ++ } ++ options_.comparator = rep_->internal_comparator_.user_comparator(); ++ } ++ ++ if (s.ok()) { ++ s = NewTableReader(file_size); ++ } ++ return s; +} + -+void write(Slice &slice, FILE* file) { -+ int size = (int)slice.size(); -+ write(slice.data(), size, file); ++Status RawSstFileReader::NewTableReader(uint64_t file_size) { ++ auto t_opt = ++ TableReaderOptions(rep_->ioptions_, rep_->moptions_.prefix_extractor, rep_->soptions_, ++ rep_->internal_comparator_, false /* skip_filters */, ++ false /* imortal */, true /* force_direct_prefetch */); ++ // Allow open file with global sequence number for backward compatibility. 
++ t_opt.largest_seqno = kMaxSequenceNumber; ++ ++ // We need to turn off pre-fetching of index and filter nodes for ++ // BlockBasedTable ++ if (options_.table_factory->IsInstanceOf( ++ TableFactory::kBlockBasedTableName())) { ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_), ++ /*enable_prefetch=*/false); ++ } ++ ++ // For all other factory implementation ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_)); +} + -+void write(SequenceNumber sequenceNumber, FILE* file) { ++Status RawSstFileReader::SetTableOptionsByMagicNumber( ++ uint64_t table_magic_number) { ++ assert(rep_->table_properties_); ++ if (table_magic_number == kBlockBasedTableMagicNumber || ++ table_magic_number == kLegacyBlockBasedTableMagicNumber) { ++ BlockBasedTableFactory* bbtf = new BlockBasedTableFactory(); ++ // To force tail prefetching, we fake reporting two useful reads of 512KB ++ // from the tail. ++ // It needs at least two data points to warm up the stats. ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ ++ options_.table_factory.reset(bbtf); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based\n"); ++ } ++ ++ auto& props = rep_->table_properties_->user_collected_properties; ++ auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); ++ if (pos != props.end()) { ++ auto index_type_on_file = static_cast( ++ DecodeFixed32(pos->second.c_str())); ++ if (index_type_on_file == ++ BlockBasedTableOptions::IndexType::kHashSearch) { ++ options_.prefix_extractor.reset(NewNoopTransform()); ++ } ++ } ++ } else if (table_magic_number == kPlainTableMagicNumber || ++ table_magic_number == kLegacyPlainTableMagicNumber) { ++ options_.allow_mmap_reads = true; + -+ char b[8]; -+ int idx = 7; -+ while (idx >= 0) { -+ b[idx] = sequenceNumber % 256; -+ sequenceNumber /= 256; -+ idx -= 1; ++ PlainTableOptions plain_table_options; ++ plain_table_options.user_key_len = kPlainTableVariableLength; ++ plain_table_options.bloom_bits_per_key = 0; ++ plain_table_options.hash_table_ratio = 0; ++ plain_table_options.index_sparseness = 1; ++ plain_table_options.huge_page_tlb_size = 0; ++ plain_table_options.encoding_type = kPlain; ++ plain_table_options.full_scan_mode = true; ++ ++ options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: plain table\n"); ++ } ++ } else { ++ char error_msg_buffer[80]; ++ snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1, ++ "Unsupported table magic number --- %lx", ++ (long)table_magic_number); ++ return Status::InvalidArgument(error_msg_buffer); + } -+ fwrite(b, 8, 1, file); ++ ++ return Status::OK(); +} + -+void write(ParsedInternalKey &key, FILE* file) { -+ write(key.user_key, file); -+ write(key.sequence, file); -+ write(static_cast(key.type), file); ++Status RawSstFileReader::SetOldTableOptions() { ++ assert(rep_->table_properties_ == nullptr); ++ options_.table_factory = std::make_shared(); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based(old version)\n"); ++ } ++ ++ return Status::OK(); +} + - Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, -- bool has_from, const std::string& from_key, -- bool has_to, const std::string& to_key, -+ bool has_from, const Slice& from_key, -+ bool has_to, const Slice& to_key, - bool use_from_as_prefix) { - if 
(!table_reader_) { - return init_result_; -@@ -446,6 +490,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - /*arena=*/nullptr, /*skip_filters=*/false, - TableReaderCaller::kSSTDumpTool); - uint64_t i = 0; -+ - if (has_from) { - InternalKey ikey; - ikey.SetMinPossibleForUserKey(from_key); -@@ -453,6 +498,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - } else { - iter->SeekToFirst(); - } -+ - for (; iter->Valid(); iter->Next()) { - Slice key = iter->key(); - Slice value = iter->value(); -@@ -478,22 +524,19 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - - if (print_kv) { - if (!decode_blob_index_ || ikey.type != kTypeBlobIndex) { -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- value.ToString(output_hex_).c_str()); -+ write(ikey, out_); -+ write(value, out_); - } else { - BlobIndex blob_index; -- - const Status s = blob_index.DecodeFrom(value); - if (!s.ok()) { -- fprintf(stderr, "%s => error decoding blob index\n", -- ikey.DebugString(true, output_hex_).c_str()); -+ write(ikey, err_); -+ write("error decoding blob index", err_); - continue; - } -- -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- blob_index.DebugString(output_hex_).c_str()); -+ write(ikey, out_); -+ std::string v = blob_index.DebugString(output_hex_); -+ write(v, out_); - } - } - } -diff --git a/table/sst_file_dumper.h b/table/sst_file_dumper.h -index 7be876390..768c5b1e2 100644 ---- a/table/sst_file_dumper.h -+++ b/table/sst_file_dumper.h -@@ -22,11 +22,13 @@ class SstFileDumper { - bool verify_checksum, bool output_hex, - bool decode_blob_index, - const EnvOptions& soptions = EnvOptions(), -- bool silent = false); -+ bool silent = false, -+ FILE* out = stdout, -+ FILE* err = stderr); - - Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from, -- const std::string& from_key, bool has_to, -- const std::string& to_key, -+ const Slice& from_key, bool has_to, -+ const Slice& to_key, - bool use_from_as_prefix = false); - - Status ReadTableProperties( -@@ -94,6 +96,8 @@ class SstFileDumper { - ReadOptions read_options_; - InternalKeyComparator internal_comparator_; - std::unique_ptr table_properties_; -+ FILE* out_; -+ FILE* err_; - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc -index 7053366e7..8f248ddf3 100644 ---- a/tools/sst_dump_tool.cc -+++ b/tools/sst_dump_tool.cc -@@ -31,7 +31,7 @@ static const std::vector> - - namespace { - --void print_help(bool to_stderr) { -+void print_help(bool to_stderr, FILE* err_, FILE* out_) { - std::string supported_compressions; - for (CompressionType ct : GetSupportedCompressions()) { - if (!supported_compressions.empty()) { -@@ -43,7 +43,7 @@ void print_help(bool to_stderr) { - supported_compressions += str; - } - fprintf( -- to_stderr ? stderr : stdout, -+ to_stderr ? 
err_ : out_, - R"(sst_dump --file= [--command=check|scan|raw|recompress|identify] - --file= - Path to SST file or directory containing SST files -@@ -149,7 +149,13 @@ bool ParseIntArg(const char* arg, const std::string arg_name, - } - } // namespace - --int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { -+Slice* AssignSlicePrependedWithLength(const char* buf) { -+ long val = std::stol(buf); -+ return reinterpret_cast(val); ++RawIterator* RawSstFileReader::newIterator( ++ bool has_from, Slice* from, bool has_to, Slice* to) { ++ InternalIterator* iter = rep_->table_reader_->NewIterator( ++ rep_->read_options_, rep_->moptions_.prefix_extractor.get(), ++ /*arena=*/nullptr, /*skip_filters=*/false, ++ TableReaderCaller::kSSTDumpTool); ++ return new RawSstFileIterator(iter, has_from, from, has_to, to); ++ +} ++} // namespace ROCKSDB_NAMESPACE + -+int SSTDumpTool::Run(int argc, char const* const* argv, Options options, -+ FILE* out, FILE* err) { - std::string env_uri, fs_uri; - const char* dir_or_file = nullptr; - uint64_t read_num = std::numeric_limits::max(); -@@ -170,8 +176,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - bool has_compression_level_from = false; - bool has_compression_level_to = false; - bool has_specified_compression_types = false; -- std::string from_key; -- std::string to_key; -+ bool silent = false; -+ Slice* from_key = nullptr; -+ Slice* to_key = nullptr; - std::string block_size_str; - std::string compression_level_from_str; - std::string compression_level_to_str; -@@ -197,7 +204,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - int64_t tmp_val; - - for (int i = 1; i < argc; i++) { -- if (strncmp(argv[i], "--env_uri=", 10) == 0) { -+ if (strncmp(argv[i], "--silent", 8) == 0) { -+ silent = true; -+ } else if (strncmp(argv[i], "--env_uri=", 10) == 0) { - env_uri = argv[i] + 10; - } else if (strncmp(argv[i], "--fs_uri=", 9) == 0) { - fs_uri = argv[i] + 9; -@@ -217,13 +226,13 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - } else if (strncmp(argv[i], "--command=", 10) == 0) { - command = argv[i] + 10; - } else if (strncmp(argv[i], "--from=", 7) == 0) { -- from_key = argv[i] + 7; -+ from_key = AssignSlicePrependedWithLength(argv[i] + 7); - has_from = true; - } else if (strncmp(argv[i], "--to=", 5) == 0) { -- to_key = argv[i] + 5; -+ to_key = AssignSlicePrependedWithLength(argv[i] + 5); - has_to = true; - } else if (strncmp(argv[i], "--prefix=", 9) == 0) { -- from_key = argv[i] + 9; -+ from_key = AssignSlicePrependedWithLength( argv[i] + 9); - use_from_as_prefix = true; - } else if (strcmp(argv[i], "--show_properties") == 0) { - show_properties = true; -@@ -273,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - std::cerr << pik_status.getState() << "\n"; - retc = -1; - } -- fprintf(stdout, "key=%s\n", ikey.DebugString(true, true).c_str()); -+ fprintf(out, "key=%s\n", ikey.DebugString(true, true).c_str()); - return retc; - } else if (ParseIntArg(argv[i], "--compression_level_from=", - "compression_level_from must be numeric", -@@ -288,9 +297,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n", -+ fprintf(err, "compression_max_dict_bytes must be a uint32_t: '%s'\n", - argv[i]); -- 
print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_bytes = static_cast(tmp_val); -@@ -298,10 +307,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_zstd_max_train_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, -+ fprintf(err, - "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_zstd_max_train_bytes = static_cast(tmp_val); -@@ -309,56 +318,56 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_buffer_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0) { -- fprintf(stderr, -+ fprintf(err, - "compression_max_dict_buffer_bytes must be positive: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_buffer_bytes = static_cast(tmp_val); - } else if (strcmp(argv[i], "--compression_use_zstd_finalize_dict") == 0) { - compression_use_zstd_finalize_dict = true; - } else if (strcmp(argv[i], "--help") == 0) { -- print_help(/*to_stderr*/ false); -+ print_help(/*to_stderr*/ false, err, out); - return 0; - } else if (strcmp(argv[i], "--version") == 0) { - printf("%s\n", GetRocksBuildInfoAsString("sst_dump").c_str()); - return 0; - } else { -- fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "Unrecognized argument '%s'\n\n", argv[i]); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - } - - if(has_compression_level_from && has_compression_level_to) { - if(!has_specified_compression_types || compression_types.size() != 1) { -- fprintf(stderr, "Specify one compression type.\n\n"); -+ fprintf(err, "Specify one compression type.\n\n"); - exit(1); - } - } else if(has_compression_level_from || has_compression_level_to) { -- fprintf(stderr, "Specify both --compression_level_from and " -+ fprintf(err, "Specify both --compression_level_from and " - "--compression_level_to.\n\n"); - exit(1); - } - - if (use_from_as_prefix && has_from) { -- fprintf(stderr, "Cannot specify --prefix and --from\n\n"); -+ fprintf(err, "Cannot specify --prefix and --from\n\n"); - exit(1); - } - - if (input_key_hex) { - if (has_from || use_from_as_prefix) { -- from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key); -+ *from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key -> ToString()); - } - if (has_to) { -- to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key); -+ *to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key->ToString()); - } - } - - if (dir_or_file == nullptr) { -- fprintf(stderr, "file or directory must be specified.\n\n"); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "file or directory must be specified.\n\n"); -+ print_help(/*to_stderr*/ true, err, out); - exit(1); - } - -@@ -373,10 +382,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = Env::CreateFromUri(config_options, env_uri, fs_uri, &options.env, - &env_guard); - if (!s.ok()) { -- fprintf(stderr, "CreateEnvFromUri: %s\n", s.ToString().c_str()); -+ fprintf(err, "CreateEnvFromUri: %s\n", s.ToString().c_str()); - exit(1); -- } else { -- fprintf(stdout, "options.env is %p\n", options.env); -+ } else if (!silent){ -+ fprintf(out, "options.env is %p\n", options.env); - } - } 
- -@@ -390,7 +399,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = env->FileExists(dir_or_file); - // dir_or_file does not exist - if (!s.ok()) { -- fprintf(stderr, "%s%s: No such file or directory\n", s.ToString().c_str(), -+ fprintf(err, "%s%s: No such file or directory\n", s.ToString().c_str(), - dir_or_file); - return 1; - } -@@ -421,10 +430,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - ROCKSDB_NAMESPACE::SstFileDumper dumper( - options, filename, Temperature::kUnknown, readahead_size, -- verify_checksum, output_hex, decode_blob_index); -+ verify_checksum, output_hex, decode_blob_index, EnvOptions(), -+ silent, out, err); - // Not a valid SST - if (!dumper.getStatus().ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - dumper.getStatus().ToString().c_str()); - continue; - } else { -@@ -433,10 +443,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // where there is at least one valid SST - if (valid_sst_files.size() == 1) { - // from_key and to_key are only used for "check", "scan", or "" -- if (command == "check" || command == "scan" || command == "") { -- fprintf(stdout, "from [%s] to [%s]\n", -- ROCKSDB_NAMESPACE::Slice(from_key).ToString(true).c_str(), -- ROCKSDB_NAMESPACE::Slice(to_key).ToString(true).c_str()); -+ if (!silent && (command == "check" || command == "scan" || -+ command == "")) { -+ fprintf(out, "from [%s] to [%s]\n", -+ from_key->ToString(true).c_str(), -+ to_key->ToString(true).c_str()); - } - } - } -@@ -449,7 +460,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - compression_zstd_max_train_bytes, compression_max_dict_buffer_bytes, - !compression_use_zstd_finalize_dict); - if (!st.ok()) { -- fprintf(stderr, "Failed to recompress: %s\n", st.ToString().c_str()); -+ fprintf(err, "Failed to recompress: %s\n", st.ToString().c_str()); - exit(1); - } - return 0; -@@ -461,10 +472,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - st = dumper.DumpTable(out_filename); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); - exit(1); - } else { -- fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]); -+ fprintf(out, "raw dump written to file %s\n", &out_filename[0]); - } - continue; - } -@@ -473,10 +484,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "" || command == "scan" || command == "check") { - st = dumper.ReadSequential( - command == "scan", read_num > 0 ? 
(read_num - total_read) : read_num, -- has_from || use_from_as_prefix, from_key, has_to, to_key, -+ has_from || use_from_as_prefix, *from_key, has_to, *to_key, - use_from_as_prefix); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - st.ToString().c_str()); - } - total_read += dumper.GetReadNumber(); -@@ -488,10 +499,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "verify") { - st = dumper.VerifyChecksum(); - if (!st.ok()) { -- fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(), -+ fprintf(err, "%s is corrupted: %s\n", filename.c_str(), - st.ToString().c_str()); - } else { -- fprintf(stdout, "The file is ok\n"); -+ fprintf(out, "The file is ok\n"); - } - continue; - } -@@ -503,15 +514,15 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - table_properties_from_reader; - st = dumper.ReadTableProperties(&table_properties_from_reader); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -- fprintf(stderr, "Try to use initial table properties\n"); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "Try to use initial table properties\n"); - table_properties = dumper.GetInitTableProperties(); - } else { - table_properties = table_properties_from_reader.get(); - } - if (table_properties != nullptr) { - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Table Properties:\n" - "------------------------------\n" - " %s", -@@ -523,18 +534,18 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - total_index_block_size += table_properties->index_size; - total_filter_block_size += table_properties->filter_size; - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Raw user collected properties\n" - "------------------------------\n"); - for (const auto& kv : table_properties->user_collected_properties) { - std::string prop_name = kv.first; - std::string prop_val = Slice(kv.second).ToString(true); -- fprintf(stdout, " # %s: 0x%s\n", prop_name.c_str(), -+ fprintf(out, " # %s: 0x%s\n", prop_name.c_str(), - prop_val.c_str()); - } - } - } else { -- fprintf(stderr, "Reader unexpectedly returned null properties\n"); -+ fprintf(err, "Reader unexpectedly returned null properties\n"); - } - } - } -@@ -555,9 +566,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // Exit with an error state - if (dir) { - fprintf(stdout, "------------------------------\n"); -- fprintf(stderr, "No valid SST files found in %s\n", dir_or_file); -+ fprintf(err, "No valid SST files found in %s\n", dir_or_file); - } else { -- fprintf(stderr, "%s is not a valid SST file\n", dir_or_file); -+ fprintf(err, "%s is not a valid SST file\n", dir_or_file); - } - return 1; - } else { ++#endif // ROCKSDB_LITE diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 472954f2bd57..0b0943079c6b 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdds.utils; - +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import 
org.junit.jupiter.api.Assertions; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Matchers; import org.mockito.MockedStatic; import org.mockito.Mockito; @@ -34,8 +35,7 @@ import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.same; /** * Test class for NativeLibraryLoader. @@ -51,37 +51,27 @@ private static Stream nativeLibraryDirectoryLocations() @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @MethodSource("nativeLibraryDirectoryLocations") - public void testNativeLibraryLoader( - String nativeLibraryDirectoryLocation) { + public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throws NativeLibraryNotLoadedException { Map libraryLoadedMap = new HashMap<>(); NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = - Mockito.mockStatic(NativeLibraryLoader.class, - Mockito.CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + try (MockedStatic mockedNativeLibraryLoader = Mockito.mockStatic(NativeLibraryLoader.class, + Mockito.CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()) - .thenReturn(loader); - Assertions.assertTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - Assertions.assertTrue(NativeLibraryLoader - .isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); + ManagedRawSSTFileReader.loadLibrary(); + Assertions.assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getResourceStream(anyString())) + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(Matchers.anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName); NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); // Checking if the resource with random was copied to a temp file. - File[] libPath = - new File(nativeLibraryDirectoryLocation == null ? "" : - nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> - name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); + File[] libPath = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) + .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && + name.endsWith(NativeLibraryLoader.getLibOsSuffix())); Assertions.assertNotNull(libPath); Assertions.assertEquals(1, libPath.length); Assertions.assertTrue(libPath[0].delete()); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..00816e60d7f2 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.TestUtils; +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test for ManagedRawSSTFileReaderIterator. + */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +class TestManagedRawSSTFileIterator { + + @TempDir + private Path tempDir; + + private File createSSTFileWithKeys( + TreeMap, String> keys) throws Exception { + File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); + try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); + ManagedOptions managedOptions = new ManagedOptions(); + ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, managedOptions)) { + sstFileWriter.open(file.getAbsolutePath()); + for (Map.Entry, String> entry : keys.entrySet()) { + if (entry.getKey().getValue() == 0) { + sstFileWriter.delete(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8)); + } else { + sstFileWriter.put(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8), + entry.getValue().getBytes(StandardCharsets.UTF_8)); + } + } + sstFileWriter.finish(); + } + return file; + } + + private static Stream keyValueFormatArgs() { + return Stream.of(Arguments.of(Named.of("Key starting with a single quote", "'key%1$d=>"), + Named.of("Value starting with a number ending with a single quote", "%1$dvalue'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number", "%1$dvalue%1$d")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number & containing null character & new line character", + "%1$dvalue\n\0%1$d")), + Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"), + Named.of("Value starting & ending with a number 
& enclosed within quotes", "%1$dvalue\r%1$d"))); + } + + @BeforeAll + public static void init() throws NativeLibraryNotLoadedException { + ManagedRawSSTFileReader.loadLibrary(); + } + + + @ParameterizedTest + @MethodSource("keyValueFormatArgs") + public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueFormat) throws Exception { + TreeMap<Pair<String, Integer>, String> keys = IntStream.range(0, 100).boxed().collect(Collectors.toMap( + i -> Pair.of(String.format(keyFormat, i), i % 2), + i -> i % 2 == 0 ? "" : String.format(valueFormat, i), + (v1, v2) -> v2, + TreeMap::new)); + File file = createSSTFileWithKeys(keys); + try (ManagedOptions options = new ManagedOptions(); + ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader = new ManagedRawSSTFileReader<>( + options, file.getAbsolutePath(), 2 * 1024 * 1024)) { + List<Optional<String>> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() + .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); + for (Optional<String> keyStart : testBounds) { + for (Optional<String> keyEnd : testBounds) { + Map<Pair<String, Integer>, String> expectedKeys = keys.entrySet().stream() + .filter(e -> keyStart.map(s -> e.getKey().getKey().compareTo(s) >= 0).orElse(true)) + .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0).orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + Optional<ManagedSlice> lowerBound = keyStart.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + Optional<ManagedSlice> upperBound = keyEnd.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + try (ManagedRawSSTFileIterator<ManagedRawSSTFileIterator.KeyValue> iterator + = reader.newIterator(Function.identity(), lowerBound.orElse(null), upperBound.orElse(null))) { + while (iterator.hasNext()) { + ManagedRawSSTFileIterator.KeyValue r = iterator.next(); + String key = StringUtils.bytes2String(r.getKey()); + Pair<String, Integer> recordKey = Pair.of(key, r.getType()); + assertThat(expectedKeys).containsKey(recordKey); + assertEquals(Optional.ofNullable(expectedKeys.get(recordKey)).orElse(""), + StringUtils.bytes2String(r.getValue())); + expectedKeys.remove(recordKey); + } + assertEquals(0, expectedKeys.size()); + } finally { + lowerBound.ifPresent(ManagedSlice::close); + upperBound.ifPresent(ManagedSlice::close); + } + } + } + } + } +} diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java deleted file mode 100644 index 99d2a6ced59f..000000000000 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.primitives.Bytes; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Assumptions; -import org.junit.jupiter.api.Named; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Matchers; -import org.mockito.Mockito; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.TreeMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -/** - * Test for ManagedSSTDumpIterator. 
- */ -class TestManagedSSTDumpIterator { - - private File createSSTFileWithKeys( - TreeMap, String> keys) throws Exception { - File file = File.createTempFile("tmp_sst_file", ".sst"); - file.deleteOnExit(); - try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); - ManagedOptions managedOptions = new ManagedOptions(); - ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( - envOptions, managedOptions)) { - sstFileWriter.open(file.getAbsolutePath()); - for (Map.Entry, String> entry : keys.entrySet()) { - if (entry.getKey().getValue() == 0) { - sstFileWriter.delete(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8)); - } else { - sstFileWriter.put(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8), - entry.getValue().getBytes(StandardCharsets.UTF_8)); - } - } - sstFileWriter.finish(); - } - return file; - } - - private static Stream keyValueFormatArgs() { - return Stream.of( - Arguments.of( - Named.of("Key starting with a single quote", - "'key%1$d=>"), - Named.of("Value starting with a number ending with a" + - " single quote", "%1$dvalue'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number", "%1$dvalue%1$d") - ), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'")), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number " + - "& containing null character & new line character", - "%1$dvalue\n\0%1$d") - ), - Arguments.of( - Named.of("Key ending with a number & containing" + - " a null character", "key\0%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$dvalue\r%1$d") - ) - ); - } - - private static byte[] getBytes(Integer val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(4); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putInt(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(Long val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(8); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putLong(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(String val) { - byte[] b = new byte[val.length()]; - for (int i = 0; i < val.length(); i++) { - b[i] = (byte) val.charAt(i); - } - return b; - } - - private static Stream invalidPipeInputStreamBytes() { - return Stream.of( - Arguments.of(Named.of("Invalid 3 byte integer", - new byte[]{0, 0, 0})), - Arguments.of(Named.of("Invalid 2 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid 1 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid key name length", - Bytes.concat(getBytes(4), getBytes("key")))), - Arguments.of(Named.of("Invalid Unsigned Long length", - Bytes.concat(getBytes(4), getBytes("key1"), - new byte[]{0, 0}))), - Arguments.of(Named.of("Invalid Sequence number", - Bytes.concat(getBytes(4), getBytes("key1")))), - Arguments.of(Named.of("Invalid Type", - Bytes.concat(getBytes(4), getBytes("key1"), - getBytes(4L)))), - Arguments.of(Named.of("Invalid Value", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), getBytes(0)))), - 
Arguments.of(Named.of("Invalid Value length", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), getBytes(1), getBytes(6), - getBytes("val")))) - ); - } - - @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) - @ParameterizedTest - @MethodSource("keyValueFormatArgs") - @Unhealthy("HDDS-9274") - public void testSSTDumpIteratorWithKeyFormat(String keyFormat, - String valueFormat) - throws Exception { - Assumptions.assumeTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - - TreeMap, String> keys = - IntStream.range(0, 100).boxed().collect( - Collectors.toMap( - i -> Pair.of(String.format(keyFormat, i), i % 2), - i -> i % 2 == 0 ? "" : String.format(valueFormat, i), - (v1, v2) -> v2, - TreeMap::new)); - File file = createSSTFileWithKeys(keys); - ExecutorService executorService = - new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1), - new ThreadPoolExecutor.CallerRunsPolicy()); - ManagedSSTDumpTool tool = new ManagedSSTDumpTool(executorService, 8192); - List> testBounds = TestUtils.getTestingBounds( - keys.keySet().stream().collect(Collectors.toMap(Pair::getKey, - Pair::getValue, (v1, v2) -> v1, TreeMap::new))); - for (Optional keyStart : testBounds) { - for (Optional keyEnd : testBounds) { - Map, String> expectedKeys = keys.entrySet() - .stream().filter(e -> keyStart.map(s -> e.getKey().getKey() - .compareTo(s) >= 0).orElse(true)) - .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - Optional lowerBound = keyStart - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - Optional upperBound = keyEnd - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - try (ManagedOptions options = new ManagedOptions(); - ManagedSSTDumpIterator iterator = - new ManagedSSTDumpIterator(tool, - file.getAbsolutePath(), options, lowerBound.orElse(null), - upperBound.orElse(null)) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - } - ) { - while (iterator.hasNext()) { - ManagedSSTDumpIterator.KeyValue r = iterator.next(); - String key = new String(r.getKey(), StandardCharsets.UTF_8); - Pair recordKey = Pair.of(key, r.getType()); - Assertions.assertTrue(expectedKeys.containsKey(recordKey)); - Assertions.assertEquals(Optional.ofNullable(expectedKeys - .get(recordKey)).orElse(""), - new String(r.getValue(), StandardCharsets.UTF_8)); - expectedKeys.remove(recordKey); - } - Assertions.assertEquals(0, expectedKeys.size()); - } finally { - lowerBound.ifPresent(ManagedSlice::close); - upperBound.ifPresent(ManagedSlice::close); - } - } - } - executorService.shutdown(); - } - - - @ParameterizedTest - @MethodSource("invalidPipeInputStreamBytes") - public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes) - throws ExecutionException, - InterruptedException, IOException { - ByteArrayInputStream byteArrayInputStream = - new ByteArrayInputStream(inputBytes); - ManagedSSTDumpTool tool = Mockito.mock(ManagedSSTDumpTool.class); - File file = File.createTempFile("tmp", ".sst"); - Future future = Mockito.mock(Future.class); - Mockito.when(future.isDone()).thenReturn(false); - Mockito.when(future.get()).thenReturn(0); - Mockito.when(tool.run(Matchers.any(Map.class), - Matchers.any(ManagedOptions.class))) - .thenReturn(new ManagedSSTDumpTool.SSTDumpToolTask(future, - byteArrayInputStream)); - try (ManagedOptions options = new ManagedOptions()) { - 
Assertions.assertThrows(IllegalStateException.class, - () -> new ManagedSSTDumpIterator( - tool, file.getAbsolutePath(), options) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - }); - } - } -} diff --git a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties new file mode 100644 index 000000000000..959da047fb7f --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +#

+# http://www.apache.org/licenses/LICENSE-2.0 +#

+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +name=PropertiesConfig + +# Checks for config change periodically and reloads +monitorInterval=5 + +filter=read, write +# filter.read.onMatch = DENY avoids logging all READ events +# filter.read.onMatch = ACCEPT permits logging all READ events +# The above two settings ignore the log levels in configuration +# filter.read.onMatch = NEUTRAL permits logging of only those READ events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.read.type = MarkerFilter +filter.read.marker = READ +filter.read.onMatch = NEUTRAL +filter.read.onMismatch = NEUTRAL + +# filter.write.onMatch = DENY avoids logging all WRITE events +# filter.write.onMatch = ACCEPT permits logging all WRITE events +# The above two settings ignore the log levels in configuration +# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.write.type = MarkerFilter +filter.write.marker = WRITE +filter.write.onMatch = NEUTRAL +filter.write.onMismatch = NEUTRAL + +# Log Levels are organized from most specific to least: +# OFF (most specific, no logging) +# FATAL (most specific, little data) +# ERROR +# WARN +# INFO +# DEBUG +# TRACE (least specific, a lot of data) +# ALL (least specific, all data) + +appenders = console, audit +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %-5level | %c{1} | %msg%n + +appender.audit.type = File +appender.audit.name = AUDITLOG +appender.audit.fileName=audit.log +appender.audit.layout.type=PatternLayout +appender.audit.layout.pattern= %-5level | %c{1} | %C | %msg%n + +loggers=audit +logger.audit.type=AsyncLogger +logger.audit.name=OMAudit +logger.audit.level = INFO +logger.audit.appenderRefs = audit +logger.audit.appenderRef.file.ref = AUDITLOG + +rootLogger.level = INFO +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties new file mode 100644 index 000000000000..398786689af3 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
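As a minimal sketch of how the READ and WRITE markers referenced by the audit filter configuration above get attached to log events at a call site: the logger name OMAudit and marker names come from that properties file, while the class name and messages here are purely illustrative assumptions using only the plain Log4j2 API.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

public final class AuditMarkerSketch {
  // Marker names must match filter.read.marker / filter.write.marker above.
  private static final Marker READ = MarkerManager.getMarker("READ");
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");
  // Logger name matches logger.audit.name=OMAudit in the properties above.
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");

  private AuditMarkerSketch() { }

  public static void main(String[] args) {
    // With onMatch/onMismatch set to NEUTRAL, the MarkerFilter defers to the
    // configured level (INFO), so both events reach the AUDITLOG file appender.
    AUDIT.info(WRITE, "op=CREATE_VOLUME | ret=SUCCESS"); // illustrative message
    AUDIT.info(READ, "op=READ_KEY | ret=SUCCESS");       // illustrative message
  }
}

With this configuration, raising logger.audit.level above INFO, or setting an onMatch value to DENY, is what actually suppresses a class of events; the markers themselves only label them.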
+# +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java index 5a1df61b4436..058d3d955315 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java @@ -20,14 +20,14 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import org.apache.hadoop.util.ClosableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.rocksdb.RocksDBException; import java.io.IOException; @@ -36,9 +36,9 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.Optional; import java.util.Spliterator; import java.util.Spliterators; +import java.util.function.Function; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -90,100 +90,87 @@ public long getEstimatedTotalKeys() throws RocksDBException { } public Stream getKeyStream(String lowerBound, - String upperBound) throws RocksDBException { + String upperBound) throws RocksDBException { // TODO: [SNAPSHOT] Check if default Options and ReadOptions is enough. 
- final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - private ManagedOptions options; - private ManagedReadOptions readOptions; - - private ManagedSlice lowerBoundSLice; - - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - this.readOptions = new ManagedReadOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSLice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - readOptions.setIterateLowerBound(lowerBoundSLice); - } - - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - readOptions.setIterateUpperBound(upperBoundSlice); - } - } + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + private ManagedOptions options; + private ManagedReadOptions readOptions; + + private ManagedSlice lowerBoundSLice; + + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + this.readOptions = new ManagedReadOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSLice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + readOptions.setIterateLowerBound(lowerBoundSLice); + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException { - return new ManagedSstFileIterator(file, options, readOptions) { - @Override - protected String getIteratorValue( - ManagedSstFileReaderIterator iterator) { - return new String(iterator.get().key(), UTF_8); - } - }; - } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + readOptions.setIterateUpperBound(upperBoundSlice); + } + } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException { + return new ManagedSstFileIterator(file, options, readOptions) { @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - readOptions.close(); - IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + protected String getIteratorValue(ManagedSstFileReaderIterator iterator) { + return new String(iterator.get().key(), UTF_8); } }; + } + + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + readOptions.close(); + IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - public Stream getKeyStreamWithTombstone( - ManagedSSTDumpTool sstDumpTool, String lowerBound, - String upperBound) throws RocksDBException { - final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - //TODO: [SNAPSHOT] Check if default Options is enough. - private ManagedOptions options; - private ManagedSlice lowerBoundSlice; - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - } - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - } - } + public Stream getKeyStreamWithTombstone(String lowerBound, String upperBound) throws RocksDBException { + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + //TODO: [SNAPSHOT] Check if default Options is enough. 
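A minimal usage sketch for the getKeyStreamWithTombstone variant being introduced here, assuming the rocks_tools native library is available and using hypothetical SST file paths (mirroring how the updated test invokes it):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.ozone.rocksdb.util.SstFileSetReader;

public final class TombstoneKeyStreamSketch {

  private TombstoneKeyStreamSketch() { }

  public static void main(String[] args) throws Exception {
    // The raw SST reader is JNI-backed; skip if the native library cannot be loaded.
    if (!ManagedRawSSTFileReader.loadLibrary()) {
      System.err.println("rocks_tools native library not available");
      return;
    }
    List<String> sstFiles = Arrays.asList("/tmp/000012.sst", "/tmp/000013.sst"); // hypothetical paths
    SstFileSetReader reader = new SstFileSetReader(sstFiles);
    // Bounds are plain String keys; pass null for an unbounded side.
    try (Stream<String> keys = reader.getKeyStreamWithTombstone("key00", "key99")) {
      keys.forEach(System.out::println); // also yields keys of delete tombstones
    }
  }
}

Unlike the previous code path, no ManagedSSTDumpTool, executor pool, or dump-buffer sizing has to be created and shut down by the caller.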
+ private ManagedOptions options; + private ManagedSlice lowerBoundSlice; + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + } + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws IOException { - return new ManagedSSTDumpIterator(sstDumpTool, file, - options, lowerBoundSlice, upperBoundSlice) { - @Override - protected String getTransformedValue(Optional value) { - return value.map(v -> StringUtils.bytes2String(v.getKey())) - .orElse(null); - } - }; - } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) { + return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, + keyValue -> StringUtils.bytes2String(keyValue.getKey())); + } - @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); - } - }; + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } @@ -191,8 +178,7 @@ private abstract static class ManagedSstFileIterator implements ClosableIterator private final ManagedSstFileReader fileReader; private final ManagedSstFileReaderIterator fileReaderIterator; - ManagedSstFileIterator(String path, ManagedOptions options, - ManagedReadOptions readOptions) + ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions) throws RocksDBException { this.fileReader = new ManagedSstFileReader(options); this.fileReader.open(path); @@ -221,8 +207,35 @@ public String next() { } } - private abstract static class MultipleSstFileIterator implements - ClosableIterator { + private static class ManagedRawSstFileIterator implements ClosableIterator { + private final ManagedRawSSTFileReader fileReader; + private final ManagedRawSSTFileIterator fileReaderIterator; + private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; + + ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, + Function keyValueFunction) { + this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE); + this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound); + } + + @Override + public void close() { + this.fileReaderIterator.close(); + this.fileReader.close(); + } + + @Override + public boolean hasNext() { + return fileReaderIterator.hasNext(); + } + + @Override + public String next() { + return fileReaderIterator.next(); + } + } + + private abstract static class MultipleSstFileIterator implements ClosableIterator { private final Iterator fileNameIterator; @@ -236,16 +249,13 @@ private MultipleSstFileIterator(Collection files) { protected abstract void init(); - protected abstract ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException, - IOException; + protected abstract ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException, IOException; @Override public boolean hasNext() { try { do { - if (Objects.nonNull(currentFileIterator) && - currentFileIterator.hasNext()) { + if (Objects.nonNull(currentFileIterator) && 
currentFileIterator.hasNext()) { return true; } } while (moveToNextFile()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java index 676a3e35c04c..51bc7e7e3dd3 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java @@ -17,18 +17,15 @@ */ package org.apache.ozone.rocksdb.util; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.TestUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.params.ParameterizedTest; @@ -43,10 +40,6 @@ import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -155,51 +148,38 @@ public void testGetKeyStream(int numberOfFiles) @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @ValueSource(ints = {0, 1, 2, 3, 7, 10}) - @Unhealthy("HDDS-9274") public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException, IOException, NativeLibraryNotLoadedException { - Assumptions.assumeTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + Assumptions.assumeTrue(ManagedRawSSTFileReader.loadLibrary()); Pair, List> data = createDummyData(numberOfFiles); List files = data.getRight(); SortedMap keys = data.getLeft(); - ExecutorService executorService = new ThreadPoolExecutor(0, - 2, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), new ThreadPoolExecutor.DiscardPolicy()); - ManagedSSTDumpTool sstDumpTool = - new ManagedSSTDumpTool(executorService, 256); // Getting every possible combination of 2 elements from the sampled keys. // Reading the sst file lying within the given bounds and // validating the keys read from the sst file. List> bounds = TestUtils.getTestingBounds(keys); - try { - for (Optional lowerBound : bounds) { - for (Optional upperBound : bounds) { - // Calculating the expected keys which lie in the given boundary. 
- Map keysInBoundary = - keys.entrySet().stream().filter(entry -> lowerBound - .map(l -> entry.getKey().compareTo(l) >= 0) - .orElse(true) && - upperBound.map(u -> entry.getKey().compareTo(u) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, - Map.Entry::getValue)); - try (Stream keyStream = new SstFileSetReader(files) - .getKeyStreamWithTombstone(sstDumpTool, lowerBound.orElse(null), - upperBound.orElse(null))) { - keyStream.forEach( - key -> { - Assertions.assertNotNull(keysInBoundary.remove(key)); - }); - } - Assertions.assertEquals(0, keysInBoundary.size()); + for (Optional lowerBound : bounds) { + for (Optional upperBound : bounds) { + // Calculating the expected keys which lie in the given boundary. + Map keysInBoundary = + keys.entrySet().stream().filter(entry -> lowerBound + .map(l -> entry.getKey().compareTo(l) >= 0) + .orElse(true) && + upperBound.map(u -> entry.getKey().compareTo(u) < 0) + .orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, + Map.Entry::getValue)); + try (Stream keyStream = new SstFileSetReader(files) + .getKeyStreamWithTombstone(lowerBound.orElse(null), + upperBound.orElse(null))) { + keyStream.forEach( + key -> { + Assertions.assertNotNull(keysInBoundary.remove(key)); + }); } + Assertions.assertEquals(0, keysInBoundary.size()); } - } finally { - executorService.shutdown(); } } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 95ec7914129a..6c4f8d956863 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -148,10 +148,10 @@ public void testGetVersionTask() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); @@ -177,9 +177,9 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, @@ -216,12 +216,12 @@ public void testDeletedContainersClearedOnStartup() throws Exception { @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); 
ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -270,7 +270,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -590,7 +590,7 @@ private StateContext heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 0dde6c013137..bf6d81a04e0a 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -33,6 +33,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.assertj + assertj-core + com.google.guava guava @@ -66,6 +70,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j slf4j-api + + org.apache.hadoop + hadoop-common + provided + + + * + * + + + org.apache.logging.log4j log4j-api @@ -85,6 +100,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> mockito-core provided + + org.assertj + assertj-core + compile + diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java new file mode 100644 index 000000000000..28d3b936ecab --- /dev/null +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java @@ -0,0 +1,470 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ozone.test; + +import static org.apache.hadoop.metrics2.lib.Interns.info; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.AdditionalMatchers.geq; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; +import org.apache.hadoop.metrics2.util.Quantile; +import org.assertj.core.data.Offset; +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatcher; +import org.mockito.stubbing.Answer; +import org.mockito.invocation.InvocationOnMock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Objects; + +/** + * Helpers for metrics source tests. + */ +public final class MetricsAsserts { + + private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class); + private static final Offset EPSILON = Offset.offset(0.00001); + private static final Offset EPSILON_FLOAT = Offset.offset(0.00001f); + + public static MetricsSystem mockMetricsSystem() { + MetricsSystem ms = mock(MetricsSystem.class); + DefaultMetricsSystem.setInstance(ms); + return ms; + } + + public static MetricsRecordBuilder mockMetricsRecordBuilder() { + final MetricsCollector mc = mock(MetricsCollector.class); + MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class, new Answer() { + @Override + public Object answer(InvocationOnMock invocation) { + String methodName = invocation.getMethod().getName(); + if (LOG.isDebugEnabled()) { + Object[] args = invocation.getArguments(); + StringBuilder sb = new StringBuilder(); + for (Object o : args) { + if (sb.length() > 0) { + sb.append(", "); + } + sb.append(String.valueOf(o)); + } + LOG.debug("{}: {}", methodName, sb); + } + return methodName.equals("parent") || methodName.equals("endRecord") ? + mc : invocation.getMock(); + } + }); + when(mc.addRecord(anyString())).thenReturn(rb); + when(mc.addRecord(anyInfo())).thenReturn(rb); + return rb; + } + + /** + * Call getMetrics on source and get a record builder mock to verify. 
+ * @param source the metrics source + * @param all if true, return all metrics even if not changed + * @return the record builder mock to verify + */ + public static MetricsRecordBuilder getMetrics(MetricsSource source, + boolean all) { + MetricsRecordBuilder rb = mockMetricsRecordBuilder(); + MetricsCollector mc = rb.parent(); + source.getMetrics(mc, all); + return rb; + } + + public static MetricsRecordBuilder getMetrics(String name) { + return getMetrics(DefaultMetricsSystem.instance().getSource(name)); + } + + public static MetricsRecordBuilder getMetrics(MetricsSource source) { + return getMetrics(source, true); + } + + private static class InfoWithSameName implements ArgumentMatcher { + private final String expected; + + InfoWithSameName(MetricsInfo info) { + expected = Objects.requireNonNull(info.name(), "info name"); + } + + @Override + public boolean matches(MetricsInfo info) { + return expected.equals(info.name()); + } + + @Override + public String toString() { + return "Info with name=" + expected; + } + } + + /** + * MetricInfo with the same name. + * @param info to match + * @return null + */ + public static MetricsInfo eqName(MetricsInfo info) { + return argThat(new InfoWithSameName(info)); + } + + private static class AnyInfo implements ArgumentMatcher { + @Override + public boolean matches(MetricsInfo info) { + return info != null; + } + } + + public static MetricsInfo anyInfo() { + return argThat(new AnyInfo()); + } + + /** + * Assert an int gauge metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertGauge(String name, int expected, + MetricsRecordBuilder rb) { + assertThat(getIntGauge(name, rb)).as(name) + .isEqualTo(expected); + } + + public static int getIntGauge(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Integer.class); + verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Assert an int counter metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertCounter(String name, int expected, + MetricsRecordBuilder rb) { + assertThat(getIntCounter(name, rb)).as(name) + .isEqualTo(expected); + } + + public static int getIntCounter(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass( + Integer.class); + verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Assert a long gauge metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertGauge(String name, long expected, + MetricsRecordBuilder rb) { + assertThat(getLongGauge(name, rb)).as(name) + .isEqualTo(expected); + } + + public static long getLongGauge(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Long.class); + verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Assert a double gauge metric as expected. 
+ * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertGauge(String name, double expected, MetricsRecordBuilder rb) { + assertThat(getDoubleGauge(name, rb)).as(name) + .isCloseTo(expected, EPSILON); + } + + public static double getDoubleGauge(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Double.class); + verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Assert a long counter metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertCounter(String name, long expected, + MetricsRecordBuilder rb) { + assertThat(getLongCounter(name, rb)).as(name) + .isEqualTo(expected); + } + + public static long getLongCounter(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Long.class); + verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + public static long getLongCounterWithoutCheck(String name, + MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Long.class); + verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture()); + return captor.getValue(); + } + + public static String getStringMetric(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(String.class); + verify(rb, atLeast(0)).tag(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Assert a float gauge metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param rb the record builder mock used to getMetrics + */ + public static void assertGauge(String name, float expected, + MetricsRecordBuilder rb) { + assertThat(getFloatGauge(name, rb)).as(name) + .isCloseTo(expected, EPSILON_FLOAT); + } + + public static float getFloatGauge(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Float.class); + verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + /** + * Check that this metric was captured exactly once. + */ + private static void checkCaptured(ArgumentCaptor captor, String name) { + assertThat(captor.getAllValues()).as(name) + .hasSize(1); + } + + /** + * Assert an int gauge metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param source to get metrics from + */ + public static void assertGauge(String name, int expected, + MetricsSource source) { + assertGauge(name, expected, getMetrics(source)); + } + + /** + * Assert an int counter metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param source to get metrics from + */ + public static void assertCounter(String name, int expected, + MetricsSource source) { + assertCounter(name, expected, getMetrics(source)); + } + + /** + * Assert a long gauge metric as expected. 
+ * @param name of the metric + * @param expected value of the metric + * @param source to get metrics from + */ + public static void assertGauge(String name, long expected, + MetricsSource source) { + assertGauge(name, expected, getMetrics(source)); + } + + /** + * Assert a long counter metric as expected. + * @param name of the metric + * @param expected value of the metric + * @param source to get metrics from + */ + public static void assertCounter(String name, long expected, + MetricsSource source) { + assertCounter(name, expected, getMetrics(source)); + } + + /** + * Assert that a long counter metric is greater than a value. + * @param name of the metric + * @param greater value of the metric should be greater than this + * @param rb the record builder mock used to getMetrics + */ + public static void assertCounterGt(String name, long greater, + MetricsRecordBuilder rb) { + assertThat(getLongCounter(name, rb)).as(name) + .isGreaterThan(greater); + } + + /** + * Assert that a long counter metric is greater than a value. + * @param name of the metric + * @param greater value of the metric should be greater than this + * @param source the metrics source + */ + public static void assertCounterGt(String name, long greater, + MetricsSource source) { + assertCounterGt(name, greater, getMetrics(source)); + } + + /** + * Assert that a double gauge metric is greater than a value. + * @param name of the metric + * @param greater value of the metric should be greater than this + * @param rb the record builder mock used to getMetrics + */ + public static void assertGaugeGt(String name, double greater, + MetricsRecordBuilder rb) { + assertThat(getDoubleGauge(name, rb)).as(name) + .isGreaterThan(greater); + } + + /** + * Assert that a double gauge metric is greater than or equal to a value. + * @param name of the metric + * @param greater value of the metric should be greater than or equal to this + * @param rb the record builder mock used to getMetrics + */ + public static void assertGaugeGte(String name, double greater, + MetricsRecordBuilder rb) { + double curValue = getDoubleGauge(name, rb); + assertThat(curValue).as(name) + .isGreaterThanOrEqualTo(greater); + } + + /** + * Assert that a double gauge metric is greater than a value. + * @param name of the metric + * @param greater value of the metric should be greater than this + * @param source the metrics source + */ + public static void assertGaugeGt(String name, double greater, + MetricsSource source) { + assertGaugeGt(name, greater, getMetrics(source)); + } + + /** + * Asserts that the NumOps and quantiles for a metric with value name + * "Latency" have been changed at some point to a non-zero value. + * + * @param prefix of the metric + * @param rb MetricsRecordBuilder with the metric + */ + public static void assertQuantileGauges(String prefix, + MetricsRecordBuilder rb) { + assertQuantileGauges(prefix, rb, "Latency"); + } + + /** + * Asserts that the NumOps and quantiles for a metric have been changed at + * some point to a non-zero value, for the specified value name of the + * metrics (e.g., "Latency", "Count"). 
+ * + * @param prefix of the metric + * @param rb MetricsRecordBuilder with the metric + * @param valueName the value name for the metric + */ + public static void assertQuantileGauges(String prefix, + MetricsRecordBuilder rb, String valueName) { + verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); + for (Quantile q : MutableQuantiles.quantiles) { + String nameTemplate = prefix + "%dthPercentile" + valueName; + int percentile = (int) (100 * q.quantile); + verify(rb).addGauge( + eqName(info(String.format(nameTemplate, percentile), "")), + geq(0L)); + } + } + + /** + * Asserts that the NumOps and inverse quantiles for a metric have been changed at + * some point to a non-zero value, for the specified value name of the + * metrics (e.g., "Rate"). + * + * @param prefix of the metric + * @param rb MetricsRecordBuilder with the metric + * @param valueName the value name for the metric + */ + public static void assertInverseQuantileGauges(String prefix, + MetricsRecordBuilder rb, String valueName) { + verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); + for (Quantile q : MutableQuantiles.quantiles) { + String nameTemplate = prefix + "%dthInversePercentile" + valueName; + int percentile = (int) (100 * q.quantile); + verify(rb).addGauge( + eqName(info(String.format(nameTemplate, percentile), "")), + geq(0L)); + } + } + + /** + * Assert a tag of metric as expected. + * @param name of the metric tag + * @param expected value of the metric tag + * @param rb the record builder mock used to getMetrics + */ + public static void assertTag(String name, String expected, + MetricsRecordBuilder rb) { + assertThat(getStringTag(name, rb)).as("Tag for metric " + name) + .isEqualTo(expected); + } + + /** + * get the value tag for the metric. 
+ * @param name of the metric tag + * @param rb value of the metric tag + * @return the value tag for the metric + */ + public static String getStringTag(String name, MetricsRecordBuilder rb) { + ArgumentCaptor captor = ArgumentCaptor.forClass(String.class); + verify(rb).tag(eqName(info(name, "")), captor.capture()); + checkCaptured(captor, name); + return captor.getValue(); + } + + private MetricsAsserts() { + // no instances + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 7aa91cec73c8..03c09203c968 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -97,8 +97,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; replicationType = HddsProtos.ReplicationType.RATIS; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 9b844cc74fdf..519edf20f139 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -28,21 +28,9 @@ * Ozone Manager Constants. */ public final class OMConfigKeys { - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE = - "ozone.om.snapshot.sst_dumptool.pool.size"; - public static final int - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT = 1; - public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB = "ozone.om.snapshot.load.native.lib"; public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE = - "ozone.om.snapshot.sst_dumptool.buffer.size"; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT = "8KB"; - /** * Never constructed. 
*/ diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index 4d22e695e76f..89206b3bdf2f 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -19,13 +19,5 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native -zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) -if [[ -z "${zlib_version}" ]]; then - echo "ERROR zlib.version not defined in pom.xml" - exit 1 -fi - -source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ - -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ - -DexcludedGroups="unhealthy | org.apache.ozone.test.UnhealthyTest" \ +source "${DIR}/junit.sh" -Pnative -Drocks_tools_native -DexcludedGroups="unhealthy" \ "$@" diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml index 171494aa5dbe..df9c4c0ab3e6 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml @@ -18,7 +18,7 @@
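The MetricsAsserts helper added above under org.apache.ozone.test mirrors the familiar Hadoop test utility; a minimal sketch of how a metrics test would typically use it, where the metric names are hypothetical placeholders for whatever the source under test actually emits:

import static org.apache.ozone.test.MetricsAsserts.assertCounter;
import static org.apache.ozone.test.MetricsAsserts.assertGauge;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

public final class MetricsAssertsSketch {

  private MetricsAssertsSketch() { }

  // Drives the source against a mocked record builder and checks reported values.
  static void verifySource(MetricsSource source) {
    MetricsRecordBuilder rb = getMetrics(source);
    assertCounter("NumKeyOps", 42L, rb); // hypothetical counter name
    assertGauge("PendingOps", 0, rb);    // hypothetical gauge name
  }
}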