From 2d95bb5ce01ba09276c1d4346450906c79d1bd5d Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Wed, 7 Feb 2024 14:43:00 +0530 Subject: [PATCH 1/6] HDDS-815. Rename Ozone/HDDS config keys prefixed with 'dfs' --- .../hadoop/hdds/scm/XceiverClientGrpc.java | 2 +- .../hadoop/hdds/scm/XceiverClientRatis.java | 2 +- .../apache/hadoop/hdds/ratis/RatisHelper.java | 2 +- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 36 +++++------ .../apache/hadoop/ozone/OzoneConfigKeys.java | 60 +++++++++---------- .../src/main/resources/ozone-default.xml | 34 +++++------ .../transport/server/XceiverServerGrpc.java | 4 +- .../server/ratis/ContainerStateMachine.java | 2 +- .../server/ratis/XceiverServerRatis.java | 22 +++---- .../container/common/ContainerTestUtils.java | 2 +- .../ozone/container/common/SCMTestUtils.java | 2 +- .../common/TestDatanodeStateMachine.java | 4 +- .../TestDatanodeConfiguration.java | 2 +- .../volume/TestPeriodicVolumeChecker.java | 2 +- .../common/volume/TestVolumeSet.java | 2 +- .../volume/TestVolumeSetDiskChecks.java | 2 +- .../ozoneimpl/TestContainerReader.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 2 +- .../hadoop/hdds/utils/HddsServerUtil.java | 4 +- .../hdds/scm/node/TestContainerPlacement.java | 2 +- .../ozone/container/common/TestEndPoint.java | 12 ++-- .../scm/cli/ContainerOperationClient.java | 2 +- .../runConfigurations/Datanode2-ha.xml | 2 +- .../intellij/runConfigurations/Datanode2.xml | 2 +- .../runConfigurations/Datanode3-ha.xml | 2 +- .../intellij/runConfigurations/Datanode3.xml | 2 +- .../main/compose/ozoneblockade/docker-config | 2 +- .../hadoop/ozone/MiniOzoneChaosCluster.java | 4 +- .../hdds/scm/TestRatisPipelineLeader.java | 4 +- .../hadoop/ozone/MiniOzoneClusterImpl.java | 10 ++-- .../apache/hadoop/ozone/RatisTestHelper.java | 6 +- .../hadoop/ozone/TestMiniOzoneCluster.java | 12 ++-- .../client/rpc/TestContainerStateMachine.java | 2 +- .../TestContainerStateMachineFailures.java | 10 ++-- .../TestContainerStateMachineFlushDelay.java | 2 +- .../rpc/TestContainerStateMachineStream.java | 2 +- .../rpc/TestFailureHandlingByClient.java | 2 +- ...TestFailureHandlingByClientFlushDelay.java | 2 +- .../TestMultiBlockWritesWithDnFailures.java | 2 +- .../server/ratis/TestCSMMetrics.java | 4 +- .../metrics/TestContainerMetrics.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 4 +- .../ozoneimpl/TestSecureOzoneContainer.java | 4 +- .../container/server/TestContainerServer.java | 8 +-- .../server/TestSecureContainerServer.java | 6 +- .../ozone/dn/ratis/TestDnRatisLogParser.java | 2 +- .../hadoop/ozone/shell/TestScmAdminHA.java | 2 +- .../fs/ozone/BasicOzoneClientAdapterImpl.java | 2 +- .../BasicRootedOzoneClientAdapterImpl.java | 2 +- 49 files changed, 154 insertions(+), 154 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 0a38e6604897..3bd0102a9cf5 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -166,7 +166,7 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. 
int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + port = config.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a79..d01b7b0cca13 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,7 +83,7 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + .get(ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index cb7f6f8a3b31..6f357bbab309 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -234,7 +234,7 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c6760451c693..c8a693abb16a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,12 +41,12 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; + public static final String OZONE_CONTAINER_RATIS_ENABLED_KEY + = "ozone.container.ratis.enabled"; public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; + public static final String OZONE_CONTAINER_RATIS_RPC_TYPE_KEY + = "ozone.container.ratis.rpc.type"; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String @@ -55,25 +55,25 @@ public final class ScmConfigKeys { public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; + public static final String OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = "ozone.container.ratis.replication.level"; public static final ReplicationLevel DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; 
-  public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = "dfs.container.ratis.num.container.op.executors";
+  public static final String OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
+      = "ozone.container.ratis.num.container.op.executors";
   public static final int
       DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
+      "ozone.container.ratis.segment.size";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
       "64MB";
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
+      "ozone.container.ratis.segment.preallocated.size";
   public static final String
       DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
   public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      "dfs.container.ratis.statemachinedata.sync.timeout";
+      OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+      "ozone.container.ratis.statemachinedata.sync.timeout";
   public static final TimeDuration
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
       TimeDuration.valueOf(10, TimeUnit.SECONDS);
@@ -121,14 +121,14 @@ public final class ScmConfigKeys {
       DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
   public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
+      OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+      "ozone.ratis.leader.election.minimum.timeout.duration";
   public static final TimeDuration
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(5, TimeUnit.SECONDS);
-  public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      "dfs.ratis.snapshot.threshold";
+  public static final String OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY =
+      "ozone.ratis.snapshot.threshold";
   public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
 
   // TODO : this is copied from OzoneConsts, may need to move to a better place
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 7bfda0184096..574c851540c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -36,8 +36,8 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public final class OzoneConfigKeys {
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
+  public static final String OZONE_CONTAINER_IPC_PORT =
+      "ozone.container.ipc.port";
   public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
 
   public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
@@ -56,11 +56,11 @@ public final class OzoneConfigKeys {
    * so that a mini cluster is able to launch multiple containers on a node.
    *
    * When set to false (default), the container port will be specified as
-   * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified
+   * {@link #OZONE_CONTAINER_IPC_PORT} and the default value will be specified
    * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}.
*/ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; + public static final String OZONE_CONTAINER_IPC_RANDOM_PORT = + "ozone.container.ipc.random.port"; public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; @@ -76,8 +76,8 @@ public final class OzoneConfigKeys { /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; + public static final String OZONE_CONTAINER_RATIS_IPC_PORT = + "ozone.container.ratis.ipc"; public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. @@ -133,9 +133,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. */ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final String OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT = + "ozone.container.ratis.ipc.random.port"; + public static final boolean OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = @@ -325,12 +325,12 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; + public static final String OZONE_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY; public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String OZONE_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String @@ -340,35 +340,35 @@ public final class OzoneConfigKeys { DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. 
DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + public static final String OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final String OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY; public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + public static final String OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; + public static final String OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + "ozone.container.ratis.datanode.storage.dir"; public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; @@ -407,13 +407,13 @@ public final class OzoneConfigKeys { DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final String OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY; public static final long 
DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 094fbff16da7..dba417227952 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -46,7 +46,7 @@ - dfs.container.ipc + ozone.container.ipc.port 9859 OZONE, CONTAINER, MANAGEMENT The ipc port number of container. @@ -73,7 +73,7 @@ - dfs.container.ipc.random.port + ozone.container.ipc.random.port false OZONE, DEBUG, CONTAINER Allocates a random free port for ozone container. This is used @@ -90,7 +90,7 @@ - dfs.container.ratis.statemachinedata.sync.timeout + ozone.container.ratis.statemachinedata.sync.timeout 10s OZONE, DEBUG, CONTAINER, RATIS Timeout for StateMachine data writes by Ratis. @@ -102,7 +102,7 @@ OZONE, DEBUG, CONTAINER, RATIS Number of times the WriteStateMachineData op will be tried before failing. If the value is not configured, it will default - to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout), + to (hdds.ratis.rpc.slowness.timeout / ozone.container.ratis.statemachinedata.sync.timeout), which means that the WriteStatMachineData will be retried for every sync timeout until the configured slowness timeout is hit, after which the StateMachine will close down the pipeline. @@ -149,7 +149,7 @@ - dfs.container.ratis.datanode.storage.dir + ozone.container.ratis.datanode.storage.dir OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS This directory is used for storing Ratis metadata like logs. If @@ -223,7 +223,7 @@ - dfs.container.ratis.enabled + ozone.container.ratis.enabled false OZONE, MANAGEMENT, PIPELINE, RATIS Ozone supports different kinds of replication pipelines. Ratis @@ -232,7 +232,7 @@ - dfs.container.ratis.ipc + ozone.container.ratis.ipc 9858 OZONE, CONTAINER, PIPELINE, RATIS The ipc port number of container for clients. @@ -250,7 +250,7 @@ The ipc port number of container for server-server communication. - dfs.container.ratis.ipc.random.port + ozone.container.ratis.ipc.random.port false OZONE,DEBUG Allocates a random free port for ozone ratis port for the @@ -259,7 +259,7 @@ - dfs.container.ratis.rpc.type + ozone.container.ratis.rpc.type GRPC OZONE, RATIS, MANAGEMENT Ratis supports different kinds of transports like netty, GRPC, @@ -268,7 +268,7 @@ - dfs.ratis.snapshot.threshold + ozone.ratis.snapshot.threshold 10000 OZONE, RATIS Number of transactions after which a ratis snapshot should be @@ -281,7 +281,7 @@ OZONE, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. + ozone.ratis.snapshot.threshold. 
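Note on usage: the rename changes key names only; value types, defaults, and
lookup behaviour are unchanged, so existing ozone-site.xml entries that still
use the old dfs.* names must be updated unless a deprecation mapping is added
elsewhere. A minimal sketch of reading two of the renamed settings through the
new constants (the constant and key names are taken from this patch; the
surrounding code is illustrative only, not part of the change):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    OzoneConfiguration conf = new OzoneConfiguration();
    // Reads "ozone.container.ipc.port" (was "dfs.container.ipc");
    // the default value of 9859 is unchanged.
    int ipcPort = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,
        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
    // Reads "ozone.ratis.snapshot.threshold"
    // (was "dfs.ratis.snapshot.threshold").
    long snapshotThreshold = conf.getLong(
        OzoneConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY,
        OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT);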
@@ -303,7 +303,7 @@ - dfs.container.ratis.replication.level + ozone.container.ratis.replication.level MAJORITY OZONE, RATIS Replication level to be used by datanode for submitting a @@ -312,7 +312,7 @@ - dfs.container.ratis.num.container.op.executors + ozone.container.ratis.num.container.op.executors 10 OZONE, RATIS, PERFORMANCE Number of executors that will be used by Ratis to execute @@ -320,7 +320,7 @@ - dfs.container.ratis.segment.size + ozone.container.ratis.segment.size 64MB OZONE, RATIS, PERFORMANCE The size of the raft segment file used @@ -328,7 +328,7 @@ - dfs.container.ratis.segment.preallocated.size + ozone.container.ratis.segment.preallocated.size 4MB OZONE, RATIS, PERFORMANCE The pre-allocated file size for raft segment used @@ -342,7 +342,7 @@ Retry Cache entry timeout for ratis server. - dfs.ratis.leader.election.minimum.timeout.duration + ozone.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. @@ -707,7 +707,7 @@ For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. + ozone.container.ratis.datanode.storage.dir be configured separately. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d2..48ab46ed93cd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,10 +99,10 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + this.port = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + if (conf.getBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index e3c2913ec5af..59a10234739b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -238,7 +238,7 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fcc611ea3f10..87627719e944 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -115,7 +115,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -217,7 +217,7 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) @@ -236,8 +236,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -327,7 +327,7 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, + conf.getLong(OzoneConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY, OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); @@ -378,7 +378,7 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. 
DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); @@ -410,7 +410,7 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, @@ -428,7 +428,7 @@ private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) { StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY, DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; @@ -436,7 +436,7 @@ private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) { assertTrue(raftSegmentBufferSize <= raftSegmentSize, () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -456,7 +456,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { TimeUnit timeUnit = OzoneConfigKeys. DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. 
DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); @@ -507,7 +507,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY, OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 33bc4a851664..8733f4f6cfd6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 7917a4ce55cd..8452f233b379 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -155,7 +155,7 @@ public static HddsProtos.ReplicationFactor getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 4f5b87dd3e54..cdaae62eeeb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -82,8 +82,8 @@ void setUp() throws Exception { conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, true); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 565853c22dde..3d189416368c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 3859cd47c9b9..d5c7cd05022d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 1159d4277c78..fc499737e3dc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -82,7 +82,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index e3c610bfe47a..5d14663c775e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -228,7 +228,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + 
ozoneConf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 7f38eab785b8..56d41c3d93e1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) BlockUtils.shutdownCache(conf); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 497418dcdcb9..9223976d2f03 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) throws Exception { initTest(versionInfo); String path = folder.toString(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b31..af06ce421fa3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. 
*/ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + return conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b241ac0f2d28..a84ecb6f7852 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY, true); SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 58f65df8fd85..6856a6885895 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -179,9 +179,9 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, @@ -217,9 +217,9 @@ public void testDeletedContainersClearedOnStartup() throws Exception { @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setBoolean( OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); @@ -267,7 +267,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -579,7 +579,7 @@ private StateContext 
heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d07e696e7ef0..387e4624bede 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -98,7 +98,7 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml index 171494aa5dbe..100fd6cfc471 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml @@ -18,7 +18,7 @@ - ozone.ratis.leader.election.minimum.timeout.duration + ozone.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. From 3a77d1360dbdadba5238597d5eaae92959986adb Mon Sep 17 00:00:00 2001 From: sarvekshayr Date: Fri, 9 Feb 2024 10:13:20 +0530 Subject: [PATCH 6/6] Revert "Merge remote-tracking branch 'origin' into HDDS-815" This reverts commit 9527771605947b5452c0593de48fefeabde065d8, reversing changes made to 2cbabcc090dc6f78b0af8c4221c777b550bdc92b. 
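Among the changes restored by this revert is the earlier VolumeArgs builder,
which exposes setAcls(List<OzoneAcl>) in place of the incremental
addAcl(OzoneAcl) method. A minimal usage sketch under the restored API (the
admin/owner names, metadata entry, and empty ACL list are illustrative only):

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.ozone.client.VolumeArgs;

    static VolumeArgs exampleVolumeArgs() throws IOException {
      // setAcls declares IOException in the restored signature, so the
      // caller must handle or propagate it.
      return VolumeArgs.newBuilder()
          .setAdmin("admin")
          .setOwner("alice")
          .setAcls(Collections.emptyList())
          .addMetadata("env", "test")
          .build();
    }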
--- .../src/main/resources/ozone-default.xml | 8 - .../hadoop/ozone/client/VolumeArgs.java | 47 +- .../hadoop/ozone/client/rpc/RpcClient.java | 5 +- .../apache/hadoop/ozone/om/OMConfigKeys.java | 2 - .../dashboards/Ozone - JVM Metrics.json | 357 ++++++++++---- .../dashboards/Ozone - ListKey Metrics.json | 85 +++- .../dashboards/Ozone - Object Metrics.json | 1 + .../AbstractRootedOzoneFileSystemTest.java | 20 +- .../org/apache/hadoop/fs/ozone/TestHSync.java | 3 +- .../hadoop/fs/ozone/TestLeaseRecovery.java | 4 +- .../TestOzoneFileSystemWithStreaming.java | 3 +- .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 1 + ...estSCMContainerPlacementPolicyMetrics.java | 2 +- .../hdds/scm/TestStorageContainerManager.java | 18 +- .../hadoop/hdds/scm/node/TestQueryNode.java | 9 +- .../scm/pipeline/TestLeaderChoosePolicy.java | 9 +- .../hdds/scm/pipeline/TestNodeFailure.java | 5 +- .../TestRatisPipelineCreateAndDestroy.java | 8 +- .../hdds/scm/pipeline/TestSCMRestart.java | 11 +- .../hdds/scm/storage/TestCommitWatcher.java | 1 + .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 19 +- .../hdds/upgrade/TestScmHAFinalization.java | 4 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 55 +++ .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 11 + .../ozone/client/TestOzoneClientFactory.java | 3 +- .../rpc/AbstractTestECKeyOutputStream.java | 7 +- .../client/rpc/Test2WayCommitInRatis.java | 3 + .../hadoop/ozone/client/rpc/TestBCSID.java | 4 +- .../client/rpc/TestBlockDataStreamOutput.java | 1 + .../client/rpc/TestBlockOutputStream.java | 1 + .../rpc/TestContainerReplicationEndToEnd.java | 4 +- .../client/rpc/TestContainerStateMachine.java | 3 +- ...estContainerStateMachineFailureOnRead.java | 4 +- .../TestContainerStateMachineFailures.java | 4 +- .../TestContainerStateMachineFlushDelay.java | 3 +- .../rpc/TestContainerStateMachineStream.java | 10 +- .../rpc/TestDeleteWithInAdequateDN.java | 10 +- .../rpc/TestFailureHandlingByClient.java | 3 +- ...TestFailureHandlingByClientFlushDelay.java | 2 +- .../rpc/TestHybridPipelineOnDatanode.java | 4 +- .../TestMultiBlockWritesWithDnFailures.java | 1 + ...TestOzoneClientMultipartUploadWithFSO.java | 3 +- ...oneClientRetriesOnExceptionFlushDelay.java | 1 + .../TestOzoneClientRetriesOnExceptions.java | 1 + .../rpc/TestOzoneRpcClientAbstract.java | 3 +- .../rpc/TestValidateBCSIDOnRestart.java | 5 +- .../client/rpc/read/TestInputStreamBase.java | 2 +- .../container/TestECContainerRecovery.java | 5 +- .../commandhandler/TestBlockDeletion.java | 1 + .../TestCloseContainerByPipeline.java | 2 +- .../apache/hadoop/ozone/debug/TestLDBCli.java | 5 +- .../ozone/dn/ratis/TestDnRatisLogParser.java | 4 +- .../hadoop/ozone/freon/TestDataValidate.java | 5 +- .../TestFreonWithDatanodeFastRestart.java | 4 +- .../freon/TestFreonWithPipelineDestroy.java | 5 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 23 +- .../hadoop/ozone/om/TestKeyPurging.java | 3 +- .../ozone/om/TestOMBucketLayoutUpgrade.java | 3 +- .../ozone/om/TestOMUpgradeFinalization.java | 3 +- .../hadoop/ozone/om/TestScmSafeMode.java | 9 +- .../ozone/om/TestSnapshotDeletingService.java | 5 +- .../om/multitenant/TestMultiTenantVolume.java | 5 +- .../ozone/om/snapshot/TestOmSnapshot.java | 3 +- .../snapshot/TestOzoneManagerSnapshotAcl.java | 2 +- .../src/test/resources/ozone-site.xml | 15 - .../om/request/key/OMKeyCommitRequest.java | 8 +- .../key/OMKeyCommitRequestWithFSO.java | 8 +- .../ozone/om/TestOmMetadataManager.java | 37 +- .../ozone/om/request/OMRequestTestUtils.java | 240 +++++++--- .../bucket/TestOMBucketDeleteRequest.java | 13 
+- .../file/TestOMDirectoryCreateRequest.java | 11 +- .../TestOMDirectoryCreateRequestWithFSO.java | 19 +- .../request/file/TestOMFileCreateRequest.java | 48 +- .../file/TestOMFileCreateRequestWithFSO.java | 35 +- .../file/TestOMRecoverLeaseRequest.java | 12 +- .../key/TestOMAllocateBlockRequest.java | 8 +- .../TestOMAllocateBlockRequestWithFSO.java | 14 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/request/key/TestOMKeyAclRequest.java | 6 +- .../key/TestOMKeyAclRequestWithFSO.java | 23 +- .../request/key/TestOMKeyCommitRequest.java | 66 +-- .../key/TestOMKeyCommitRequestWithFSO.java | 17 +- .../request/key/TestOMKeyCreateRequest.java | 6 +- .../key/TestOMKeyCreateRequestWithFSO.java | 17 +- .../request/key/TestOMKeyDeleteRequest.java | 4 +- .../key/TestOMKeyDeleteRequestWithFSO.java | 24 +- .../key/TestOMKeyPurgeRequestAndResponse.java | 2 +- .../request/key/TestOMKeyRenameRequest.java | 2 +- .../key/TestOMKeyRenameRequestWithFSO.java | 13 +- .../om/request/key/TestOMKeyRequest.java | 8 +- .../request/key/TestOMKeysDeleteRequest.java | 6 +- .../key/TestOMKeysDeleteRequestWithFSO.java | 16 +- .../request/key/TestOMKeysRenameRequest.java | 7 +- .../om/request/key/TestOMSetTimesRequest.java | 2 +- .../key/TestOMSetTimesRequestWithFSO.java | 15 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 19 +- ...estS3MultipartUploadCommitPartRequest.java | 7 +- ...ltipartUploadCommitPartRequestWithFSO.java | 17 +- .../TestS3MultipartUploadCompleteRequest.java | 6 +- ...MultipartUploadCompleteRequestWithFSO.java | 17 +- .../snapshot/TestOMSnapshotCreateRequest.java | 12 +- .../file/TestOMDirectoryCreateResponse.java | 4 +- .../file/TestOMFileCreateResponseWithFSO.java | 11 +- .../key/TestOMAllocateBlockResponse.java | 2 +- .../TestOMAllocateBlockResponseWithFSO.java | 16 +- .../response/key/TestOMKeyCommitResponse.java | 4 +- .../key/TestOMKeyCommitResponseWithFSO.java | 26 +- .../key/TestOMKeyCreateResponseWithFSO.java | 15 +- .../response/key/TestOMKeyDeleteResponse.java | 6 +- .../key/TestOMKeyDeleteResponseWithFSO.java | 26 +- .../response/key/TestOMKeyRenameResponse.java | 6 +- .../key/TestOMKeyRenameResponseWithFSO.java | 34 +- .../om/response/key/TestOMKeyResponse.java | 21 +- .../key/TestOMKeysDeleteResponse.java | 4 +- .../key/TestOMKeysDeleteResponseWithFSO.java | 13 +- .../key/TestOMKeysRenameResponse.java | 5 +- .../key/TestOMOpenKeysDeleteResponse.java | 2 +- ...3ExpiredMultipartUploadsAbortResponse.java | 14 +- ...ultipartUploadCompleteResponseWithFSO.java | 56 +-- .../service/TestDirectoryDeletingService.java | 15 +- .../ozone/om/service/TestQuotaRepairTask.java | 21 +- .../ozone/recon/api/OMDBInsightEndpoint.java | 31 -- .../tasks/DeletedKeysInsightHandler.java | 147 ------ .../ozone/recon/tasks/OmTableHandler.java | 131 ----- .../ozone/recon/tasks/OmTableInsightTask.java | 321 +++++++++---- .../recon/tasks/OpenKeysInsightHandler.java | 163 ------- .../recon/OMMetadataManagerTestUtils.java | 18 +- .../hadoop/ozone/recon/api/TestEndpoints.java | 15 +- .../recon/tasks/TestOmTableInsightTask.java | 448 ++++-------------- .../hadoop/ozone/client/ObjectStoreStub.java | 3 +- 130 files changed, 1451 insertions(+), 1766 deletions(-) delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java diff 
--git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a3db8fc184f9..f970c58cdf7e 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2226,14 +2226,6 @@ OZONE, SECURITY, KERBEROS The OzoneManager service principal. Ex om/_HOST@REALM.COM - - ozone.om.kerberos.principal.pattern - * - - A client-side RegEx that can be configured to control - allowed realms to authenticate with (useful in cross-realm env.) - - ozone.om.http.auth.kerberos.principal HTTP/_HOST@REALM diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java index a1c9cd55bb3f..9d683c5393c2 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -18,13 +18,10 @@ package org.apache.hadoop.ozone.client; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -39,8 +36,8 @@ public final class VolumeArgs { private final String owner; private final long quotaInBytes; private final long quotaInNamespace; - private final ImmutableList acls; - private final ImmutableMap metadata; + private final List acls; + private Map metadata; /** * Private constructor, constructed via builder. @@ -61,8 +58,8 @@ private VolumeArgs(String admin, this.owner = owner; this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; - this.acls = acls == null ? ImmutableList.of() : ImmutableList.copyOf(acls); - this.metadata = metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(metadata); + this.acls = acls; + this.metadata = metadata; } /** @@ -110,20 +107,34 @@ public List getAcls() { return acls; } + /** + * Returns new builder class that builds a OmVolumeArgs. + * + * @return Builder + */ public static VolumeArgs.Builder newBuilder() { return new VolumeArgs.Builder(); } /** - * Builder for VolumeArgs. + * Builder for OmVolumeArgs. */ + @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String adminName; private String ownerName; - private long quotaInBytes = OzoneConsts.QUOTA_RESET; - private long quotaInNamespace = OzoneConsts.QUOTA_RESET; - private List acls; - private Map metadata; + private long quotaInBytes; + private long quotaInNamespace; + private List listOfAcls; + private Map metadata = new HashMap<>(); + + /** + * Constructs a builder. 
+ */ + public Builder() { + quotaInBytes = OzoneConsts.QUOTA_RESET; + quotaInNamespace = OzoneConsts.QUOTA_RESET; + } public VolumeArgs.Builder setAdmin(String admin) { this.adminName = admin; @@ -146,18 +157,12 @@ public VolumeArgs.Builder setQuotaInNamespace(long quota) { } public VolumeArgs.Builder addMetadata(String key, String value) { - if (metadata == null) { - metadata = new HashMap<>(); - } metadata.put(key, value); return this; } - public VolumeArgs.Builder addAcl(OzoneAcl acl) + public VolumeArgs.Builder setAcls(List acls) throws IOException { - if (acls == null) { - acls = new ArrayList<>(); - } - acls.add(acl); + this.listOfAcls = acls; return this; } @@ -167,7 +172,7 @@ public VolumeArgs.Builder addAcl(OzoneAcl acl) */ public VolumeArgs build() { return new VolumeArgs(adminName, ownerName, quotaInBytes, - quotaInNamespace, acls, metadata); + quotaInNamespace, listOfAcls, metadata); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 8343b8740169..7e1e6fe45602 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -430,9 +430,8 @@ public void createVolume(String volumeName, VolumeArgs volArgs) userGroups.stream().forEach((group) -> listOfAcls.add( new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS))); //ACLs from VolumeArgs - List volumeAcls = volArgs.getAcls(); - if (volumeAcls != null) { - listOfAcls.addAll(volumeAcls); + if (volArgs.getAcls() != null) { + listOfAcls.addAll(volArgs.getAcls()); } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index faa5096baf98..5dd7579eb916 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -289,8 +289,6 @@ private OMConfigKeys() { + "kerberos.keytab.file"; public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" + ".kerberos.principal"; - public static final String OZONE_OM_KERBEROS_PRINCIPAL_PATTERN_KEY = - "ozone.om.kerberos.principal.pattern"; public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE = "ozone.om.http.auth.kerberos.keytab"; public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json index 73c6722176ec..339a7b005e11 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json @@ -1,9 +1,43 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.1.5" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], 
"annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -40,7 +74,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -117,7 +152,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"OzoneManager\"}[1m]) / 60", @@ -131,7 +167,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -207,7 +244,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"OzoneManager\"}", @@ -231,7 +269,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -307,10 +346,11 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9875|.+9876\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9875\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -335,7 +375,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -410,7 +451,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"OzoneManager\"}", @@ -424,7 +466,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -483,6 +526,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn02.cdip.cisco.local:9875" ], "prefix": "All except:", "readOnly": true @@ -523,7 +567,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"OzoneManager\"}", @@ -537,7 +582,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -612,7 +658,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"OzoneManager\"}", @@ -640,7 +687,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -700,6 +748,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn02.cdip.cisco.local:9875" ], "prefix": "All except:", "readOnly": true @@ -740,7 +789,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"OzoneManager\"}", @@ -754,7 +804,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -830,7 +881,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": 
"builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"OzoneManager\"}", @@ -858,7 +910,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -918,6 +971,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -958,7 +1012,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, @@ -975,7 +1030,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1050,7 +1106,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"Recon\"}", @@ -1078,7 +1135,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1154,10 +1212,11 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9889|.+9888\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9889\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -1182,7 +1241,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1257,7 +1317,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"Recon\"}", @@ -1271,7 +1332,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1346,7 +1408,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"Recon\"}", @@ -1360,7 +1423,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1435,7 +1499,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"Recon\"}", @@ -1463,7 +1528,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1523,6 +1589,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -1563,7 +1630,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"Recon\"}", @@ -1577,7 +1645,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1637,6 +1706,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -1677,7 +1747,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": 
"jvm_metrics_mem_non_heap_used_m{processname=\"Recon\"}", @@ -1705,7 +1776,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1765,6 +1837,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -1805,7 +1878,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"HddsDatanode\"}[1m]) / 60", @@ -1819,7 +1893,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1894,7 +1969,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"HddsDatanode\"}", @@ -1922,7 +1998,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1998,10 +2075,11 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9883|.+9882\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9883\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -2026,7 +2104,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2101,7 +2180,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"HddsDatanode\"}", @@ -2115,7 +2195,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2174,6 +2255,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -2214,7 +2296,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"HddsDatanode\"}", @@ -2228,7 +2311,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2303,7 +2387,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"HddsDatanode\"}", @@ -2331,7 +2416,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2391,6 +2477,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -2431,7 +2518,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"HddsDatanode\"}", @@ -2445,7 +2533,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2505,6 +2594,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": 
"All except:", "readOnly": true @@ -2545,7 +2635,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"HddsDatanode\"}", @@ -2573,7 +2664,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2649,7 +2741,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"StorageContainerManager\"}[1m]) / 60", @@ -2663,7 +2756,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2738,7 +2832,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"StorageContainerManager\"}", @@ -2766,7 +2861,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2842,10 +2938,11 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9877|.+9876\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9877\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -2870,7 +2967,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -2945,7 +3043,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"StorageContainerManager\"}", @@ -2959,7 +3058,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3034,7 +3134,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"StorageContainerManager\"}", @@ -3048,7 +3149,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3123,7 +3225,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"StorageContainerManager\"}", @@ -3151,7 +3254,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3211,6 +3315,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn01.cdip.cisco.local:9877" ], "prefix": "All except:", "readOnly": true @@ -3251,7 +3356,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"StorageContainerManager\"}", @@ -3265,7 +3371,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3325,6 +3432,7 @@ "options": { "mode": "exclude", "names": [ + "rhelnn01.cdip.cisco.local:9877" ], "prefix": "All 
except:", "readOnly": true @@ -3365,7 +3473,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"StorageContainerManager\"}", @@ -3393,7 +3502,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3453,6 +3563,7 @@ "options": { "mode": "exclude", "names": [ + "rhel04.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -3493,7 +3604,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "exemplar": false, @@ -3505,7 +3617,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "hide": false, "refId": "B" @@ -3516,7 +3629,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3591,7 +3705,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"S3Gateway\"}", @@ -3619,7 +3734,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3679,6 +3795,7 @@ "options": { "mode": "exclude", "names": [ + "rhel02.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -3719,10 +3836,11 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9879|.+9878\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9879\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -3747,7 +3865,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3822,7 +3941,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"S3Gateway\"}", @@ -3836,7 +3956,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3911,7 +4032,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"S3Gateway\"}", @@ -3925,7 +4047,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4000,7 +4123,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"S3Gateway\"}", @@ -4028,7 +4152,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4088,6 +4213,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -4128,7 +4254,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": 
"jvm_metrics_mem_heap_used_m{processname=\"S3Gateway\"}", @@ -4142,7 +4269,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4202,6 +4330,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -4242,7 +4371,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"S3Gateway\"}", @@ -4270,7 +4400,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4346,7 +4477,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis[1m]) / 60", @@ -4360,7 +4492,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4435,7 +4568,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count", @@ -4463,7 +4597,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4539,7 +4674,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_cpu_jvm_load", @@ -4567,7 +4703,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4642,7 +4779,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable", @@ -4656,7 +4794,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4731,7 +4870,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked", @@ -4745,7 +4885,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4820,7 +4961,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting", @@ -4848,7 +4990,8 @@ "panels": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4908,6 +5051,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -4948,7 +5092,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m", @@ -4962,7 +5107,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -5022,6 +5168,7 @@ "options": { "mode": "exclude", "names": [ + "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -5062,7 +5209,8 @@ "targets": [ { "datasource": { - "type": 
"prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m", @@ -5095,6 +5243,7 @@ "timepicker": {}, "timezone": "", "title": "JVM Metrics", + "uid": "DtIgEEmSz", "version": 16, "weekStart": "" -} +} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json index c5db476b69a2..a0771b509075 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json @@ -1,9 +1,51 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS-1", + "label": "Prometheus-1", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + }, + { + "name": "DS_PROMETHEUS-0", + "label": "Prometheus-0", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -34,7 +76,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "fieldConfig": { "defaults": { @@ -95,6 +138,7 @@ "options": { "mode": "exclude", "names": [ + "mus-test2-1.mus-test2.root.hwx.site" ], "prefix": "All except:", "readOnly": true @@ -135,7 +179,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "disableTextWrap": false, "editorMode": "builder", @@ -167,7 +212,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "fieldConfig": { "defaults": { @@ -244,7 +290,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "disableTextWrap": false, "editorMode": "builder", @@ -259,7 +306,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "disableTextWrap": false, "editorMode": "builder", @@ -279,7 +327,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-0}" }, "fieldConfig": { "defaults": { @@ -357,7 +406,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-0}" }, "disableTextWrap": false, "editorMode": "builder", @@ -376,7 +426,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "fieldConfig": { "defaults": { @@ -437,6 +488,7 @@ "options": { "mode": "exclude", "names": [ + "mus-test2-1.mus-test2.root.hwx.site:9875" ], "prefix": "All except:", "readOnly": true @@ -477,11 +529,12 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(om_performance_metrics_list_keys_latency_ns_num_ops[60s])", + "editorMode": "builder", + 
"expr": "om_performance_metrics_list_keys_ops_per_sec_num_ops", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -496,7 +549,8 @@ }, { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "fieldConfig": { "defaults": { @@ -557,6 +611,7 @@ "options": { "mode": "exclude", "names": [ + "mus-test2-1.mus-test2.root.hwx.site:9875" ], "prefix": "All except:", "readOnly": true @@ -597,7 +652,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "${DS_PROMETHEUS-1}" }, "disableTextWrap": false, "editorMode": "builder", @@ -629,6 +685,7 @@ "timepicker": {}, "timezone": "", "title": "ListKey Dashboard", + "uid": "cac0d75b-49a2-41f2-b8bf-57f9c86bfa8c", "version": 14, "weekStart": "" } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json index dced4f391b17..7644b12f2a25 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json @@ -1339,5 +1339,6 @@ }, "timezone": "", "title": "Ozone - Object Metrics", + "uid": "yakEh0Eik", "version": 1 } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 8ee82633d59a..cb33970d8712 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -1185,14 +1185,18 @@ void testSharedTmpDir() throws IOException { BitSet aclRights = new BitSet(); aclRights.set(READ.ordinal()); aclRights.set(WRITE.ordinal()); + List objectAcls = new ArrayList<>(); + objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", + aclRights, ACCESS)); + objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, + ACCESS)); // volume acls have all access to admin and read+write access to world // Construct VolumeArgs - VolumeArgs volumeArgs = VolumeArgs.newBuilder() + VolumeArgs volumeArgs = new VolumeArgs.Builder() .setAdmin("admin") .setOwner("admin") - .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", aclRights, ACCESS)) - .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, ACCESS)) + .setAcls(Collections.unmodifiableList(objectAcls)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); // Sanity check @@ -1223,7 +1227,7 @@ void testSharedTmpDir() throws IOException { } // set acls for shared tmp mount under the tmp volume - List objectAcls = new ArrayList<>(); + objectAcls.clear(); objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, ACCESS)); aclRights.clear(DELETE.ordinal()); @@ -1298,8 +1302,8 @@ void testTempMount() throws IOException { OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", userRights, ACCESS); // Construct VolumeArgs - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .addAcl(aclWorldAccess) + VolumeArgs volumeArgs = new VolumeArgs.Builder() + .setAcls(Collections.singletonList(aclWorldAccess)) .setQuotaInNamespace(1000).build(); // Sanity check assertNull(volumeArgs.getOwner()); @@ -2299,8 +2303,8 @@ void 
testNonPrivilegedUserMkdirCreateBucket() throws IOException { OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", userRights, ACCESS); // Construct VolumeArgs, set ACL to world access - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .addAcl(aclWorldAccess) + VolumeArgs volumeArgs = new VolumeArgs.Builder() + .setAcls(Collections.singletonList(aclWorldAccess)) .build(); proxy.createVolume(volume, volumeArgs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index d2e2ea6f7ef9..05d297d38ed8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -77,7 +77,6 @@ import org.slf4j.event.Level; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -118,7 +117,6 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); - CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -131,6 +129,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index ffd54cfea869..ca68aad45515 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -43,7 +43,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -93,8 +92,6 @@ public void init() throws IOException, InterruptedException, conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -107,6 +104,7 @@ public void init() throws IOException, InterruptedException, cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 6ec6a32d4fba..2a6c8c456b9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -44,7 +44,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; @@ -88,7 +87,6 @@ public static void init() throws Exception { CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); - CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -102,6 +100,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 98b87d9d3031..47dc9ac0c3ba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -109,6 +109,7 @@ static void initClass(@TempDir File tempDir) throws Exception { // Start the cluster cluster = MiniOzoneCluster.newOMHABuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java index 4ac44315556c..90f8375f829b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -85,9 +85,9 @@ public void setup() throws Exception { StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(4) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index f64736ac88a8..e973c842de44 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -291,6 +291,7 @@ public void testBlockDeletionTransactions() throws Exception { numKeys); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); @@ -370,11 +371,10 @@ public void testBlockDeletionTransactions() throws Exception { @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .build(); + MiniOzoneCluster cluster = + MiniOzoneCluster.newBuilder(conf).setHbInterval(1000) + .setHbProcessorInterval(3000).setNumDatanodes(1) + .build(); cluster.waitForClusterToBeReady(); try { @@ -462,10 +462,10 @@ public void testBlockDeletingThrottling() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setHbInterval(1000) + .setHbProcessorInterval(3000) .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); @@ -826,10 +826,10 @@ public void testCloseContainerCommandOnRestart() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setHbInterval(1000) + .setHbProcessorInterval(3000) .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java index 683a0c176eb9..e8dc7455a11c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java @@ -32,9 +32,11 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -48,6 +50,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -62,7 +65,11 @@ public class TestQueryNode { @BeforeEach public void setUp() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + final int interval = 1000; + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + interval, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); @@ -70,10 +77,10 @@ public void setUp() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + numOfDatanodes / 2); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) + .setTotalPipelineNumLimit(numOfDatanodes + numOfDatanodes / 2) .build(); cluster.waitForClusterToBeReady(); scmClient = new ContainerOperationClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 51b5d84a13e9..439b563d6330 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -37,12 +37,9 @@ import java.util.Map; import java.util.UUID; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -59,12 +56,12 @@ public class TestLeaderChoosePolicy { public void init(int numDatanodes, int datanodePipelineLimit) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, datanodePipelineLimit); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) + .setHbInterval(2000) + .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index d8840436ee0b..c73ffb982cf6 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -62,11 +61,11 @@ public static void init() throws Exception { conf.setFromObject(ratisServerConfig); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(6) + .setHbInterval(1000) + .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 829a9581f663..443105b6ccb6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -38,10 +38,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -57,14 +55,14 @@ public class TestRatisPipelineCreateAndDestroy { public void init(int numDatanodes) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, 500, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) + .setHbInterval(2000) + .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java index 6ce05ad3be74..988f163adab5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java @@ -35,7 +35,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotSame; @@ -61,17 +60,17 @@ public class TestSCMRestart { */ @BeforeAll public static void init() throws Exception { - final int numOfNodes = 4; conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - // allow only one FACTOR THREE pipeline. - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfNodes + 1); + int numOfNodes = 4; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfNodes) + // allow only one FACTOR THREE pipeline. + .setTotalPipelineNumLimit(numOfNodes + 1) + .setHbInterval(1000) + .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 1363dc2269a4..563e0162acc6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -144,6 +144,7 @@ public void init() throws Exception { StorageUnit.MB); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 199b4b63ff74..57e807b7c751 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -20,17 +20,12 @@ import static java.lang.Thread.sleep; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_COMPLETE_FINALIZATION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_POST_FINALIZE_UPGRADE; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_PRE_FINALIZE_UPGRADE; @@ -153,13 +148,7 @@ public static void initClass() { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); - // allow only one FACTOR THREE pipeline. - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, NUM_DATA_NODES + 1); - conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 500, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1"); scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor<>(); SCMConfigurator scmConfigurator = new SCMConfigurator(); @@ -170,6 +159,12 @@ public static void initClass() { .setNumDatanodes(NUM_DATA_NODES) .setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) + // allow only one FACTOR THREE pipeline. + .setTotalPipelineNumLimit(NUM_DATA_NODES + 1) + .setHbInterval(500) + .setHbProcessorInterval(500) + .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()) + .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); // Setting the provider to a max of 100 clusters. 
Some of the tests here
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
index aa9f561aa02b..d2ae30efcebc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
@@ -55,7 +55,6 @@
 import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED;
-import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -88,12 +87,11 @@ public void init(OzoneConfiguration conf,
     SCMConfigurator configurator = new SCMConfigurator();
     configurator.setUpgradeFinalizationExecutor(executor);
 
-    conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-
     MiniOzoneCluster.Builder clusterBuilder =
         new MiniOzoneHAClusterImpl.Builder(conf)
         .setNumOfStorageContainerManagers(NUM_SCMS)
         .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs)
+        .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion())
         .setSCMServiceId("scmservice")
         .setSCMConfigurator(configurator)
         .setNumOfOzoneManagers(1)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 667f7448a1bb..e864cae00b37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -297,8 +297,11 @@ default String getBaseDir() {
 
   @SuppressWarnings("visibilitymodifier")
   abstract class Builder {
+    protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
+    protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100;
     protected static final int ACTIVE_OMS_NOT_SET = -1;
     protected static final int ACTIVE_SCMS_NOT_SET = -1;
+    protected static final int DEFAULT_PIPELINE_LIMIT = 3;
     protected static final int DEFAULT_RATIS_RPC_TIMEOUT_SEC = 1;
 
     protected OzoneConfiguration conf;
@@ -314,12 +317,17 @@ abstract class Builder {
     protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET;
     protected SCMConfigurator scmConfigurator;
 
+    protected Optional<Integer> hbInterval = Optional.empty();
+    protected Optional<Integer> hbProcessorInterval = Optional.empty();
     protected String scmId = UUID.randomUUID().toString();
     protected String omId = UUID.randomUUID().toString();
 
     protected Optional<String> datanodeReservedSpace = Optional.empty();
     protected boolean includeRecon = false;
+
+    protected Optional<Integer> omLayoutVersion = Optional.empty();
+    protected Optional<Integer> scmLayoutVersion = Optional.empty();
     protected Optional<Integer> dnLayoutVersion = Optional.empty();
 
     protected int numOfDatanodes = 3;
@@ -327,6 +335,7 @@ abstract class Builder {
     protected boolean startDataNodes = true;
     protected CertificateClient certClient;
     protected SecretKeyClient secretKeyClient;
+    protected int pipelineNumLimit = DEFAULT_PIPELINE_LIMIT;
 
     protected Builder(OzoneConfiguration conf) {
       this.conf = conf;
@@ -415,6 +424,42 @@ public Builder setNumDataVolumes(int val) {
       return this;
     }
 
+    /**
+     * Sets the limit on the total number of pipelines to create.
+     * @param val number of pipelines
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setTotalPipelineNumLimit(int val) {
+      pipelineNumLimit = val;
+      return this;
+    }
+
+    /**
+     * Sets the heartbeat interval of the datanodes; the value should be
+     * in milliseconds.
+     *
+     * @param val heartbeat interval in milliseconds
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setHbInterval(int val) {
+      hbInterval = Optional.of(val);
+      return this;
+    }
+
+    /**
+     * Sets the interval at which SCM processes datanode heartbeats;
+     * the value should be in milliseconds.
+     *
+     * @param val heartbeat processor interval in milliseconds
+     *
+     * @return MiniOzoneCluster.Builder
+     */
+    public Builder setHbProcessorInterval(int val) {
+      hbProcessorInterval = Optional.of(val);
+      return this;
+    }
+
     /**
      * Sets the reserved space
      * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys}
@@ -468,6 +513,16 @@ public Builder setSCMServiceId(String serviceId) {
       return this;
     }
 
+    public Builder setScmLayoutVersion(int layoutVersion) {
+      scmLayoutVersion = Optional.of(layoutVersion);
+      return this;
+    }
+
+    public Builder setOmLayoutVersion(int layoutVersion) {
+      omLayoutVersion = Optional.of(layoutVersion);
+      return this;
+    }
+
     public Builder setDnLayoutVersion(int layoutVersion) {
       dnLayoutVersion = Optional.of(layoutVersion);
       return this;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index ceade72e7d4d..797a7515f206 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -56,6 +56,8 @@
 import static java.util.Collections.singletonList;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION;
+import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION;
 import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort;
 import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort;
 
@@ -485,6 +487,11 @@ protected OMHAService createOMService() throws IOException,
       String metaDirPath = path + "/" + nodeId;
       config.set(OZONE_METADATA_DIRS, metaDirPath);
 
+      // Set non-standard layout version if needed.
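Putting the reinstated knobs together, a minimal sketch (not part of the patch; the datanode count and intervals are illustrative) of how a test drives the builder methods documented above:

  // Sketch only: mirrors the test setups converted elsewhere in this patch.
  OzoneConfiguration conf = new OzoneConfiguration();
  MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .setTotalPipelineNumLimit(4)    // instead of conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 4)
      .setHbInterval(1000)            // instead of conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, ...)
      .setHbProcessorInterval(1000)   // instead of conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ...)
      .build();
  cluster.waitForClusterToBeReady();
  try {
    // test body
  } finally {
    cluster.shutdown();
  }

Because the new fields are Optional-backed, a builder that never calls these setters leaves the corresponding configuration keys untouched, so tests that still set them through OzoneConfiguration keep working.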
+ omLayoutVersion.ifPresent(integer -> + config.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, + String.valueOf(integer))); + OzoneManager.omInit(config); OzoneManager om = OzoneManager.createOm(config); if (certClient != null) { @@ -548,6 +555,10 @@ protected SCMHAService createSCMService() scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + scmLayoutVersion.ifPresent(integer -> + scmConfig.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, + String.valueOf(integer))); + configureSCM(); if (i == 1) { StorageContainerManager.scmInit(scmConfig, clusterId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index e2a15595b553..f2a079ca80ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.client; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -37,10 +36,10 @@ public class TestOzoneClientFactory { public void testRemoteException() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); Exception e = assertThrows(Exception.class, () -> { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) + .setTotalPipelineNumLimit(10) .build(); String omPort = cluster.getOzoneManager().getRpcPort(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index b40b0bbcc626..0b0b2586c9e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -122,7 +121,6 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { TimeUnit.SECONDS); conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, zeroCopyEnabled); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -131,9 +129,8 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .build(); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) + .setTotalPipelineNumLimit(10).build(); cluster.waitForClusterToBeReady(); client = 
OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index d7ce08338db8..e7c8be170ca1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -84,6 +85,8 @@ private void startCluster(OzoneConfiguration conf) throws Exception { blockSize = 2 * maxFlushSize; // Make sure the pipeline does not get destroyed quickly + conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, + 60, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index d668bb4b6522..c0ae49f3bf41 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -49,7 +49,6 @@ HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. 
HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -85,11 +84,10 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index ea1b16b0483a..8bb791bb103e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -105,6 +105,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index e15e1e4d63ba..1e9cefbaa481 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -115,6 +115,7 @@ static MiniOzoneCluster createCluster() throws IOException, MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 78a4e78647eb..fe08b9e0f4ba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -59,7 +59,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; @@ -116,12 +115,11 @@ public static void init() throws Exception { 
replicationConf.setInterval(Duration.ofMillis(containerReportInterval)); conf.setFromObject(replicationConf); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 6); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4) + .setTotalPipelineNumLimit(6).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.getStorageContainerManager().getReplicationManager().start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index ca8d021ee7f7..0cad1940eb22 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -53,7 +53,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -93,7 +92,6 @@ public void setup() throws Exception { baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -115,6 +113,7 @@ public void setup() throws Exception { // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) + .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index eea068a8742f..1d0f25b3a041 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -51,7 +51,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -95,8 +94,6 @@ public void setup() throws Exception { 
conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); - DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000)); @@ -121,6 +118,7 @@ public void setup() throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) + .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 58b56862b42a..db07c9a32cca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -83,7 +83,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; @@ -144,7 +143,6 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -171,7 +169,7 @@ public static void init() throws Exception { conf.setLong(OzoneConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index c9dd917e70bb..da1cf6b452a9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -56,7 +56,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -102,7 +101,6 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); @@ -121,6 +119,7 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) + .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index c2762cd9efaf..83a3b6a580c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -50,7 +50,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -90,7 +89,6 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); @@ -125,9 +123,11 @@ public void setup() throws Exception { conf.setLong(OzoneConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); + cluster = + MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .setHbInterval(200) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index d4ff85736273..fa50dac64f7e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -65,7 +65,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -102,20 +101,15 @@ public class TestDeleteWithInAdequateDN { */ @BeforeAll public static void init() throws Exception { - final int numOfDatanodes = 3; - conf = new OzoneConfiguration(); path = GenericTestUtils .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); File baseDir = new File(path); baseDir.mkdirs(); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, - TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT); // Make the stale, dead and server failure timeout higher so that a dead // node is not detecte at SCM as well as the pipeline close action // never gets initiated early at Datanode in the test. @@ -162,8 +156,12 @@ public static void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); + int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) + .setTotalPipelineNumLimit( + numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) + .setHbInterval(100) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(THREE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index 65266200d423..4bb3561a7358 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -116,7 +116,6 @@ private void init() throws Exception { conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -140,7 +139,7 @@ private void init() throws Exception { Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10).build(); + .setNumDatanodes(10).setTotalPipelineNumLimit(15).build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 00e6c7f1aa6b..2068230c666c 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -112,7 +112,6 @@ private void init() throws Exception { conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -141,6 +140,7 @@ private void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) + .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index 51ebf3fa0ccd..4ccdd0e2d4b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -44,7 +44,6 @@ import java.io.IOException; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -74,9 +73,8 @@ public class TestHybridPipelineOnDatanode { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3) - .build(); + .setTotalPipelineNumLimit(5).build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index eec6168386d1..843dc5fbe076 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -112,6 +112,7 @@ private void startCluster(int datanodes) throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(datanodes) + .setTotalPipelineNumLimit(0) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 268a192640c6..a89e61769966 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -78,7 +78,6 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -115,7 +114,6 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); } @@ -137,6 +135,7 @@ public static void shutdown() throws IOException { static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index ad59621e0c75..c3e8a8d461b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -109,6 +109,7 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index 3e1667a38a68..cd99382f300b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -118,6 +118,7 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 98bf65ad6b6f..a87d05321e27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -129,7 +129,6 @@ import static 
org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OmUtils.LOG; import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -204,7 +203,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop // for testZReadKeyWithUnhealthyContainerReplica. conf.set("ozone.scm.stale.node.interval", "10s"); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.MB) .setDataStreamMinPacketSize(1) @@ -212,6 +210,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 1e22613f929b..a8029987fedd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -57,7 +57,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -101,7 +100,6 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); @@ -127,7 +125,8 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). 
+ setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 256148dfb8de..9f5d04c56f94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -53,7 +53,6 @@ protected static MiniOzoneCluster newCluster( conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); @@ -74,6 +73,7 @@ protected static MiniOzoneCluster newCluster( return MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) + .setTotalPipelineNumLimit(5) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index 24064ae5c883..e045b48bda96 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -140,7 +140,6 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -149,8 +148,8 @@ public static void init() throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5ff8d713649e..d5564ac2315e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -180,6 +180,7 @@ public void init() throws Exception { conf.setFromObject(replicationConf); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) + .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 8d22eddadc59..cd25ee25c8f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -85,10 +85,10 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1"); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) + .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index e94f46a398b3..ec7eb81db33d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -22,7 +22,6 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -62,7 +61,6 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -297,7 +295,8 @@ private void prepareTable(String tableName, boolean schemaV3) for (int i = 1; i <= 5; i++) { String key = "key" + i; OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); + key, HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE); keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index 6e653b4b8627..cbda9f13f7b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -35,7 +35,6 @@ import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; /** @@ -51,9 +50,8 @@ public class TestDnRatisLogParser { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 2); cluster = 
MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); + .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); cluster.waitForClusterToBeReady(); System.setOut(new PrintStream(out, false, UTF_8.name())); System.setErr(new PrintStream(err, false, UTF_8.name())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index e1f2061c7d46..28cc863c26d5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -53,10 +52,8 @@ static void startCluster(OzoneConfiguration conf) throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).build(); + .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 180000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java index 862b52c8e9e1..0798731a839d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java @@ -34,7 +34,6 @@ import picocli.CommandLine; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -55,8 +54,9 @@ public class TestFreonWithDatanodeFastRestart { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) + .setHbProcessorInterval(1000) + .setHbInterval(1000) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index 08c1b3bd3b35..d78beff7e78b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -61,8 +61,6 @@ public static void init() throws Exception { 1, TimeUnit.SECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 
1000, TimeUnit.MILLISECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -76,7 +74,10 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = MiniOzoneCluster.newBuilder(conf) + .setHbProcessorInterval(1000) + .setHbInterval(1000) .setNumDatanodes(3) + .setTotalPipelineNumLimit(8) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index ba98a28280a9..67ab3169b69c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -44,7 +44,9 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -974,11 +976,12 @@ public void testListStatusWithTableCache() throws Exception { if (i % 2 == 0) { // Add to DB OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, - 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); + 1000L, HddsProtos.ReplicationType.RATIS, + ONE, metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, - RatisReplicationConfig.getInstance(ONE), + HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } } @@ -1045,12 +1048,13 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, - 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); + 1000L, HddsProtos.ReplicationType.RATIS, + ONE, metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, - RatisReplicationConfig.getInstance(ONE), + HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } } @@ -1088,12 +1092,13 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, - 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); + 1000L, HddsProtos.ReplicationType.RATIS, + ONE, metadataManager); existKeySet.add(prefixKey + i); } else { OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, - RatisReplicationConfig.getInstance(ONE), + HddsProtos.ReplicationType.RATIS, ONE, metadataManager); String key = metadataManager.getOzoneKey( @@ -1441,7 +1446,8 @@ public void testRefreshPipeline() throws Exception { when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - 
"b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); + "b1", "k1", ReplicationType.RATIS, + ReplicationFactor.THREE); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); @@ -1495,7 +1501,8 @@ public void testRefreshPipelineException() throws Exception { OMPerformanceMetrics metrics = mock(OMPerformanceMetrics.class); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); + "b1", "k1", ReplicationType.RATIS, + ReplicationFactor.THREE); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index e3bb5b5bccb8..83eac0ab288b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -45,7 +45,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -68,7 +67,6 @@ public class TestKeyPurging { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -79,6 +77,7 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) + .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index 58d19d846d4a..b8e115864727 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -50,7 +50,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; @@ -92,12 +91,12 @@ class TestOMBucketLayoutUpgrade { @BeforeAll void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) .setNumDatanodes(1) + .setOmLayoutVersion(fromLayoutVersion) .build(); 
cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index 22358cbe6bb7..fa84130c9d6f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.assertClusterPrepared; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; import static org.apache.ozone.test.GenericTestUtils.waitFor; @@ -104,11 +103,11 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { - conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) .setNumDatanodes(1) + .setOmLayoutVersion(INITIAL_VERSION.layoutVersion()) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 2f06304bd1e3..14b1a30b44f1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -56,12 +56,9 @@ import java.util.List; import java.util.Map; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -100,9 +97,9 @@ public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s"); conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s"); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, MILLISECONDS); builder = MiniOzoneCluster.newBuilder(conf) + .setHbInterval(1000) + .setHbProcessorInterval(500) .setStartDataNodes(false); cluster = builder.build(); cluster.startHddsDatanodes(); @@ -326,6 +323,8 @@ public void testSCMSafeModeDisabled() throws Exception { conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3); builder = MiniOzoneCluster.newBuilder(conf) + .setHbInterval(1000) + 
.setHbProcessorInterval(500) .setNumDatanodes(3); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index e627a880fd21..6e3e4fd7f404 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -484,9 +484,8 @@ private void createSnapshotDataForBucket1() throws Exception { client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1key0", false); assertTableRowCount(keyTable, 0); - // one copy of bucket1key0 should also be reclaimed as it not same - // but original deleted key created during overwrite should not be deleted - assertTableRowCount(deletedTable, 2); + // bucket1key0 should also be reclaimed as it is not the same + assertTableRowCount(deletedTable, 1); // Create Snapshot 2. client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java index 078266581cbc..1cb436dcb38d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java @@ -49,7 +49,6 @@ import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -77,9 +76,9 @@ public static void initClusterProvider() throws Exception { conf.setBoolean( OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER, true); conf.setBoolean(OZONE_OM_MULTITENANCY_ENABLED, true); - conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) - .withoutDatanodes(); + .withoutDatanodes() + .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()); cluster = builder.build(); client = cluster.newClient(); s3VolumeName = HddsClientUtils.getDefaultS3VolumeName(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 587ae18f8627..029b0813bb55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -119,7 +119,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; @@ -208,10 +207,10 @@ private void init() throws Exception { conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumOfOzoneManagers(3) + .setOmLayoutVersion(OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index e9b7e59b4fd6..8c0b375c3ca9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -630,7 +630,7 @@ private void createBucket(BucketLayout bucketLayout, private void createVolume() throws IOException { final String volumePrefix = "volume-"; volumeName = volumePrefix + RandomStringUtils.randomNumeric(32); - final VolumeArgs volumeArgs = VolumeArgs.newBuilder() + final VolumeArgs volumeArgs = new VolumeArgs.Builder() .setAdmin(ADMIN) .setOwner(ADMIN) .build(); diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 21a7715305f8..4e79ae97fc24 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -56,21 +56,6 @@ true - - - hdds.heartbeat.interval - 1s - - - ozone.scm.heartbeat.thread.interval - 100ms - - - - ozone.scm.ratis.pipeline.limit - 3 - - ozone.scm.close.container.wait.duration 1s diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index b28b390efd73..e09c3bcef669 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -270,10 +270,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled()); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); - // using pseudoObjId as objectId can be same in case of overwrite key - long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); String delKeyName = omMetadataManager.getOzoneDeletePathKey( - pseudoObjId, dbOzoneKey); + keyToDelete.getObjectID(), dbOzoneKey); if (null == oldKeyVersionsToDeleteMap) { 
oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -305,8 +303,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, - key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); + oldKeyVersionsToDeleteMap.put(delKeyName, + new RepeatedOmKeyInfo(pseudoKeyInfo)); } // Add to cache of open key table and key table. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index 704e9e91c47d..f062e71106e0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -203,10 +203,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); String delKeyName = omMetadataManager .getOzoneKey(volumeName, bucketName, fileName); - // using pseudoObjId as objectId can be same in case of overwrite key - long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); delKeyName = omMetadataManager.getOzoneDeletePathKey( - pseudoObjId, delKeyName); + keyToDelete.getObjectID(), delKeyName); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -240,8 +238,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, - key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); + oldKeyVersionsToDeleteMap.put(delKeyName, + new RepeatedOmKeyInfo(pseudoKeyInfo)); } // Add to cache of open key table and key table. 
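For context on the two OMKeyCommitRequest hunks above: a minimal, self-contained sketch of the delete-row naming this patch restores. Here deletePathKey is a simplified stand-in for OmMetadataManager.getOzoneDeletePathKey, and the "<ozoneKey>/<objectId>" layout is an assumption for illustration, not taken from the source.

public class DeleteKeyNamingSketch {

  // Stand-in for OmMetadataManager.getOzoneDeletePathKey(objectId, ozoneKey);
  // the "<ozoneKey>/<objectId>" layout is assumed for illustration.
  static String deletePathKey(long objectId, String ozoneKey) {
    return ozoneKey + "/" + objectId;
  }

  public static void main(String[] args) {
    String dbOzoneKey = "/vol1/bucket1/bucket1key0";

    // Removed scheme: a pseudo object ID derived from the transaction
    // index, so every overwrite produced a distinct deletedTable row.
    System.out.println(deletePathKey(101L, dbOzoneKey)); // txn 101
    System.out.println(deletePathKey(102L, dbOzoneKey)); // txn 102

    // Restored scheme: the overwritten key's own objectID, so repeated
    // overwrites of one key map to the same deletedTable row name.
    long keyObjectId = 42L; // keyToDelete.getObjectID()
    System.out.println(deletePathKey(keyObjectId, dbOzoneKey));
    System.out.println(deletePathKey(keyObjectId, dbOzoneKey)); // same row
  }
}

This is also why the map update goes back from computeIfAbsent(...).addOmKeyInfo(...) to a plain put(delKeyName, new RepeatedOmKeyInfo(pseudoKeyInfo)), and why the TestSnapshotDeletingService hunk earlier drops the expected deletedTable count from two rows to one.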
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 15af3910e90f..e6debcdc23be 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -27,7 +26,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -51,7 +49,6 @@ import java.io.File; import java.time.Duration; import java.time.Instant; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; @@ -64,7 +61,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD; @@ -623,10 +619,9 @@ private void testGetExpiredOpenKeys(BucketLayout bucketLayout) for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) { final long creationTime = i < numExpiredOpenKeys ? expiredOpenKeyCreationTime : Time.now(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) - .setCreationTime(creationTime) - .build(); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, 0L, creationTime); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -694,10 +689,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys( // Ensure that "expired" MPU-related open keys are not fetched. // MPU-related open keys, identified by isMultipartKey = false for (int i = 0; i < numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, - RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) - .setCreationTime(expiredOpenKeyCreationTime) - .build(); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, "expired" + i, + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + 0L, expiredOpenKeyCreationTime, true); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -727,10 +722,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys( // HDDS-9017. 
Although these open keys are MPU-related, // the isMultipartKey flags are set to false for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) - .setCreationTime(expiredOpenKeyCreationTime) - .build(); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, "expired" + i, + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + 0L, expiredOpenKeyCreationTime, false); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -793,9 +788,8 @@ private void testGetExpiredMPUs() throws Exception { String keyName = "expired" + i; // Key info to construct the MPU DB key final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) - .setCreationTime(creationTime) - .build(); + bucketName, keyName, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, 0L, creationTime); for (int j = 1; j <= numPartsPerMPU; j++) { @@ -867,10 +861,11 @@ private void addKeysToOM(String volumeName, String bucketName, if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - 1000L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); + 1000L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); } else { OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName, - RatisReplicationConfig.getInstance(ONE), + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 21b94ce5f05a..bdc6509247b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -157,22 +157,23 @@ public static void addVolumeAndBucketToDB( @SuppressWarnings("parameterNumber") public static void addKeyToTableAndCache(String volumeName, String bucketName, - String keyName, long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, + String keyName, long clientID, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(false, true, volumeName, bucketName, keyName, clientID, - replicationConfig, trxnLogIndex, omMetadataManager); + replicationType, replicationFactor, trxnLogIndex, omMetadataManager); } /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. 
- * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationConfig + * @param replicationType + * @param replicationFactor * @param omMetadataManager * @param locationList * @throws Exception @@ -180,11 +181,12 @@ public static void addKeyToTableAndCache(String volumeName, String bucketName, @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - ReplicationConfig replicationConfig, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationConfig, 0L, omMetadataManager, + clientID, replicationType, replicationFactor, 0L, omMetadataManager, locationList, version); } @@ -192,23 +194,24 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. - * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationConfig + * @param replicationType + * @param replicationFactor * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - ReplicationConfig replicationConfig, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationConfig, 0L, omMetadataManager); + clientID, replicationType, replicationFactor, 0L, omMetadataManager); } /** @@ -222,17 +225,20 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, * @param bucketName * @param keyName * @param clientID - * @param replicationConfig + * @param replicationType + * @param replicationFactor * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, String volumeName, String bucketName, String keyName, long clientID, - ReplicationConfig replicationConfig, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, isMultipartKey, false, - volumeName, bucketName, keyName, clientID, replicationConfig, 0L, omMetadataManager); + volumeName, bucketName, keyName, clientID, replicationType, + replicationFactor, 0L, omMetadataManager); } /** @@ -242,20 +248,19 @@ public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, */ @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, - String volumeName, String bucketName, String keyName, long clientID, ReplicationConfig replicationConfig, - long trxnLogIndex, + String volumeName, String bucketName, String keyName, long clientID, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, 
bucketName, keyName, - replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) - .setObjectID(trxnLogIndex) - .build(); - + replicationType, replicationFactor, trxnLogIndex, Time.now(), version, + false); omKeyInfo.appendNewBlocks(locationList, false); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, - omMetadataManager); + omMetadataManager); } /** @@ -266,11 +271,12 @@ replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false) @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, String volumeName, String bucketName, String keyName, long clientID, - ReplicationConfig replicationConfig, long trxnLogIndex, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig) - .setObjectID(trxnLogIndex).build(); + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, trxnLogIndex); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -284,13 +290,13 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, boolean addToCache, String volumeName, String bucketName, String keyName, - long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, + long clientID, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationConfig, new OmKeyLocationInfoGroup(0, new ArrayList<>(), isMultipartKey)) - .setObjectID(trxnLogIndex) - .build(); + replicationType, replicationFactor, trxnLogIndex, Time.now(), 0L, + isMultipartKey); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -425,22 +431,23 @@ public static void addPart(PartKeyInfo partKeyInfo, /** * Add key entry to key table cache. - * * @param volumeName * @param bucketName * @param keyName - * @param replicationConfig + * @param replicationType + * @param replicationFactor * @param omMetadataManager */ @SuppressWarnings("parameterNumber") public static void addKeyToTableCache(String volumeName, String bucketName, String keyName, - ReplicationConfig replicationConfig, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, OMMetadataManager omMetadataManager) { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationConfig).build(); + replicationType, replicationFactor); omMetadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry( new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, @@ -536,42 +543,87 @@ public static void addSnapshotToTable( /** * Create OmKeyInfo. - * Initializes most values to a sensible default. 
*/ - public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, - String keyName, ReplicationConfig replicationConfig, OmKeyLocationInfoGroup omKeyLocationInfoGroup) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFileName(OzoneFSUtils.getFileName(keyName)) - .setReplicationConfig(replicationConfig) - .setObjectID(0L) - .setUpdateID(0L) - .setCreationTime(Time.now()) - .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup) - .setDataSize(1000L); - } - - public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, - String keyName, ReplicationConfig replicationConfig) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig, - new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false)); + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, 0L); } /** * Create OmDirectoryInfo. */ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, - long objectID, - long parentObjID) { + long objectID, + long parentObjID) { return new OmDirectoryInfo.Builder() - .setName(keyName) - .setCreationTime(Time.now()) + .setName(keyName) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setObjectID(objectID) + .setParentObjectID(parentObjID) + .setUpdateID(50) + .build(); + } + + /** + * Create OmKeyInfo. + */ + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, Time.now()); + } + + /** + * Create OmKeyInfo. + */ + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, creationTime, 0L, false); + } + + /** + * Create OmKeyInfo. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime, boolean isMultipartKey) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, creationTime, 0L, isMultipartKey); + } + + /** + * Create OmKeyInfo for LEGACY/OBS bucket. 
+ */ + @SuppressWarnings("parameterNumber") + private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime, long version, boolean isMultipartKey) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(version, new ArrayList<>(), + isMultipartKey))) + .setCreationTime(creationTime) .setModificationTime(Time.now()) + .setDataSize(1000L) + .setReplicationConfig( + ReplicationConfig + .fromProtoTypeAndFactor(replicationType, replicationFactor)) .setObjectID(objectID) - .setParentObjectID(parentObjID) - .setUpdateID(50) + .setUpdateID(objectID) .build(); } @@ -579,8 +631,8 @@ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, * Create OmMultipartKeyInfo for OBS/LEGACY bucket. */ public static OmMultipartKeyInfo createOmMultipartKeyInfo(String uploadId, - long creationTime, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { + long creationTime, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { return new OmMultipartKeyInfo.Builder() .setUploadID(uploadId) .setCreationTime(creationTime) @@ -1356,6 +1408,76 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager, CacheValue.get(1L, omVolumeArgs)); } + /** + * Create OmKeyInfo. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime) { + return createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, objectID, + parentID, trxnLogIndex, creationTime, 0L, false); + } + + /** + * Create OmKeyInfo with isMultipartKey flag. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, + boolean isMultipartKey) { + return createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, objectID, + parentID, trxnLogIndex, creationTime, 0L, isMultipartKey); + } + + /** + * Create OmKeyInfo. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, long version) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, parentID, trxnLogIndex, creationTime, + version, false); + } + + /** + * Create OmKeyInfo for FSO bucket. 
+ */ + @SuppressWarnings("parameterNumber") + private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, long version, + boolean isMultipartKey) { + String fileName = OzoneFSUtils.getFileName(keyName); + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(version, new ArrayList<>(), + isMultipartKey))) + .setCreationTime(creationTime) + .setModificationTime(Time.now()) + .setDataSize(1000L) + .setReplicationConfig(ReplicationConfig + .fromProtoTypeAndFactor(replicationType, replicationFactor)) + .setObjectID(objectID) + .setUpdateID(trxnLogIndex) + .setParentObjectID(parentID) + .setFileName(fileName) + .build(); + } + + /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index fdc13e369c08..34f348a688dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -19,21 +19,16 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; - -import java.util.ArrayList; import java.util.UUID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; @@ -124,10 +119,12 @@ public void testBucketContainsIncompleteMPUs() throws Exception { new OMBucketDeleteRequest(omRequest); // Create a MPU key in the MPU table to simulate incomplete MPU + long creationTime = Time.now(); String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(), - RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) - .build(); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, UUID.randomUUID().toString(), + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + 0L, creationTime, true); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
createOmMultipartKeyInfo(uploadId, Time.now(), HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 7af60c18d94a..275e8a6f2aae 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -27,7 +27,7 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -60,7 +60,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -298,7 +297,8 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); + keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = @@ -340,7 +340,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, - RatisReplicationConfig.getInstance(ONE), + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -383,7 +383,8 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { omMetadataManager); // Add a key with first two levels. 
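The directory-create hunks here exercise the proto-enum helper signatures this patch restores in OMRequestTestUtils. As a quick reference, a sketch of the two calls as re-defined in this patch; the volume, bucket, and key values are illustrative, and omMetadataManager is the field these test classes already hold.

// Restored test-helper signatures, called with illustrative values:
OMRequestTestUtils.addKeyToTable(false /* openKeyTable */, "vol1", "bucket1",
    "a/b", 1L /* clientID */, HddsProtos.ReplicationType.RATIS,
    HddsProtos.ReplicationFactor.ONE, omMetadataManager);

OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo("vol1", "bucket1",
    "a/b", HddsProtos.ReplicationType.RATIS,
    HddsProtos.ReplicationFactor.ONE);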
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 11), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); + keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index e0460ba81a99..0eceb2246ee2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -59,7 +59,6 @@ import java.util.UUID; import java.util.stream.Collectors; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -423,7 +422,8 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { // Add a file into the FileTable, this is to simulate "file exists" check. OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objID++).build(); + bucketName, keyName, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, objID++); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); @@ -492,22 +492,21 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() // for index=0, parentID is bucketID OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( - dirs.get(0), objID++, parentID); + dirs.get(0), objID++, parentID); OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, - volumeName, bucketName, txnID, omMetadataManager); + volumeName, bucketName, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); // Add a key in second level. 
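The FSO hunk just below looks the file up with getOzonePathKey(volumeId, bucketId, parentID, fileName) rather than by its full path. A toy rendering of that key shape follows; the actual delimiter and prefix layout in OmMetadataManagerImpl is an assumption here, not taken from this patch.

// Toy sketch of an FSO path key: children hang off the parent's objectID,
// so renaming a directory never rewrites its children's rows.
static String ozonePathKey(long volumeId, long bucketId,
    long parentObjectId, String fileName) {
  return "/" + volumeId + "/" + bucketId + "/" + parentObjectId + "/" + fileName;
}
// ozonePathKey(3L, 10L, 501L, "file1") -> "/3/10/501/file1"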
- OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - RatisReplicationConfig.getInstance(THREE)) - .setObjectID(objID) - .build(); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, keyName, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, objID); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); final String ozoneKey = omMetadataManager.getOzonePathKey( - volumeId, bucketId, parentID, dirs.get(1)); + volumeId, bucketId, parentID, dirs.get(1)); ++txnID; omMetadataManager.getKeyTable(getBucketLayout()) .addCacheEntry(new CacheKey<>(ozoneKey), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 74b067a76a45..b39068fd7341 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -24,7 +24,6 @@ import java.util.UUID; import java.util.stream.Collectors; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -191,7 +190,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() .setBucketName(bucketName) .setBucketLayout(getBucketLayout()) .setQuotaInNamespace(1)); - + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); @@ -244,17 +243,19 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); testNonRecursivePath("a/b", false, false, true); - ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, replicationConfig, omMetadataManager); + "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/", 0L, replicationConfig, omMetadataManager); + "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/", 0L, replicationConfig, omMetadataManager); + "a/b/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/", 0L, replicationConfig, omMetadataManager); + "a/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -274,14 +275,14 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception { // Should be able to create file even if parent directories does not // exist and key already exist, as this is with overwrite enabled. 
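In the non-recursive create tests above, every parent prefix of the path must be present in the key table before the file can be created. The four addKeyToTable calls can be read as one loop over the prefixes; a sketch in test scope, where volumeName, bucketName, and omMetadataManager are the fields those tests already define.

// Seed every parent prefix of "a/b/c/d" (LEGACY layout stores paths as
// flat keys, so "a/", "a/b/", ... stand in for the directories).
for (String path : new String[] {"a/", "a/b/", "a/b/c/", "a/b/c/d"}) {
  OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
      path, 0L, HddsProtos.ReplicationType.RATIS,
      HddsProtos.ReplicationFactor.ONE, omMetadataManager);
}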
testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); - ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/f", 0L, replicationConfig, omMetadataManager); + "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath("c/d/e/f", true, true, false); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, replicationConfig, omMetadataManager); + "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath("a/b/c", false, true, false); } @@ -292,17 +293,16 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() String key = "c/d/e/f"; // Should be able to create file even if parent directories does not exist testNonRecursivePath(key, false, true, false); - + // 3 parent directory created c/d/e assertEquals(omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)) .getUsedNamespace(), 3); - + // Add the key to key table - ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, replicationConfig, omMetadataManager); + key, 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -315,21 +315,23 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() throws Exception { String key = "c/d/e/f"; - ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); // Need to add the path which starts with "c/d/e" to keyTable as this is // non-recursive parent should exist. 
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/", 0L, replicationConfig, omMetadataManager); + "c/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/", 0L, replicationConfig, omMetadataManager); + "c/d/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/", 0L, replicationConfig, omMetadataManager); + "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath(key, false, false, false); // Add the key to key table OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, replicationConfig, omMetadataManager); + key, 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index e988949c5b85..1b7b7452c82c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.request.file; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -29,11 +28,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.UUID; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -56,7 +55,8 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { "a/b/c", omMetadataManager); String fileNameD = "d"; OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/" + fileNameD, 0L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); + "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -80,7 +80,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, - "/test/a1/a2", ONE, + "/test/a1/a2", HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, true); // create bucket with quota limit 1 @@ -114,11 +114,11 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() // Add the key to key table OmDirectoryInfo omDirInfo = 
getDirInfo("c/d/e"); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(omDirInfo.getObjectID() + 10) - .setParentObjectID(omDirInfo.getObjectID()) - .setUpdateID(100) - .build(); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + omDirInfo.getObjectID() + 10, + omDirInfo.getObjectID(), 100, Time.now()); OMRequestTestUtils.addFileToKeyTable(false, false, "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager); @@ -136,22 +136,23 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String fileName = "f"; String key = parentDir + "/" + fileName; OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager, getBucketLayout()); + omMetadataManager, getBucketLayout()); // Create parent dirs for the path long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, - bucketName, parentDir, omMetadataManager); + bucketName, parentDir, omMetadataManager); // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is // non-recursive parent should exist. testNonRecursivePath(key, false, false, false); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(parentId + 1L) - .setParentObjectID(parentId) - .setUpdateID(100L) - .build(); - OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + parentId + 1, + parentId, 100, Time.now()); + OMRequestTestUtils.addFileToKeyTable(false, false, + fileName, omKeyInfo, -1, 50, omMetadataManager); // Even if key exists in KeyTable, should be able to create file as // overwrite is set to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java index 294281555a56..3a1ab92c1b5a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java @@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; @@ -36,6 +35,7 @@ .RecoverLeaseRequest; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; @@ -272,9 +272,8 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { String addToOpenFileTable(List locationList) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) - 
.setParentObjectID(parentId) - .build(); + bucketName, keyName, replicationType, replicationFactor, 0, parentId, + 0, Time.now(), version); omKeyInfo.appendNewBlocks(locationList, false); omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, String.valueOf(clientID)); @@ -295,9 +294,8 @@ bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new String addToFileTable(List locationList) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) - .setParentObjectID(parentId) - .build(); + bucketName, keyName, replicationType, replicationFactor, 0, parentId, + 0, Time.now(), version); omKeyInfo.appendNewBlocks(locationList, false); OMRequestTestUtils.addFileToKeyTable( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index 9fb0e79953e1..eb99cd932568 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -237,8 +236,7 @@ protected OMRequest createAllocateBlockRequest() { KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) - .setType(replicationConfig.getReplicationType()) + .setFactor(replicationFactor).setType(replicationType) .build(); AllocateBlockRequest allocateBlockRequest = @@ -255,8 +253,8 @@ protected OMRequest createAllocateBlockRequest() { protected String addKeyToOpenKeyTable(String volumeName, String bucketName) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, replicationConfig, - omMetadataManager); + keyName, clientID, replicationType, replicationFactor, + omMetadataManager); return ""; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 1ecbfed71624..33512d355c0d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -20,12 +20,10 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -33,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; /** @@ -66,11 +65,10 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(objectId) - .setParentObjectID(parentID) - .setUpdateID(txnId) - .build(); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, + Time.now()); // add key to openFileTable OMRequestTestUtils.addFileToKeyTable(true, false, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index cbb782e184fe..f040bd508177 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -68,7 +68,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationConfig, trxnIndex++, + key, clientID, replicationType, replicationFactor, trxnIndex++, omMetadataManager); String ozoneKey = omMetadataManager.getOzoneKey( volumeName, bucket, key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index b9aa70b4c7e8..c9559ff41e1f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -20,12 +20,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; - import java.util.List; import java.util.UUID; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -251,7 +247,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, + keyName, clientID, replicationType, replicationFactor, 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index ea9c3223de5a..48d92e608b3e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -26,8 +26,7 @@ import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import org.apache.hadoop.util.Time; /** * Test Key ACL requests for prefix layout. @@ -45,22 +44,20 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(parentId + 1L) - .setParentObjectID(parentId) - .setUpdateID(100L) - .build(); + OmKeyInfo omKeyInfo = OMRequestTestUtils + .createOmKeyInfo(volumeName, bucketName, key, + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + parentId + 1, parentId, 100, Time.now()); OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); final long volumeId = omMetadataManager.getVolumeId( - omKeyInfo.getVolumeName()); + omKeyInfo.getVolumeName()); final long bucketId = omMetadataManager.getBucketId( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); return omMetadataManager.getOzonePathKey( - volumeId, bucketId, omKeyInfo.getParentObjectID(), - fileName); + volumeId, bucketId, omKeyInfo.getParentObjectID(), + fileName); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 9719865db196..3251fff97490 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -26,11 +26,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -60,13 +56,10 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.when; /** * Class tests OMKeyCommitRequest class. @@ -562,17 +555,16 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { @Test public void testValidateAndUpdateCacheOnOverwrite() throws Exception { - when(ozoneManager.getObjectIdFromTxId(anyLong())).thenAnswer(tx -> - OmUtils.getObjectIdFromTxId(2, tx.getArgument(0))); testValidateAndUpdateCache(); // Become a new client and set next version number clientID = Time.now(); version += 1; - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest(getKeyLocation(10).subList(4, 10), false)); + OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); - OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); + OMKeyCommitRequest omKeyCommitRequest = + getOmKeyCommitRequest(modifiedOmRequest); KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs(); @@ -584,54 +576,49 @@ public void testValidateAndUpdateCacheOnOverwrite() throws Exception { assertNotNull(omKeyInfo); // Previously committed version - assertEquals(0L, omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(0L, + omKeyInfo.getLatestVersionLocations().getVersion()); // Append new blocks List allocatedLocationList = - keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + keyArgs.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); addKeyToOpenKeyTable(allocatedLocationList); OMClientResponse omClientResponse = omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 102L); - assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); // New entry should be created in key Table. - omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()).get(ozoneKey); + omKeyInfo = + omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()) + .get(ozoneKey); assertNotNull(omKeyInfo); - assertEquals(version, omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(version, + omKeyInfo.getLatestVersionLocations().getVersion()); // DB keyInfo format verifyKeyName(omKeyInfo); // Check modification time CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest(); - assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); + assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), + omKeyInfo.getModificationTime()); // Check block location. 
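The createCommitKeyRequest hunk below goes back to carrying replication on KeyArgs as raw proto enums instead of deriving it from a ReplicationConfig. A minimal sketch of that builder usage; the field values are illustrative.

// KeyArgs built directly from proto enums, as in the restored helper:
KeyArgs keyArgs = KeyArgs.newBuilder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setKeyName("key1")
    .setDataSize(1000L)
    .setType(HddsProtos.ReplicationType.RATIS)
    .setFactor(HddsProtos.ReplicationFactor.ONE)
    .build();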
List locationInfoListFromCommitKeyRequest = - commitKeyRequest.getKeyArgs().getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + commitKeyRequest.getKeyArgs() + .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); - assertEquals(locationInfoListFromCommitKeyRequest, omKeyInfo.getLatestVersionLocations().getLocationList()); - assertEquals(allocatedLocationList, omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(locationInfoListFromCommitKeyRequest, + omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(allocatedLocationList, + omKeyInfo.getLatestVersionLocations().getLocationList()); assertEquals(1, omKeyInfo.getKeyLocationVersions().size()); - - // flush response content to db - BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation(); - ((OMKeyCommitResponse) omClientResponse).addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // verify deleted key is unique generated - String deletedKey = omMetadataManager.getOzoneKey(volumeName, omKeyInfo.getBucketName(), keyName); - List> rangeKVs - = omMetadataManager.getDeletedTable().getRangeKVs(null, 100, deletedKey); - assertThat(rangeKVs.size()).isGreaterThan(0); - assertEquals(1, rangeKVs.get(0).getValue().getOmKeyInfoList().size()); - assertFalse(rangeKVs.get(0).getKey().endsWith(rangeKVs.get(0).getValue().getOmKeyInfoList().get(0).getObjectID() - + "")); } /** @@ -699,8 +686,7 @@ private OMRequest createCommitKeyRequest( KeyArgs keyArgs = KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName) .setKeyName(keyName).setBucketName(bucketName) - .setType(replicationConfig.getReplicationType()) - .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationType).setFactor(replicationFactor) .addAllKeyLocations(keyLocations).build(); CommitKeyRequest commitKeyRequest = @@ -745,7 +731,7 @@ protected String getOzonePathKey() throws IOException { protected String addKeyToOpenKeyTable(List locationList) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationConfig, omMetadataManager, + clientID, replicationType, replicationFactor, omMetadataManager, locationList, version); return omMetadataManager.getOpenKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index 48cc52773a33..d258c1cfde43 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -19,22 +19,19 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; 
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -81,12 +78,10 @@ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
     long objectId = 100;
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-                RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+            Time.now(), version);
     omKeyInfoFSO.appendNewBlocks(locationList, false);
 
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 5d79e7771520..12d9d02a72d6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -31,7 +31,6 @@
 import java.util.HashMap;
 
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -547,8 +546,7 @@ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber,
     KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setKeyName(keyName).setIsMultipartKey(isMultipartKey)
-        .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor())
-        .setType(replicationConfig.getReplicationType())
+        .setFactor(replicationFactor).setType(replicationType)
         .setLatestVersionLocation(true);
 
     if (isMultipartKey) {
@@ -795,7 +793,7 @@ private void verifyKeyInheritAcls(List<OzoneAcl> keyAcls,
 
   protected void addToKeyTable(String keyName) throws Exception {
     OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(1), 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
index 2a25a9b09686..0750c9512618 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -31,6 +31,7 @@
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
@@ -41,7 +42,6 @@
 import java.util.Arrays;
 import java.util.Collection;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -107,13 +107,12 @@ protected void addToKeyTable(String keyName) throws Exception {
     Path keyPath = Paths.get(keyName);
     long parentId = checkIntermediatePaths(keyPath);
     String fileName = OzoneFSUtils.getFileName(keyName);
-    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName,
-            RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(parentId + 1L)
-        .setParentObjectID(parentId)
-        .setUpdateID(100L)
-        .build();
-    OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager);
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, fileName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100,
+        Time.now());
+    OMRequestTestUtils.addFileToKeyTable(false, false,
+        fileName, omKeyInfo, -1, 50, omMetadataManager);
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index 9f1bee28c047..00d1883d749c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -192,8 +192,8 @@ protected String addKeyToTable() throws Exception {
 
   protected String addKeyToTable(String key) throws Exception {
     OMRequestTestUtils.addKeyToTable(false, volumeName,
-        bucketName, key, clientID, replicationConfig,
-        omMetadataManager);
+        bucketName, key, clientID, replicationType, replicationFactor,
+        omMetadataManager);
 
     return omMetadataManager.getOzoneKey(volumeName, bucketName, key);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
index 07094ad2923f..9dafab090295 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -34,6 +33,7 @@
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
@@ -72,11 +72,11 @@ protected String addKeyToTable() throws Exception {
         bucketName, PARENT_DIR, omMetadataManager);
 
     OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
     omKeyInfo.setKeyName(FILE_NAME);
     OMRequestTestUtils.addFileToKeyTable(false, false,
         FILE_NAME, omKeyInfo, -1, 50, omMetadataManager);
@@ -96,11 +96,11 @@ protected String addKeyToDirTable(String volumeName, String bucketName,
         bucketName, key, omMetadataManager);
 
     OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
     omKeyInfo.setKeyName(key);
     return omKeyInfo.getPath();
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
index ff3db1abbe20..a1d616c07563 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
@@ -76,7 +76,7 @@ private List<String> createAndDeleteKeys(Integer trxnIndex, String bucket)
     for (int i = 1; i <= numKeys; i++) {
       String key = keyName + "-" + i;
       OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket,
-          key, clientID, replicationConfig, trxnIndex++,
+          key, clientID, replicationType, replicationFactor, trxnIndex++,
           omMetadataManager);
       ozoneKeyNames.add(omMetadataManager.getOzoneKey(
           volumeName, bucket, key));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
index 0a2dcfd5d67a..a6015870d09b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
@@ -240,7 +240,7 @@ protected OMRequest createRenameKeyRequest(
 
   protected OmKeyInfo getOmKeyInfo(String keyName) {
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig).build();
+        replicationType, replicationFactor, 0L);
   }
 
   protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
index 40c5156b5dbe..c91b8e158214 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
@@ -18,14 +18,12 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -39,6 +37,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
@@ -180,10 +179,10 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
   @Override
   protected OmKeyInfo getOmKeyInfo(String keyName) {
     long bucketId = random.nextLong();
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(bucketId + 100L)
-        .setParentObjectID(bucketId + 101L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(
+        volumeName, bucketName, keyName,
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+        bucketId + 100L, bucketId + 101L, 0L, Time.now());
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 47b090f88d43..4fced8a7a8c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -25,7 +25,6 @@
 import java.util.UUID;
 
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -56,6 +55,7 @@
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -110,7 +110,8 @@ public class TestOMKeyRequest {
   protected String volumeName;
   protected String bucketName;
   protected String keyName;
-  protected ReplicationConfig replicationConfig;
+  protected HddsProtos.ReplicationType replicationType;
+  protected HddsProtos.ReplicationFactor replicationFactor;
   protected long clientID;
   protected long scmBlockSize = 1000L;
   protected long dataSize;
@@ -208,7 +209,8 @@ public void setup() throws Exception {
     volumeName = UUID.randomUUID().toString();
     bucketName = UUID.randomUUID().toString();
    keyName = UUID.randomUUID().toString();
-    replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.ONE);
+    replicationFactor = HddsProtos.ReplicationFactor.ONE;
+    replicationType = HddsProtos.ReplicationType.RATIS;
     clientID = Time.now();
     dataSize = 1000L;
     random = new Random();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
index d0cfd48e35dc..d48131de4bd3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -31,7 +31,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -146,7 +145,8 @@ protected void createPreRequisites() throws Exception {
     for (int i = 0; i < count; i++) {
       key = parentDir.concat("/key" + i);
       OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName,
-          parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+          parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE, omMetadataManager);
       deleteKeyArgs.addKeys(key);
       deleteKeyList.add(key);
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java
index 2da80550275a..f28ca2e2685f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
 
 /**
@@ -83,13 +83,11 @@ protected void createPreRequisites() throws Exception {
       long parentId = OMRequestTestUtils
          .addParentsToDirTable(volumeName, bucketName, dir, omMetadataManager);
 
-      OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, dir + "/" + file,
-                  RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(parentId + 1L)
-              .setParentObjectID(parentId)
-              .setUpdateID(100L)
-              .build();
+      OmKeyInfo omKeyInfo = OMRequestTestUtils
+          .createOmKeyInfo(volumeName, bucketName, dir + "/" + file,
+              HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100,
+              Time.now());
       omKeyInfo.setKeyName(file);
       OMRequestTestUtils
          .addFileToKeyTable(false, false, file, omKeyInfo, -1, 50,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java
index 340b6e36eb0b..3d429f4d6847 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java
@@ -18,14 +18,12 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -129,7 +127,8 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception {
       String key = parentDir.concat("/key" + i);
       String toKey = parentDir.concat("/newKey" + i);
       OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName,
-          parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+          parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE, omMetadataManager);
 
       RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder()
           .setFromKeyName(key)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java
index ad834fa556bf..bfae424cc954 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java
@@ -100,7 +100,7 @@ private OMRequest createSetTimesKeyRequest(long mtime, long atime) {
 
   protected String addKeyToTable() throws Exception {
     OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationConfig, 1L,
+        keyName, clientID, replicationType, replicationFactor, 1L,
         omMetadataManager);
 
     return omMetadataManager.getOzoneKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
index 0960125b0575..2cd9273c25a5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.junit.jupiter.api.Test;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -115,13 +115,10 @@ protected String addKeyToTable() throws Exception {
         .addParentsToDirTable(volumeName, bucketName, PARENT_DIR,
             omMetadataManager);
 
-    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME,
-                RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils
+        .createOmKeyInfo(volumeName, bucketName, FILE_NAME,
+            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+            parentId + 1, parentId, 100, Time.now());
     OMRequestTestUtils
         .addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50,
             omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
index f02e1ee23679..25c908b18a2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -28,15 +27,14 @@
 import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -53,6 +51,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
@@ -481,13 +480,10 @@ private List createMPUsWithFSO(String volume, String bucket,
           commitMultipartRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED);
 
       // Add key to open key table to be used in MPU commit processing
-      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName,
-          RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .setObjectID(parentID + j)
-          .setParentObjectID(parentID)
-          .setUpdateID(trxnLogIndex)
-          .build();
-
+      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
+          bucket, keyName, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, parentID + j, parentID,
+          trxnLogIndex, Time.now(), true);
       String fileName = OzoneFSUtils.getFileName(keyName);
       OMRequestTestUtils.addFileToKeyTable(true, false, fileName,
           omKeyInfo, clientID, trxnLogIndex, omMetadataManager);
@@ -567,7 +563,8 @@ private List createMPUs(String volume, String bucket,
       // Add key to open key table to be used in MPU commit processing
       OMRequestTestUtils.addKeyToTable(
           true, true,
-          volume, bucket, keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+          volume, bucket, keyName, clientID, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
       OMClientResponse commitResponse =
           s3MultipartUploadCommitPartRequest.validateAndUpdateCache(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index 014b4e021cb3..61c792a83de3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -24,8 +24,6 @@
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -226,8 +224,9 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception {
 
   protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
-    OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
-        keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager);
+    OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
+        keyName, clientID, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
   }
 
   protected String getKeyName() {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
index 24480c249cc8..4c8e4881d925 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
@@ -24,17 +24,15 @@
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.UUID;
 
 /**
@@ -70,16 +68,13 @@ protected String getKeyName() {
   protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     long txnLogId = 0L;
-    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
-        new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-        .setObjectID(parentID + 1)
-        .setParentObjectID(parentID)
-        .setUpdateID(txnLogId)
-        .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+        txnLogId, Time.now(), true);
     String fileName = OzoneFSUtils.getFileName(keyName);
     OMRequestTestUtils.addFileToKeyTable(true, false,
-        fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+            fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index 0a1ce8f7246f..733c790bcf17 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -29,7 +28,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
@@ -39,6 +37,7 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.junit.jupiter.api.Test;
 
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -316,7 +315,8 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError()
   protected void addKeyToTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
-        keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+        keyName, clientID, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
   }
 
   protected String getMultipartKey(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
index 1762f38b44bd..5926b5fd1d9c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
@@ -18,21 +18,18 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.UUID;
 
 /**
@@ -75,12 +72,10 @@ protected void addKeyToTable(String volumeName, String bucketName,
     long objectId = parentID + 1;
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-                RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now(), true);
 
     // add key to openFileTable
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
index a3e83986b531..45e5b1007531 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.snapshot;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -52,7 +52,6 @@
 import java.io.IOException;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf;
 import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey;
 import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest;
@@ -322,9 +321,8 @@ private void renameDir(String fromKey, String toKey, long offset)
       throws Exception {
     String fromKeyParentName = UUID.randomUUID().toString();
     OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE))
-        .setObjectID(100L)
-        .build();
+        bucketName, fromKeyParentName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, 100L);
 
     OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L);
     OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L);
@@ -383,8 +381,8 @@ public static OMSnapshotCreateRequest doPreExecute(
 
   private OmKeyInfo addKey(String keyName, long objectId) {
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId)
-        .build();
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE,
+        objectId);
   }
 
   protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
index 7d6487493861..811e13ac173e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
@@ -20,8 +20,6 @@
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -84,7 +82,7 @@ public void testAddToDBBatch() throws Exception {
     OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
         bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName),
-        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build();
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
 
     ThreadLocalRandom random = ThreadLocalRandom.current();
     long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
index c639c77c08e3..c7e2c265b7bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -40,11 +41,11 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse {
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig)
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index 88ef2964d17e..e5a6b0ab14f5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -92,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
 
   protected OmKeyInfo createOmKeyInfo() throws Exception {
     return OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig).build();
+        bucketName, keyName, replicationType, replicationFactor);
   }
 
   protected String getOpenKey() throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
index b574b8548132..85e9354ca8c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
@@ -18,19 +18,18 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-
 /**
  * Tests OMAllocateBlockResponse - prefix layout.
 */
@@ -50,11 +49,12 @@ protected OmKeyInfo createOmKeyInfo() throws Exception {
     long txnId = 50;
     long objectId = parentID + 1;
 
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(objectId)
-        .setParentObjectID(parentID)
-        .setUpdateID(txnId)
-        .build();
+    OmKeyInfo omKeyInfoFSO =
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now());
+    return omKeyInfoFSO;
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 89b179391cee..bb95c43107c3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -81,7 +81,7 @@ public void testAddToDBBatch() throws Exception {
   public void testAddToDBBatchNoOp() throws Exception {
 
     OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig).build();
+        bucketName, keyName, replicationType, replicationFactor);
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -135,7 +135,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception {
   @Nonnull
   protected void addKeyToOpenKeyTable() throws Exception {
     OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationConfig, omMetadataManager);
+        clientID, replicationType, replicationFactor, omMetadataManager);
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
index 32d55d3e961c..a1173e554325 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
@@ -18,19 +18,17 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
 import java.util.HashMap;
 import java.util.Map;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -64,11 +62,11 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig)
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
@@ -79,11 +77,11 @@ protected void addKeyToOpenKeyTable() throws Exception {
     long objectId = parentID + 10;
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+            Time.now());
+
     String fileName = OzoneFSUtils.getFileName(keyName);
     OMRequestTestUtils.addFileToKeyTable(true, false, fileName,
         omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
index 53d86e667367..ee83f3671277 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
@@ -18,15 +18,13 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -52,12 +50,11 @@ protected String getOpenKeyName() throws IOException {
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName,
-            RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index 6440edd0327c..4690b6f56f72 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -22,6 +22,7 @@
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -88,7 +89,8 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
     Pipeline pipeline = Pipeline.newBuilder()
         .setState(Pipeline.PipelineState.OPEN)
         .setId(PipelineID.randomId())
-        .setReplicationConfig(replicationConfig)
+        .setReplicationConfig(RatisReplicationConfig
+            .getInstance(replicationFactor))
         .setNodes(new ArrayList<>())
         .build();
 
@@ -165,7 +167,7 @@ protected String addKeyToTable() throws Exception {
         keyName);
 
     OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationConfig, omMetadataManager);
+        clientID, replicationType, replicationFactor, omMetadataManager);
 
     return ozoneKey;
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
index 557839f44f7a..fda72eb91243 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 
 /**
  * Tests OMKeyDeleteResponse - prefix layout.
@@ -51,11 +50,11 @@ protected String addKeyToTable() throws Exception { bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(parentId + 1) - .setParentObjectID(parentId) - .setUpdateID(100L) - .build(); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + parentId + 1, + parentId, 100, Time.now()); OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omMetadataManager.getOzonePathKey( @@ -67,12 +66,11 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(getOmBucketInfo()); - return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, - replicationConfig) - .setObjectID(getOmBucketInfo().getObjectID() + 1) - .setParentObjectID(getOmBucketInfo().getObjectID()) - .setUpdateID(100L) - .build(); + return OMRequestTestUtils.createOmKeyInfo(volumeName, + getOmBucketInfo().getBucketName(), keyName, replicationType, + replicationFactor, + getOmBucketInfo().getObjectID() + 1, + getOmBucketInfo().getObjectID(), 100, Time.now()); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 07c094cc98a1..2dcef56330f2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -154,10 +154,12 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo getOmKeyInfo(String keyName) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, 0L); } - protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { + protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, + String keyName) { return getOmKeyInfo(keyName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java index edbb50d66f86..f2f9ccaf872e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java @@ -18,17 +18,17 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; +import org.apache.hadoop.util.Time; import java.io.IOException; import java.util.UUID; -import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; /** @@ -38,21 +38,19 @@ public class TestOMKeyRenameResponseWithFSO extends TestOMKeyRenameResponse { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(bucketId + 100) - .setParentObjectID(bucketId + 101) - .build(); + return OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, keyName, + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + bucketId + 100L, bucketId + 101L, 0L, Time.now()); } @Override protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) - .setObjectID(toKeyInfo.getObjectID()) - .setParentObjectID(toKeyInfo.getParentObjectID()) - .setUpdateID(0L) - .setCreationTime(toKeyInfo.getCreationTime()) - .build(); + return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(), + toKeyInfo.getBucketName(), keyName, replicationType, + replicationFactor, toKeyInfo.getObjectID(), + toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime()); } @Override @@ -82,12 +80,12 @@ protected void createParent() { long bucketId = random.nextLong(); String fromKeyParentName = UUID.randomUUID().toString(); String toKeyParentName = UUID.randomUUID().toString(); - fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig) - .setObjectID(bucketId + 100L) - .build(); - toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig) - .setObjectID(bucketId + 101L) - .build(); + fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, fromKeyParentName, replicationType, replicationFactor, + bucketId + 100L); + toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, toKeyParentName, replicationType, replicationFactor, + bucketId + 101L); fromKeyParent.setParentObjectID(bucketId); toKeyParent.setParentObjectID(bucketId); fromKeyParent.setFileName(OzoneFSUtils.getFileName( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index bc4c34bd0db3..1cbf5c6d0b2d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -24,7 +24,6 @@ import java.util.Random; import java.util.UUID; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -60,7 +59,8 @@ public class TestOMKeyResponse { protected String volumeName; protected String bucketName; protected String keyName; - protected ReplicationConfig replicationConfig; + protected HddsProtos.ReplicationFactor replicationFactor; + protected HddsProtos.ReplicationType replicationType; protected OmBucketInfo omBucketInfo; protected long clientID; protected Random random; @@ -78,18 +78,18 @@ public void setup() throws Exception 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index bc4c34bd0db3..1cbf5c6d0b2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -24,7 +24,6 @@
 import java.util.Random;
 import java.util.UUID;
 
-import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -60,7 +59,8 @@ public class TestOMKeyResponse {
   protected String volumeName;
   protected String bucketName;
   protected String keyName;
-  protected ReplicationConfig replicationConfig;
+  protected HddsProtos.ReplicationFactor replicationFactor;
+  protected HddsProtos.ReplicationType replicationType;
   protected OmBucketInfo omBucketInfo;
   protected long clientID;
   protected Random random;
@@ -78,18 +78,18 @@ public void setup() throws Exception {
     volumeName = UUID.randomUUID().toString();
     bucketName = UUID.randomUUID().toString();
     keyName = UUID.randomUUID().toString();
-    replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
+    replicationFactor = HddsProtos.ReplicationFactor.ONE;
+    replicationType = HddsProtos.ReplicationType.RATIS;
     clientID = 1000L;
     random = new Random();
     keysToDelete = null;
 
     final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-        .setVolume(volumeName)
-        .setAdminName("admin")
-        .setOwnerName("owner")
-        .setObjectID(System.currentTimeMillis())
-        .build();
+            .setVolume(volumeName)
+            .setAdminName("admin")
+            .setOwnerName("owner")
+            .setObjectID(System.currentTimeMillis())
+            .build();
     omMetadataManager.getVolumeTable().addCacheEntry(
         new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)),
@@ -117,7 +117,8 @@ protected String getOpenKeyName() throws IOException {
 
   @Nonnull
   protected OmKeyInfo getOmKeyInfo() {
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+        replicationType, replicationFactor);
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
index 7a14e15a19bd..0c9c725c1b86 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -32,6 +31,7 @@
 import java.util.List;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
@@ -63,7 +63,7 @@ protected void createPreRequisities() throws Exception {
     for (int i = 0; i < 10; i++) {
       keyName = parent.concat(key + i);
       OMRequestTestUtils.addKeyToTable(false, volumeName,
-          bucketName, keyName, 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+          bucketName, keyName, 0L, RATIS, THREE, omMetadataManager);
       ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
       omKeyInfoList
           .add(omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
index 6a3a709c341c..fd70308c43d1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -33,13 +33,13 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -93,11 +93,10 @@ protected void createPreRequisities() throws Exception {
       keyName = keyPrefix + i;
 
       OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(dirId + 1)
-              .setParentObjectID(buckId)
-              .setUpdateID(dirId + 1)
-              .build();
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+              HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.ONE, dirId + 1, buckId,
+              dirId + 1, Time.now());
 
       ozoneDBKey = OMRequestTestUtils.addFileToKeyTable(false, false,
           keyName, omKeyInfo, -1, 50, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
index 72a76a1aca4f..0824f7c33de7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.OmRenameKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -33,6 +32,7 @@
 import java.util.Map;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
@@ -117,8 +117,7 @@ private void createPreRequisities() throws Exception {
       String key = parentDir.concat("/key" + i);
       String toKey = parentDir.concat("/newKey" + i);
       OMRequestTestUtils.addKeyToTable(false, volumeName,
-          bucketName, parentDir.concat("/key" + i), 0L,
-          RatisReplicationConfig.getInstance(THREE),
+          bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE,
           omMetadataManager);
 
       OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout())
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
index c9a4109809ed..f4f0e729f05d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
@@ -208,7 +208,7 @@ private Map addOpenKeysToDB(String volume, int numKeys,
       long parentID = random.nextLong();
 
       OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
-          bucket, key, replicationConfig).build();
+          bucket, key, replicationType, replicationFactor);
 
       if (keyLength > 0) {
         OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
index 35600c331f3f..b356dddd6b57 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
@@ -19,19 +19,15 @@
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartAbortInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -281,10 +277,10 @@ private Map> addMPUsToDB(
       OmBucketInfo omBucketInfo = OMRequestTestUtils.addBucketToDB(volume,
           bucket, omMetadataManager, getBucketLayout());
 
-      ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE);
-      final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig,
-          new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .build();
+      final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
+          bucket, keyName,
+          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+          0L, Time.now(), true);
 
       if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
         omKeyInfo.setParentObjectID(omBucketInfo.getObjectID());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
index e7a570350cff..47aa641c1ebb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
@@ -18,17 +18,14 @@
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -88,16 +85,14 @@ public void testAddDBToBatch() throws Exception {
     omMetadataManager.getStore().commitBatchOperation(batchOperation);
 
     String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId,
-        parentID, fileName, clientId);
+            parentID, fileName, clientId);
     String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId,
-        parentID, fileName);
+            parentID, fileName);
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+            OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now(), true);
 
     // add key to openFileTable
     omKeyInfoFSO.setKeyName(fileName);
@@ -180,11 +175,9 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception {
         parentID, fileName);
     OmKeyInfo omKeyInfoFSO =
         OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now(), true);
 
     // add key to openFileTable
     omKeyInfoFSO.setKeyName(fileName);
@@ -251,20 +244,20 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception {
     String keyName = getKeyName();
 
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
+            omMetadataManager);
     createParentPath(volumeName, bucketName);
 
     // Put an entry to delete table with the same key prior to multipart commit
-    OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-        .setObjectID(parentID + 8)
-        .setParentObjectID(parentID)
-        .setUpdateID(8)
-        .build();
+    OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, keyName,
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE,
+        parentID + 8,
+        parentID, 8, Time.now(), true);
     RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey);
     String ozoneKey = omMetadataManager
-        .getOzoneKey(prevKey.getVolumeName(),
-            prevKey.getBucketName(), prevKey.getFileName());
+            .getOzoneKey(prevKey.getVolumeName(),
+                    prevKey.getBucketName(), prevKey.getFileName());
     omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys);
 
     long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1);
@@ -319,12 +312,11 @@ private long runAddDBToBatchWithParts(String volumeName,
         omMetadataManager.getBucketTable().get(bucketKey);
 
     OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(parentID + 9)
-            .setParentObjectID(parentID)
-            .setUpdateID(100)
-            .build();
+            OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentID + 9,
+                    parentID, 100, Time.now(), true);
     List<OmKeyInfo> unUsedParts = new ArrayList<>();
     unUsedParts.add(omKeyInfo);
     S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
index 8dcb030d637a..c8a3faae4cca 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
@@ -23,10 +23,9 @@
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
 import org.apache.hadoop.ozone.om.KeyManager;
@@ -48,7 +47,6 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -131,11 +129,10 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception {
     for (int i = 0; i < 2000; ++i) {
       String keyName = "key" + longName + i;
       OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(dir1.getObjectID() + 1 + i)
-              .setParentObjectID(dir1.getObjectID())
-              .setUpdateID(100L)
-              .build();
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName,
+              keyName, HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.ONE, dir1.getObjectID() + 1 + i,
+              dir1.getObjectID(), 100, Time.now());
       OMRequestTestUtils.addFileToKeyTable(false, true,
           keyName, omKeyInfo, 1234L, i + 1, om.getMetadataManager());
     }
@@ -146,7 +143,7 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception {
         .setBucketName(bucketName)
         .setKeyName("dir" + longName)
         .setReplicationConfig(StandaloneReplicationConfig.getInstance(
-            ONE))
+            HddsProtos.ReplicationFactor.ONE))
         .setDataSize(0).setRecursive(true)
         .build();
     writeClient.deleteKey(delArgs);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
index 1a0db1183311..5ac7835f8ce6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
@@ -19,11 +19,8 @@
 
 package org.apache.hadoop.ozone.om.service;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -51,7 +48,8 @@ public void testQuotaRepair() throws Exception {
     String parentDir = "/user";
     for (int i = 0; i < count; i++) {
       OMRequestTestUtils.addKeyToTableAndCache(volumeName, bucketName,
-          parentDir.concat("/key" + i), -1, RatisReplicationConfig.getInstance(THREE), 150 + i, omMetadataManager);
+          parentDir.concat("/key" + i), -1, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE, 150 + i, omMetadataManager);
     }
 
     String fsoBucketName = "fso" + bucketName;
@@ -61,13 +59,12 @@ public void testQuotaRepair() throws Exception {
         fsoBucketName, "c/d/e", omMetadataManager);
     for (int i = 0; i < count; i++) {
       String fileName = "file1" + i;
-      OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, fsoBucketName, fileName,
-              RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(parentId + 1 + i)
-              .setParentObjectID(parentId)
-              .setUpdateID(100L + i)
-              .build();
+      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(
+          volumeName, fsoBucketName, fileName,
+          HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE,
+          parentId + 1 + i,
+          parentId, 100 + i, Time.now());
       omKeyInfo.setKeyName(fileName);
       OMRequestTestUtils.addFileToKeyTable(false, false,
           fileName, omKeyInfo, -1, 50 + i, omMetadataManager);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index baa9c522be10..84f55749a68f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -58,7 +58,6 @@
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
 import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
 import static
     org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
 import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
@@ -653,36 +652,6 @@ public Response getDeletedDirInfo(
     return Response.ok(deletedDirInsightInfo).build();
   }
 
-  /**
-   * Retrieves the summary of deleted directories.
-   *
-   * This method calculates and returns a summary of deleted directories.
-   * @return The HTTP response body includes a map with the following entries:
-   * - "totalDeletedDirectories": the total number of deleted directories
-   *
-   * Example response:
-   * {
-   *   "totalDeletedDirectories": 8,
-   * }
-   */
-  @GET
-  @Path("/deletePending/dirs/summary")
-  public Response getDeletedDirectorySummary() {
-    Map<String, Long> dirSummary = new HashMap<>();
-    // Create a keys summary for deleted directories
-    createSummaryForDeletedDirectories(dirSummary);
-    return Response.ok(dirSummary).build();
-  }
-
-  private void createSummaryForDeletedDirectories(
-      Map<String, Long> dirSummary) {
-    // Fetch the necessary metrics for deleted directories.
-    Long deletedDirCount = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE)));
-    // Calculate the total number of deleted directories
-    dirSummary.put("totalDeletedDirectories", deletedDirCount);
-  }
-
   private void updateReplicatedAndUnReplicatedTotal(
       KeyInsightInfoResponse deletedKeyAndDirInsightInfo,
       RepeatedOmKeyInfo repeatedOmKeyInfo) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java
deleted file mode 100644
index 5a6d7a256e49..000000000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.commons.lang3.tuple.Triple;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-/**
- * Manages records in the Deleted Table, updating counts and sizes of
- * pending Key Deletions in the backend.
- */
-public class DeletedKeysInsightHandler implements OmTableHandler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DeletedKeysInsightHandler.class);
-
-  /**
-   * Invoked by the process method to add information on those keys that have
-   * been backlogged in the backend for deletion.
-   */
-  @Override
-  public void handlePutEvent(OMDBUpdateEvent event,
-                             String tableName,
-                             HashMap<String, Long> objectCountMap,
-                             HashMap<String, Long> unReplicatedSizeMap,
-                             HashMap<String, Long> replicatedSizeMap) {
-
-    String countKey = getTableCountKeyFromTable(tableName);
-    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
-    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
-    if (event.getValue() != null) {
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          (RepeatedOmKeyInfo) event.getValue();
-      objectCountMap.computeIfPresent(countKey,
-          (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size());
-      Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
-      unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
-          (k, size) -> size + result.getLeft());
-      replicatedSizeMap.computeIfPresent(replicatedSizeKey,
-          (k, size) -> size + result.getRight());
-    } else {
-      LOG.warn("Put event does not have the Key Info for {}.",
-          event.getKey());
-    }
-
-  }
-
-  /**
-   * Invoked by the process method to remove information on those keys that have
-   * been successfully deleted from the backend.
-   */
-  @Override
-  public void handleDeleteEvent(OMDBUpdateEvent event,
-                                String tableName,
-                                HashMap<String, Long> objectCountMap,
-                                HashMap<String, Long> unReplicatedSizeMap,
-                                HashMap<String, Long> replicatedSizeMap) {
-
-    String countKey = getTableCountKeyFromTable(tableName);
-    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
-    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
-    if (event.getValue() != null) {
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          (RepeatedOmKeyInfo) event.getValue();
-      objectCountMap.computeIfPresent(countKey, (k, count) ->
-          count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L);
-      Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
-      unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
-          (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L);
-      replicatedSizeMap.computeIfPresent(replicatedSizeKey,
-          (k, size) -> size > result.getRight() ? size - result.getRight() :
-              0L);
-    } else {
-      LOG.warn("Delete event does not have the Key Info for {}.",
-          event.getKey());
-    }
-  }
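The handlers above rely on Map.computeIfPresent, so a statistic is only accumulated when its key was pre-seeded by the task's initialize*Map methods; events for tables that were never seeded fall through untouched. A self-contained sketch of that accumulation pattern (the table name and size value are made up for illustration):

    import java.util.HashMap;
    import java.util.Map;

    public final class StatsSketch {
      public static void main(String[] args) {
        Map<String, Long> sizes = new HashMap<>();
        sizes.put("deletedTableUnReplicatedDataSize", 0L);  // pre-seeded key

        // Seeded key: the lambda runs and the size is accumulated.
        sizes.computeIfPresent("deletedTableUnReplicatedDataSize",
            (k, size) -> size + 42L);
        // Unseeded key: silently ignored, mirroring the handlers above.
        sizes.computeIfPresent("someOtherTableUnReplicatedDataSize",
            (k, size) -> size + 42L);

        System.out.println(sizes);  // {deletedTableUnReplicatedDataSize=42}
      }
    }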
-
-  /**
-   * Invoked by the process method to update the statistics on the keys
-   * pending to be deleted.
-   */
-  @Override
-  public void handleUpdateEvent(OMDBUpdateEvent event,
-                                String tableName,
-                                HashMap<String, Long> objectCountMap,
-                                HashMap<String, Long> unReplicatedSizeMap,
-                                HashMap<String, Long> replicatedSizeMap) {
-    // The size of deleted keys cannot change hence no-op.
-    return;
-  }
-
-  /**
-   * Invoked by the reprocess method to calculate the records count of the
-   * deleted table and the sizes of replicated and unreplicated keys that are
-   * pending deletion in Ozone.
-   */
-  @Override
-  public Triple<Long, Long, Long> getTableSizeAndCount(
-      TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
-      throws IOException {
-    long count = 0;
-    long unReplicatedSize = 0;
-    long replicatedSize = 0;
-
-    if (iterator != null) {
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, ?> kv = iterator.next();
-        if (kv != null && kv.getValue() != null) {
-          RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv
-              .getValue();
-          Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
-          unReplicatedSize += result.getRight();
-          replicatedSize += result.getLeft();
-          count += repeatedOmKeyInfo.getOmKeyInfoList().size();
-        }
-      }
-    }
-    return Triple.of(count, unReplicatedSize, replicatedSize);
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java
deleted file mode 100644
index 5ae23b68a703..000000000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import org.apache.commons.lang3.tuple.Triple;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-/**
- * Interface for handling PUT, DELETE and UPDATE events for size-related
- * tables for OM Insights.
- */
-public interface OmTableHandler {
-
-  /**
-   * Handles a PUT event for size-related tables by updating both the data
-   * sizes and their corresponding record counts in the tables.
-   *
-   * @param event The PUT event to be processed.
-   * @param tableName Table name associated with the event.
-   * @param objectCountMap A map storing object counts.
-   * @param unReplicatedSizeMap A map storing unReplicated size counts.
-   * @param replicatedSizeMap A map storing replicated size counts.
-   */
-  void handlePutEvent(OMDBUpdateEvent event,
-                      String tableName,
-                      HashMap<String, Long> objectCountMap,
-                      HashMap<String, Long> unReplicatedSizeMap,
-                      HashMap<String, Long> replicatedSizeMap);
-
-
-  /**
-   * Handles a DELETE event for size-related tables by updating both the data
-   * sizes and their corresponding record counts in the tables.
-   *
-   * @param event The DELETE event to be processed.
-   * @param tableName Table name associated with the event.
-   * @param objectCountMap A map storing object counts.
-   * @param unReplicatedSizeMap A map storing unReplicated size counts.
-   * @param replicatedSizeMap A map storing replicated size counts.
-   */
-  void handleDeleteEvent(OMDBUpdateEvent event,
-                         String tableName,
-                         HashMap<String, Long> objectCountMap,
-                         HashMap<String, Long> unReplicatedSizeMap,
-                         HashMap<String, Long> replicatedSizeMap);
-
-
-  /**
-   * Handles an UPDATE event for size-related tables by updating both the data
-   * sizes and their corresponding record counts in the tables.
-   *
-   * @param event The UPDATE event to be processed.
-   * @param tableName Table name associated with the event.
-   * @param objectCountMap A map storing object counts.
-   * @param unReplicatedSizeMap A map storing unReplicated size counts.
-   * @param replicatedSizeMap A map storing replicated size counts.
-   */
-  void handleUpdateEvent(OMDBUpdateEvent event,
-                         String tableName,
-                         HashMap<String, Long> objectCountMap,
-                         HashMap<String, Long> unReplicatedSizeMap,
-                         HashMap<String, Long> replicatedSizeMap);
-
-
-  /**
-   * Returns a triple with the total count of records (left), total unreplicated
-   * size (middle), and total replicated size (right) in the given iterator.
-   * Increments count for each record and adds the dataSize if a record's value
-   * is an instance of OmKeyInfo,RepeatedOmKeyInfo.
-   * If the iterator is null, returns (0, 0, 0).
-   *
-   * @param iterator The iterator over the table to be iterated.
-   * @return A Triple with three Long values representing the count,
-   * unReplicated size and replicated size.
-   * @throws IOException If an I/O error occurs during the iterator traversal.
-   */
-  Triple<Long, Long, Long> getTableSizeAndCount(
-      TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
-      throws IOException;
-
-
-  /**
-   * Returns the count key for the given table.
-   *
-   * @param tableName The name of the table.
-   * @return The count key for the table.
-   */
-  default String getTableCountKeyFromTable(String tableName) {
-    return tableName + "Count";
-  }
-
-  /**
-   * Returns the replicated size key for the given table.
-   *
-   * @param tableName The name of the table.
-   * @return The replicated size key for the table.
-   */
-  default String getReplicatedSizeKeyFromTable(String tableName) {
-    return tableName + "ReplicatedDataSize";
-  }
-
-  /**
-   * Returns the unreplicated size key for the given table.
-   *
-   * @param tableName The name of the table.
-   * @return The unreplicated size key for the table.
-   */
-  default String getUnReplicatedSizeKeyFromTable(String tableName) {
-    return tableName + "UnReplicatedDataSize";
-  }
-}
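Both revisions of this code derive each GlobalStats row key from the table name: the interface deleted above does it through default methods, and the restored OmTableInsightTask (next file) keeps the identical convention as instance methods. The convention in isolation, with an example key assuming the usual "deletedTable" table name:

    // Stat-key naming convention carried over from OmTableHandler's defaults.
    static String countKey(String table) {
      return table + "Count";              // e.g. "deletedTable" -> "deletedTableCount"
    }
    static String replicatedSizeKey(String table) {
      return table + "ReplicatedDataSize";
    }
    static String unReplicatedSizeKey(String table) {
      return table + "UnReplicatedDataSize";
    }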
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
index 3e84f311c942..c814d9d9e33f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
@@ -26,6 +26,8 @@
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
 import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
@@ -35,20 +37,22 @@
 import java.io.IOException;
 import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
-import java.util.Collection;
+
+
 import java.util.Map.Entry;
-import java.util.ArrayList;
-import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.jooq.impl.DSL.currentTimestamp;
 import static org.jooq.impl.DSL.select;
 import static org.jooq.impl.DSL.using;
-import static org.jooq.impl.DSL.currentTimestamp;
 
 /**
  * Class to iterate over the OM DB and store the total counts of volumes,
@@ -61,21 +65,14 @@ public class OmTableInsightTask implements ReconOmTask {
   private GlobalStatsDao globalStatsDao;
   private Configuration sqlConfiguration;
   private ReconOMMetadataManager reconOMMetadataManager;
-  private Map<String, OmTableHandler> tableHandlers;
 
   @Inject
   public OmTableInsightTask(GlobalStatsDao globalStatsDao,
-                            Configuration sqlConfiguration,
-                            ReconOMMetadataManager reconOMMetadataManager) {
+                              Configuration sqlConfiguration,
+                              ReconOMMetadataManager reconOMMetadataManager) {
     this.globalStatsDao = globalStatsDao;
     this.sqlConfiguration = sqlConfiguration;
     this.reconOMMetadataManager = reconOMMetadataManager;
-
-    // Initialize table handlers
-    tableHandlers = new HashMap<>();
-    tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler());
-    tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler());
-    tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler());
   }
 
   /**
@@ -93,8 +90,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao,
   @Override
   public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
     HashMap<String, Long> objectCountMap = initializeCountMap();
-    HashMap<String, Long> unReplicatedSizeMap = initializeSizeMap(false);
-    HashMap<String, Long> replicatedSizeMap = initializeSizeMap(true);
+    HashMap<String, Long> unReplicatedSizeCountMap = initializeSizeMap(false);
+    HashMap<String, Long> replicatedSizeCountMap = initializeSizeMap(true);
 
     for (String tableName : getTaskTables()) {
       Table table = omMetadataManager.getTable(tableName);
@@ -103,16 +100,16 @@ public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
         return new ImmutablePair<>(getTaskName(), false);
       }
 
-      try (TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator
-          = table.iterator()) {
-        if (tableHandlers.containsKey(tableName)) {
-          Triple<Long, Long, Long> details =
-              tableHandlers.get(tableName).getTableSizeAndCount(iterator);
+      try (
+          TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator
+              = table.iterator()) {
+        if (getTablesToCalculateSize().contains(tableName)) {
+          Triple<Long, Long, Long> details = getTableSizeAndCount(iterator);
           objectCountMap.put(getTableCountKeyFromTable(tableName),
               details.getLeft());
-          unReplicatedSizeMap.put(
+          unReplicatedSizeCountMap.put(
               getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle());
-          replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName),
+          replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName),
              details.getRight());
         } else {
           long count = Iterators.size(iterator);
@@ -127,17 +124,72 @@ public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
     if (!objectCountMap.isEmpty()) {
       writeDataToDB(objectCountMap);
     }
-    if (!unReplicatedSizeMap.isEmpty()) {
-      writeDataToDB(unReplicatedSizeMap);
+    if (!unReplicatedSizeCountMap.isEmpty()) {
+      writeDataToDB(unReplicatedSizeCountMap);
     }
-    if (!replicatedSizeMap.isEmpty()) {
-      writeDataToDB(replicatedSizeMap);
+    if (!replicatedSizeCountMap.isEmpty()) {
+      writeDataToDB(replicatedSizeCountMap);
     }
 
     LOG.info("Completed a 'reprocess' run of OmTableInsightTask.");
     return new ImmutablePair<>(getTaskName(), true);
   }
 
+  /**
+   * Returns a triple with the total count of records (left), total unreplicated
+   * size (middle), and total replicated size (right) in the given iterator.
+   * Increments count for each record and adds the dataSize if a record's value
+   * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0).
+   *
+   * @param iterator The iterator over the table to be iterated.
+   * @return A Triple with three Long values representing the count,
+   * unreplicated size and replicated size.
+   * @throws IOException If an I/O error occurs during the iterator traversal.
+   */
+  private Triple<Long, Long, Long> getTableSizeAndCount(
+      TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
+      throws IOException {
+    long count = 0;
+    long unReplicatedSize = 0;
+    long replicatedSize = 0;
+
+    if (iterator != null) {
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, ?> kv = iterator.next();
+        if (kv != null && kv.getValue() != null) {
+          if (kv.getValue() instanceof OmKeyInfo) {
+            OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue();
+            unReplicatedSize += omKeyInfo.getDataSize();
+            replicatedSize += omKeyInfo.getReplicatedSize();
+            count++;
+          }
+          if (kv.getValue() instanceof RepeatedOmKeyInfo) {
+            RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv
+                .getValue();
+            Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+            unReplicatedSize += result.getRight();
+            replicatedSize += result.getLeft();
+            // Since we can have multiple deleted keys of same name
+            count += repeatedOmKeyInfo.getOmKeyInfoList().size();
+          }
+        }
+      }
+    }
+
+    return Triple.of(count, unReplicatedSize, replicatedSize);
+  }
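The getTableSizeAndCount method above folds one table scan into a (count, unreplicated size, replicated size) triple, counting every element of a RepeatedOmKeyInfo because the deleted table can carry several keys under one name. The same fold over plain collections, as a runnable illustration (the Key record is a stand-in for OmKeyInfo, not a real Ozone type):

    import java.util.List;

    // Stand-in for OmKeyInfo's two size accessors (an assumption for this sketch).
    record Key(long dataSize, long replicatedSize) {}

    static long[] sizeAndCount(List<List<Key>> table) {
      long count = 0, unReplicated = 0, replicated = 0;
      for (List<Key> sameName : table) {   // one list per DB row
        for (Key k : sameName) {           // several deleted keys may share a name
          unReplicated += k.dataSize();
          replicated += k.replicatedSize();
          count++;                         // count keys, not rows
        }
      }
      return new long[] {count, unReplicated, replicated};
    }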
+  /**
+   * Returns a collection of table names that require data size calculation.
+   */
+  public Collection<String> getTablesToCalculateSize() {
+    List<String> taskTables = new ArrayList<>();
+    taskTables.add(OPEN_KEY_TABLE);
+    taskTables.add(OPEN_FILE_TABLE);
+    taskTables.add(DELETED_TABLE);
+    return taskTables;
+  }
+
   @Override
   public String getTaskName() {
     return "OmTableInsightTask";
@@ -159,9 +211,10 @@ public Pair<String, Boolean> process(OMUpdateEventBatch events) {
     Iterator eventIterator = events.getIterator();
     // Initialize maps to store count and size information
     HashMap<String, Long> objectCountMap = initializeCountMap();
-    HashMap<String, Long> unReplicatedSizeMap = initializeSizeMap(false);
-    HashMap<String, Long> replicatedSizeMap = initializeSizeMap(true);
+    HashMap<String, Long> unreplicatedSizeCountMap = initializeSizeMap(false);
+    HashMap<String, Long> replicatedSizeCountMap = initializeSizeMap(true);
     final Collection<String> taskTables = getTaskTables();
+    final Collection<String> sizeRelatedTables = getTablesToCalculateSize();
 
     // Process each update event
     while (eventIterator.hasNext()) {
@@ -170,21 +223,22 @@ public Pair<String, Boolean> process(OMUpdateEventBatch events) {
       if (!taskTables.contains(tableName)) {
         continue;
       }
+
       try {
         switch (omdbUpdateEvent.getAction()) {
         case PUT:
-          handlePutEvent(omdbUpdateEvent, tableName, objectCountMap,
-              unReplicatedSizeMap, replicatedSizeMap);
+          handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
+              objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap);
           break;
 
         case DELETE:
-          handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap,
-              unReplicatedSizeMap, replicatedSizeMap);
+          handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
+              objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap);
           break;
 
         case UPDATE:
-          handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap,
-              unReplicatedSizeMap, replicatedSizeMap);
+          handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
+              objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap);
           break;
 
         default:
@@ -202,11 +256,11 @@ public Pair<String, Boolean> process(OMUpdateEventBatch events) {
     if (!objectCountMap.isEmpty()) {
       writeDataToDB(objectCountMap);
     }
-    if (!unReplicatedSizeMap.isEmpty()) {
-      writeDataToDB(unReplicatedSizeMap);
+    if (!unreplicatedSizeCountMap.isEmpty()) {
+      writeDataToDB(unreplicatedSizeCountMap);
     }
-    if (!replicatedSizeMap.isEmpty()) {
-      writeDataToDB(replicatedSizeMap);
+    if (!replicatedSizeCountMap.isEmpty()) {
+      writeDataToDB(replicatedSizeCountMap);
     }
     LOG.info("Completed a 'process' run of OmTableInsightTask.");
     return new ImmutablePair<>(getTaskName(), true);
@@ -214,34 +268,65 @@ public Pair<String, Boolean> process(OMUpdateEventBatch events) {
 
   private void handlePutEvent(OMDBUpdateEvent event,
                               String tableName,
+                              Collection<String> sizeRelatedTables,
                               HashMap<String, Long> objectCountMap,
-                              HashMap<String, Long> unReplicatedSizeMap,
-                              HashMap<String, Long> replicatedSizeMap)
-      throws IOException {
-    OmTableHandler tableHandler = tableHandlers.get(tableName);
-    if (event.getValue() != null) {
-      if (tableHandler != null) {
-        tableHandler.handlePutEvent(event, tableName, objectCountMap,
-            unReplicatedSizeMap, replicatedSizeMap);
-      } else {
-        String countKey = getTableCountKeyFromTable(tableName);
-        objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
-      }
+                              HashMap<String, Long> unreplicatedSizeCountMap,
+                              HashMap<String, Long> replicatedSizeCountMap) {
+
+    if (sizeRelatedTables.contains(tableName)) {
+      handleSizeRelatedTablePutEvent(event, tableName, objectCountMap,
+          unreplicatedSizeCountMap, replicatedSizeCountMap);
+    } else {
+      String countKey = getTableCountKeyFromTable(tableName);
+      objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
+    }
+  }
+
+  private void handleSizeRelatedTablePutEvent(
+      OMDBUpdateEvent event,
+      String tableName,
+      HashMap<String, Long> objectCountMap,
+      HashMap<String, Long> unreplicatedSizeCountMap,
+      HashMap<String, Long> replicatedSizeCountMap) {
+
+    String countKey = getTableCountKeyFromTable(tableName);
+    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+    if (event.getValue() instanceof OmKeyInfo) {
+      // Handle PUT for OpenKeyTable & OpenFileTable
+      OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
+      objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size + omKeyInfo.getDataSize());
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size + omKeyInfo.getReplicatedSize());
+    } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
+      // Handle PUT for DeletedTable
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          (RepeatedOmKeyInfo) event.getValue();
+      objectCountMap.computeIfPresent(countKey,
+          (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size());
+      Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size + result.getLeft());
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size + result.getRight());
     }
   }
 
   private void handleDeleteEvent(OMDBUpdateEvent event,
                                  String tableName,
+                                 Collection<String> sizeRelatedTables,
                                  HashMap<String, Long> objectCountMap,
-                                 HashMap<String, Long> unReplicatedSizeMap,
-                                 HashMap<String, Long> replicatedSizeMap)
-      throws IOException {
-    OmTableHandler tableHandler = tableHandlers.get(tableName);
+                                 HashMap<String, Long> unreplicatedSizeCountMap,
+                                 HashMap<String, Long> replicatedSizeCountMap) {
+
     if (event.getValue() != null) {
-      if (tableHandler != null) {
-        tableHandler.handleDeleteEvent(event, tableName, objectCountMap,
-            unReplicatedSizeMap, replicatedSizeMap);
+      if (sizeRelatedTables.contains(tableName)) {
+        handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap,
+            unreplicatedSizeCountMap, replicatedSizeCountMap);
       } else {
         String countKey = getTableCountKeyFromTable(tableName);
         objectCountMap.computeIfPresent(countKey,
@@ -250,28 +335,109 @@ private void handleDeleteEvent(OMDBUpdateEvent event,
     }
   }
 
+  private void handleSizeRelatedTableDeleteEvent(
+      OMDBUpdateEvent event,
+      String tableName,
+      HashMap<String, Long> objectCountMap,
+      HashMap<String, Long> unreplicatedSizeCountMap,
+      HashMap<String, Long> replicatedSizeCountMap) {
+
+    String countKey = getTableCountKeyFromTable(tableName);
+    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+    if (event.getValue() instanceof OmKeyInfo) {
+      // Handle DELETE for OpenKeyTable & OpenFileTable
+      OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
+      objectCountMap.computeIfPresent(countKey,
+          (k, count) -> count > 0 ? count - 1L : 0L);
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size > omKeyInfo.getDataSize() ?
+              size - omKeyInfo.getDataSize() : 0L);
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size > omKeyInfo.getReplicatedSize() ?
+              size - omKeyInfo.getReplicatedSize() : 0L);
+    } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
+      // Handle DELETE for DeletedTable
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          (RepeatedOmKeyInfo) event.getValue();
+      objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ?
+          count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L);
+      Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L);
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size > result.getRight() ? size - result.getRight() :
+              0L);
+    }
+  }
 
   private void handleUpdateEvent(OMDBUpdateEvent event,
                                  String tableName,
+                                 Collection<String> sizeRelatedTables,
                                  HashMap<String, Long> objectCountMap,
-                                 HashMap<String, Long> unReplicatedSizeMap,
-                                 HashMap<String, Long> replicatedSizeMap) {
+                                 HashMap<String, Long> unreplicatedSizeCountMap,
+                                 HashMap<String, Long> replicatedSizeCountMap) {
 
-    OmTableHandler tableHandler = tableHandlers.get(tableName);
     if (event.getValue() != null) {
-      if (tableHandler != null) {
+      if (sizeRelatedTables.contains(tableName)) {
         // Handle update for only size related tables
-        tableHandler.handleUpdateEvent(event, tableName, objectCountMap,
-            unReplicatedSizeMap, replicatedSizeMap);
+        handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap,
+            unreplicatedSizeCountMap, replicatedSizeCountMap);
       }
     }
   }
 
-  /**
-   * Write the updated count and size information to the database.
-   *
-   * @param dataMap Map containing the updated count and size information.
-   */
+
+  private void handleSizeRelatedTableUpdateEvent(
+      OMDBUpdateEvent event,
+      String tableName,
+      HashMap<String, Long> objectCountMap,
+      HashMap<String, Long> unreplicatedSizeCountMap,
+      HashMap<String, Long> replicatedSizeCountMap) {
+
+    if (event.getOldValue() == null) {
+      LOG.warn("Update event does not have the old Key Info for {}.",
+          event.getKey());
+      return;
+    }
+    String countKey = getTableCountKeyFromTable(tableName);
+    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+    // In Update event the count for the open table will not change. So we don't
+    // need to update the count. Except for RepeatedOmKeyInfo, for which the
+    // size of omKeyInfoList can change
+    if (event.getValue() instanceof OmKeyInfo) {
+      // Handle UPDATE for OpenKeyTable & OpenFileTable
+      OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue();
+      OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue();
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size - oldKeyInfo.getDataSize() +
+              newKeyInfo.getDataSize());
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size - oldKeyInfo.getReplicatedSize() +
+              newKeyInfo.getReplicatedSize());
+    } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
+      // Handle UPDATE for DeletedTable
+      RepeatedOmKeyInfo oldRepeatedOmKeyInfo =
+          (RepeatedOmKeyInfo) event.getOldValue();
+      RepeatedOmKeyInfo newRepeatedOmKeyInfo =
+          (RepeatedOmKeyInfo) event.getValue();
+      objectCountMap.computeIfPresent(countKey,
+          (k, count) -> count > 0 ?
+              count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() +
+              newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L);
+      Pair<Long, Long> oldSize = oldRepeatedOmKeyInfo.getTotalSize();
+      Pair<Long, Long> newSize = newRepeatedOmKeyInfo.getTotalSize();
+      unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
+          (k, size) -> size - oldSize.getLeft() + newSize.getLeft());
+      replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
+          (k, size) -> size - oldSize.getRight() + newSize.getRight());
+    }
+  }
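The delete and update paths above keep the global stats incremental rather than rescanning the tables: a delete subtracts an entry's contribution but clamps at zero, so a missed earlier event can never drive a gauge negative, while an update substitutes the new contribution for the old one. Reduced to the two arithmetic rules (hypothetical helper names, for illustration only):

    // Clamped decrement used by the DELETE handlers above: never go below zero,
    // even if earlier events were missed and the tracked total is already low.
    static long subtractClamped(long total, long contribution) {
      return total > contribution ? total - contribution : 0L;
    }

    // Substitution used by the UPDATE handlers: remove the old contribution,
    // add the new one.
    static long replaceContribution(long total, long oldSize, long newSize) {
      return total - oldSize + newSize;
    }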
+
+
   private void writeDataToDB(Map<String, Long> dataMap) {
     List<GlobalStats> insertGlobalStats = new ArrayList<>();
     List<GlobalStats> updateGlobalStats = new ArrayList<>();
@@ -295,11 +461,6 @@ private void writeDataToDB(Map<String, Long> dataMap) {
     globalStatsDao.update(updateGlobalStats);
   }
 
-  /**
-   * Initializes and returns a count map with the counts for the tables.
-   *
-   * @return The count map containing the counts for each table.
-   */
   private HashMap<String, Long> initializeCountMap() {
     Collection<String> tables = getTaskTables();
     HashMap<String, Long> objectCountMap = new HashMap<>(tables.size());
@@ -317,13 +478,11 @@ private HashMap<String, Long> initializeCountMap() {
    * @return The size map containing the size counts for each table.
    */
   private HashMap<String, Long> initializeSizeMap(boolean replicated) {
-    HashMap<String, Long> sizeCountMap = new HashMap<>();
-    for (Map.Entry<String, OmTableHandler> entry : tableHandlers.entrySet()) {
-      String tableName = entry.getKey();
-      OmTableHandler tableHandler = entry.getValue();
-      String key =
-          replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) :
-              tableHandler.getUnReplicatedSizeKeyFromTable(tableName);
+    Collection<String> tables = getTablesToCalculateSize();
+    HashMap<String, Long> sizeCountMap = new HashMap<>(tables.size());
+    for (String tableName : tables) {
+      String key = replicated ? getReplicatedSizeKeyFromTable(tableName) :
+          getUnReplicatedSizeKeyFromTable(tableName);
       sizeCountMap.put(key, getValueForKey(key));
     }
     return sizeCountMap;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java
deleted file mode 100644
index 7a27d29d8f28..000000000000
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.recon.tasks;
-
-import org.apache.commons.lang3.tuple.Triple;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-/**
- * Manages records in the OpenKey Table, updating counts and sizes of
- * open keys in the backend.
- */
-public class OpenKeysInsightHandler implements OmTableHandler {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OpenKeysInsightHandler.class);
-
-  /**
-   * Invoked by the process method to add information on those keys that have
-   * been open in the backend.
-   */
-  @Override
-  public void handlePutEvent(OMDBUpdateEvent event,
-                             String tableName,
-                             HashMap<String, Long> objectCountMap,
-                             HashMap<String, Long> unReplicatedSizeMap,
-                             HashMap<String, Long> replicatedSizeMap) {
-
-    String countKey = getTableCountKeyFromTable(tableName);
-    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
-    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
-    if (event.getValue() != null) {
-      OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
-      objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
-      unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
-          (k, size) -> size + omKeyInfo.getDataSize());
-      replicatedSizeMap.computeIfPresent(replicatedSizeKey,
-          (k, size) -> size + omKeyInfo.getReplicatedSize());
-    } else {
-      LOG.warn("Put event does not have the Key Info for {}.",
-          event.getKey());
-    }
-  }
-
-  /**
-   * Invoked by the process method to delete information on those keys that are
-   * no longer closed in the backend.
-   */
-  @Override
-  public void handleDeleteEvent(OMDBUpdateEvent event,
-                                String tableName,
-                                HashMap<String, Long> objectCountMap,
-                                HashMap<String, Long> unReplicatedSizeMap,
-                                HashMap<String, Long> replicatedSizeMap) {
-
-    String countKey = getTableCountKeyFromTable(tableName);
-    String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
-    String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
-    if (event.getValue() != null) {
-      OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
-      objectCountMap.computeIfPresent(countKey,
-          (k, count) -> count > 0 ? count - 1L : 0L);
-      unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
-          (k, size) -> size > omKeyInfo.getDataSize() ?
-              size - omKeyInfo.getDataSize() : 0L);
-      replicatedSizeMap.computeIfPresent(replicatedSizeKey,
-          (k, size) -> size > omKeyInfo.getReplicatedSize() ?
-              size - omKeyInfo.getReplicatedSize() : 0L);
-    } else {
-      LOG.warn("Delete event does not have the Key Info for {}.",
-          event.getKey());
-    }
-  }
-
-  /**
-   * Invoked by the process method to update information on those open keys that
-   * have been updated in the backend.
-   */
-  @Override
-  public void handleUpdateEvent(OMDBUpdateEvent event,
-                                String tableName,
-                                HashMap<String, Long> objectCountMap,
-                                HashMap<String, Long> unReplicatedSizeMap,
-                                HashMap<String, Long> replicatedSizeMap) {
-
-    if (event.getValue() != null) {
-      if (event.getOldValue() == null) {
-        LOG.warn("Update event does not have the old Key Info for {}.",
-            event.getKey());
-        return;
-      }
-      String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
-      String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
-      // In Update event the count for the open table will not change. So we
-      // don't need to update the count.
-      OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue();
-      OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue();
-      unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
-          (k, size) -> size - oldKeyInfo.getDataSize() +
-              newKeyInfo.getDataSize());
-      replicatedSizeMap.computeIfPresent(replicatedSizeKey,
-          (k, size) -> size - oldKeyInfo.getReplicatedSize() +
-              newKeyInfo.getReplicatedSize());
-    } else {
-      LOG.warn("Update event does not have the Key Info for {}.",
-          event.getKey());
-    }
-  }
-
-  /**
-   * This method is called by the reprocess method. It calculates the record
-   * counts for both the open key table and the open file table. Additionally,
-   * it computes the sizes of both replicated and unreplicated keys
-   * that are currently open in the backend.
-   */
-  @Override
-  public Triple<Long, Long, Long> getTableSizeAndCount(
-      TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
-      throws IOException {
-    long count = 0;
-    long unReplicatedSize = 0;
-    long replicatedSize = 0;
-
-    if (iterator != null) {
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, ?> kv = iterator.next();
-        if (kv != null && kv.getValue() != null) {
-          OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue();
-          unReplicatedSize += omKeyInfo.getDataSize();
-          replicatedSize += omKeyInfo.getReplicatedSize();
-          count++;
-        }
-      }
-    }
-    return Triple.of(count, unReplicatedSize, replicatedSize);
-  }
-
-}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
index b1aecc9a4f4e..42d69e030f31 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
@@ -397,31 +397,23 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager,
             .build());
   }
 
-  @SuppressWarnings("parameternumber")
   public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager,
                                          String bucketName,
                                          String volumeName,
                                          String dirName,
                                          long parentObjectId,
                                          long bucketObjectId,
-                                         long volumeObjectId,
-                                         long objectId)
+                                         long volumeObjectId)
       throws IOException {
-    // DB key in DeletedDirectoryTable =>
-    // "volumeID/bucketID/parentId/dirName/dirObjectId"
-
-    String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId,
-        bucketObjectId, parentObjectId, dirName);
-    String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey(
-        objectId, ozoneDbKey);
-
+    // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName"
+    String omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
+        bucketObjectId, parentObjectId, dirName);
 
-    omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey,
+    omMetadataManager.getDeletedDirTable().put(omKey,
         new OmKeyInfo.Builder()
             .setBucketName(bucketName)
             .setVolumeName(volumeName)
            .setKeyName(dirName)
-            .setObjectID(objectId)
             .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
            .build());
   }
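writeDeletedDirToOm above restores the four-component deletedDirectoryTable key, dropping the trailing directory object id that the removed variant appended via getOzoneDeletePathKey. Schematically, and only as an illustration of the component order stated in the comment (the real delimiter handling lives in getOzonePathKey):

    // "volumeID/bucketID/parentId/dirName" -- component order per the comment above.
    static String deletedDirKey(long volumeId, long bucketId,
        long parentId, String dirName) {
      return "/" + volumeId + "/" + bucketId + "/" + parentId + "/" + dirName;
    }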
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 42aabef0cf15..05d9927d6c93 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -288,9 +288,8 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = - new OmTableInsightTask(globalStatsDao, sqlConfiguration, - reconOMMetadataManager); + omTableInsightTask = new OmTableInsightTask( + globalStatsDao, sqlConfiguration, reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -516,11 +515,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L, 23L); + 3L, 2L, 1L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L, 22L); + 6L, 5L, 4L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L, 21L); + 9L, 8L, 7L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -595,7 +594,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -700,7 +699,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 56d8fe213152..df014f4276fa 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,28 +21,20 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import 
org.apache.hadoop.ozone.recon.ReconTestInjector; -import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; +import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; + import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -52,20 +44,18 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -76,83 +66,29 @@ import static org.mockito.Mockito.when; /** - * This test class is designed for the OM Table Insight Task. It conducts tests - * for tables that require both Size and Count, as well as for those that only - * require Count. + * Unit test for Object Count Task. 
 */
 public class TestOmTableInsightTask extends AbstractReconSqlDBTest {
   @TempDir
   private Path temporaryFolder;
-  private static GlobalStatsDao globalStatsDao;
-  private static OmTableInsightTask omTableInsightTask;
-  private static DSLContext dslContext;
+  private GlobalStatsDao globalStatsDao;
+  private OmTableInsightTask omTableInsightTask;
+  private DSLContext dslContext;
   private boolean isSetupDone = false;
-  private static ReconOMMetadataManager reconOMMetadataManager;
-  private static NSSummaryTaskWithFSO nSSummaryTaskWithFso;
-  private static OzoneConfiguration ozoneConfiguration;
-  private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
-
-  // Object names in FSO-enabled format
-  private static final String VOL = "volume1";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String KEY_ONE = "file1";
-  private static final String KEY_TWO = "file2";
-  private static final String KEY_THREE = "dir1/dir2/file3";
-  private static final String FILE_ONE = "file1";
-  private static final String FILE_TWO = "file2";
-  private static final String FILE_THREE = "file3";
-  private static final String DIR_ONE = "dir1";
-  private static final String DIR_TWO = "dir2";
-  private static final String DIR_THREE = "dir3";
-
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long DIR_ONE_OBJECT_ID = 14L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long DIR_TWO_OBJECT_ID = 17L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long DIR_THREE_OBJECT_ID = 10L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_SIZE = 1025L;
-  private static final long KEY_THREE_SIZE = 2000L;
-
-  // Mock client's path requests
-  private static final String TEST_USER = "TestUser";
-
-  @Mock
-  private Table<Long, NSSummary> nsSummaryTable;
+  private ReconOMMetadataManager reconOMMetadataManager;
 
   public TestOmTableInsightTask() {
     super();
   }
 
   private void initializeInjector() throws IOException {
-    ozoneConfiguration = new OzoneConfiguration();
     reconOMMetadataManager = getTestReconOmMetadataManager(
         initializeNewOmMetadataManager(Files.createDirectory(
             temporaryFolder.resolve("JunitOmDBDir")).toFile()),
         Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile());
     globalStatsDao = getDao(GlobalStatsDao.class);
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(temporaryFolder.toFile())
-            .withReconSqlDb()
-            .withReconOm(reconOMMetadataManager)
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager = reconTestInjector.getInstance(
-        ReconNamespaceSummaryManagerImpl.class);
-
     omTableInsightTask = new OmTableInsightTask(
         globalStatsDao, getConfiguration(), reconOMMetadataManager);
-    nSSummaryTaskWithFso = new NSSummaryTaskWithFSO(
-        reconNamespaceSummaryManager, reconOMMetadataManager,
-        ozoneConfiguration);
     dslContext = getDslContext();
   }
 
@@ -163,182 +99,10 @@ public void setUp() throws IOException {
       initializeInjector();
       isSetupDone = true;
     }
-    MockitoAnnotations.openMocks(this);
     // Truncate table before running each test
     dslContext.truncate(GLOBAL_STATS);
   }
 
-  /**
-   * Populate OM-DB with the following structure:
-   *                 volume1
-   *                /       \
-   *          bucket1       bucket2
-   *          /     \             \
-   *       dir1     dir2          dir3
-   *      /    \       \
-   *   file1  file2   file3
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-
-    // Create 2 buckets, bucket1 and bucket2
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .build();
-    String bucketKey = reconOMMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .build();
-    bucketKey = reconOMMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2);
-
-    // Create a single volume named volume1
-    String volumeKey = reconOMMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    // Generate keys for the File Table
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        FILE_ONE,
-        KEY_ONE_OBJECT_ID,
-        DIR_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_ONE,
-        VOL,
-        FILE_TWO,
-        KEY_TWO_OBJECT_ID,
-        DIR_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        FILE_THREE,
-        KEY_THREE_OBJECT_ID,
-        DIR_TWO_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-
-    // Generate deleted directories in OM
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_ONE,
-        VOL,
-        DIR_ONE,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_ONE_OBJECT_ID);
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_ONE,
-        VOL,
-        DIR_TWO,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_TWO_OBJECT_ID);
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_TWO,
-        VOL,
-        DIR_THREE,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_THREE_OBJECT_ID);
-  }
-
-  @Test
-  public void testReprocessForDeletedDirectory() throws Exception {
-    // Create keys and deleted directories
-    populateOMDB();
-
-    // Generate NamespaceSummary for the OM DB
-    nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
-
-    Pair<String, Boolean> result =
-        omTableInsightTask.reprocess(reconOMMetadataManager);
-    assertTrue(result.getRight());
-    assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
-  }
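// Editorial aside, not part of the patch: the test that follows builds
// deletedDirectoryTable keys of the form
// "/volumeId/bucketId/parentId/dirName/dirObjectId". A tiny sketch of that
// layout; the helper name and the IDs are illustrative only.
class DeletedDirKeySketch {
  static String deletedDirKey(long volumeId, long bucketId, long parentId,
      String dirName, long dirObjectId) {
    return "/" + volumeId + "/" + bucketId + "/" + parentId
        + "/" + dirName + "/" + dirObjectId;
  }

  public static void main(String[] args) {
    // Matches the first path used in the test below: "/18/28/22/dir1/1"
    System.out.println(deletedDirKey(18, 28, 22, "dir1", 1));
  }
}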
-
-  @Test
-  public void testProcessForDeletedDirectoryTable() throws IOException {
-    // Prepare mock data sizes
-    Long expectedSize1 = 1000L;
-    Long expectedSize2 = 2000L;
-    NSSummary nsSummary1 = new NSSummary();
-    NSSummary nsSummary2 = new NSSummary();
-    nsSummary1.setSizeOfFiles(expectedSize1);
-    nsSummary2.setSizeOfFiles(expectedSize2);
-    when(nsSummaryTable.get(1L)).thenReturn(nsSummary1);
-    when(nsSummaryTable.get(2L)).thenReturn(nsSummary1);
-    when(nsSummaryTable.get(3L)).thenReturn(nsSummary2);
-    when(nsSummaryTable.get(4L)).thenReturn(nsSummary2);
-    when(nsSummaryTable.get(5L)).thenReturn(nsSummary2);
-
-    /* DB key in DeletedDirectoryTable =>
-       "/volumeId/bucketId/parentId/dirName/dirObjectId" */
-    List<String> paths = Arrays.asList(
-        "/18/28/22/dir1/1",
-        "/18/26/23/dir1/2",
-        "/18/20/24/dir1/3",
-        "/18/21/25/dir1/4",
-        "/18/27/26/dir1/5"
-    );
-
-    // Testing PUT events
-    // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory
-    // paths
-    ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
-    for (long i = 0L; i < 5L; i++) {
-      putEvents.add(getOMUpdateEvent(paths.get((int) i),
-          getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false),
-          DELETED_DIR_TABLE, PUT, null));
-    }
-    OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
-    omTableInsightTask.process(putEventBatch);
-    assertEquals(5, getCountForTable(DELETED_DIR_TABLE));
-
-    // Testing DELETE events
-    // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory
-    // paths
-    ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
-    deleteEvents.add(getOMUpdateEvent(paths.get(0),
-        getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE,
-        DELETE, null));
-    deleteEvents.add(getOMUpdateEvent(paths.get(2),
-        getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE,
-        DELETE, null));
-    OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
-    omTableInsightTask.process(deleteEventBatch);
-    assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
-  }
-
   @Test
   public void testReprocessForCount() throws Exception {
     OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
@@ -346,32 +110,27 @@ public void testReprocessForCount() throws Exception {
     // Mock 5 rows in each table and test the count
     for (String tableName : omTableInsightTask.getTaskTables()) {
       TypedTable table = mock(TypedTable.class);
-      TypedTable.TypedTableIterator mockIter =
-          mock(TypedTable.TypedTableIterator.class);
+      TypedTable.TypedTableIterator mockIter = mock(TypedTable
+          .TypedTableIterator.class);
       when(table.iterator()).thenReturn(mockIter);
       when(omMetadataManager.getTable(tableName)).thenReturn(table);
-      when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false);
-
+      when(mockIter.hasNext())
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(false);
       TypedTable.TypedKeyValue mockKeyValue =
          mock(TypedTable.TypedKeyValue.class);
-
-      if (tableName.equals(DELETED_TABLE)) {
-        RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class);
-        when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L));
-        when(keyInfo.getOmKeyInfoList()).thenReturn(
-            Arrays.asList(mock(OmKeyInfo.class)));
-        when(mockKeyValue.getValue()).thenReturn(keyInfo);
-      } else {
-        when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
-      }
-
+      when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
       when(mockIter.next()).thenReturn(mockKeyValue);
     }
 
     Pair<String, Boolean> result =
        omTableInsightTask.reprocess(omMetadataManager);
-    assertTrue(result.getRight());
+
     assertEquals(5L, getCountForTable(KEY_TABLE));
     assertEquals(5L, getCountForTable(VOLUME_TABLE));
     assertEquals(5L, getCountForTable(BUCKET_TABLE));
@@ -379,6 +138,7 @@ public void testReprocessForCount() throws Exception {
     assertEquals(5L, getCountForTable(DELETED_TABLE));
   }
 
+  @Test
   public void testReprocessForOpenKeyTable() throws Exception {
     // Populate the OpenKeys table in OM DB
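// Editorial aside, not part of the patch: the hunk above rewrites a varargs
// thenReturn into chained thenReturn calls. Mockito treats the two forms of
// consecutive stubbing identically, as this self-contained sketch shows
// (assumes mockito-core on the classpath).
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Iterator;

class ConsecutiveStubbingSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    Iterator<String> varargsStyle = mock(Iterator.class);
    when(varargsStyle.hasNext()).thenReturn(true, true, false);

    Iterator<String> chainedStyle = mock(Iterator.class);
    when(chainedStyle.hasNext())
        .thenReturn(true)
        .thenReturn(true)
        .thenReturn(false);

    // Both mocks answer true, true, false on consecutive calls.
    System.out.println(varargsStyle.hasNext() == chainedStyle.hasNext());
    System.out.println(varargsStyle.hasNext() == chainedStyle.hasNext());
    System.out.println(varargsStyle.hasNext() == chainedStyle.hasNext());
  }
}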
@@ -443,73 +203,44 @@ public void testReprocessForDeletedTable() throws Exception {
 
   @Test
   public void testProcessForCount() {
-    List<OMDBUpdateEvent> initialEvents = new ArrayList<>();
-
-    // Creating events for each table except the deleted table
+    ArrayList<OMDBUpdateEvent> events = new ArrayList<>();
+    // Create 5 put, 1 delete and 1 update event for each table
     for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue; // Skipping deleted table as it has a separate test
-      }
-
-      // Adding 5 PUT events per table
       for (int i = 0; i < 5; i++) {
-        initialEvents.add(
-            getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT,
-                null));
+        events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null));
      }
-
-      // Adding 1 DELETE event where value is null, indicating non-existence
-      // in the database.
-      initialEvents.add(
-          getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE,
-              null));
-      // Adding 1 UPDATE event. This should not affect the count.
-      initialEvents.add(
-          getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE,
-              mock(OmKeyInfo.class)));
+      // For a delete event whose value is null, the counter is not
+      // decremented: a null value means the item does not exist in the
+      // database, so there is nothing to delete.
+      events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName,
+          DELETE, null));
+      events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null));
     }
+    OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events);
+    omTableInsightTask.process(omUpdateEventBatch);
 
-    // Processing the initial batch of events
-    OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents);
-    omTableInsightTask.process(initialBatch);
+    // Verify 4 items in each table. (5 puts - 1 delete + 0 update)
+    assertEquals(4L, getCountForTable(KEY_TABLE));
+    assertEquals(4L, getCountForTable(VOLUME_TABLE));
+    assertEquals(4L, getCountForTable(BUCKET_TABLE));
+    assertEquals(4L, getCountForTable(FILE_TABLE));
 
-    // Verifying the count in each table
+    // Add a new key and simulate a delete on a non-existing item (value: null)
+    ArrayList<OMDBUpdateEvent> newEvents = new ArrayList<>();
     for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      assertEquals(4L, getCountForTable(
-          tableName)); // 4 items expected after processing (5 puts - 1 delete)
+      newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null));
+      // This delete event should be a no-op since the value is null
+      newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null));
     }
 
-    List<OMDBUpdateEvent> additionalEvents = new ArrayList<>();
-    // Simulating new PUT and DELETE events
-    for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      // Adding 1 new PUT event
-      additionalEvents.add(
-          getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT,
-              null));
-      // Attempting to delete a non-existing item (value: null)
-      additionalEvents.add(
-          getOMUpdateEvent("item0", null, tableName, DELETE, null));
-    }
+    omUpdateEventBatch = new OMUpdateEventBatch(newEvents);
+    omTableInsightTask.process(omUpdateEventBatch);
 
-    // Processing the additional events
-    OMUpdateEventBatch additionalBatch =
-        new OMUpdateEventBatch(additionalEvents);
-    omTableInsightTask.process(additionalBatch);
-    // Verifying the final count in each table
-    for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      // 5 items expected after processing the additional events.
-      assertEquals(5L, getCountForTable(
-          tableName));
-    }
+    // Verify 5 items in each table. (1 new put + 0 delete)
+    assertEquals(5L, getCountForTable(KEY_TABLE));
+    assertEquals(5L, getCountForTable(VOLUME_TABLE));
+    assertEquals(5L, getCountForTable(BUCKET_TABLE));
+    assertEquals(5L, getCountForTable(FILE_TABLE));
   }
 
   @Test
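// Editorial aside, not part of the patch: the expected counts in the test
// above are plain event arithmetic per table, worked out here as a sketch.
class EventCountSketch {
  public static void main(String[] args) {
    long count = 0;
    count += 5;   // five PUT events
    count -= 1;   // one DELETE whose value exists in the table
    count += 0;   // UPDATE events never change the count
    System.out.println(count); // 4
    count += 1;   // one new PUT
    count -= 0;   // a DELETE with a null value is a no-op
    System.out.println(count); // 5
  }
}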
@@ -520,38 +251,35 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     Long sizeToBeReturned = 1000L;
     OmKeyInfo omKeyInfo = mock(OmKeyInfo.class);
     when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned);
     when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3);
 
-    // Test PUT events.
-    // Add 5 PUT events for OpenKeyTable and OpenFileTable.
+    // Test PUT events
     ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE;
-      putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null));
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+      for (int i = 0; i < 5; i++) {
+        putEvents.add(
+            getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null));
+      }
    }
-
     OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
     omTableInsightTask.process(putEventBatch);
-    // After 5 PUTs, size should be 5 * 1000 = 5000
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
 
     // Test DELETE events
     ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
-    // Delete "item0" for OpenKeyTable and OpenFileTable.
-    deleteEvents.add(
-        getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null));
-    deleteEvents.add(
-        getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null));
-
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+      // Delete "item0"
+      deleteEvents.add(
+          getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null));
+    }
     OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
     omTableInsightTask.process(deleteEventBatch);
 
     // After deleting "item0", size should be 4 * 1000 = 4000
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       assertEquals(4000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(12000L, getReplicatedSizeForTable(tableName));
     }
@@ -559,8 +287,7 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     // Test UPDATE events
     ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
     Long newSizeToBeReturned = 2000L;
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       // Update "item1" with a new size
       OmKeyInfo newKeyInfo = mock(OmKeyInfo.class);
       when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned);
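// Editorial aside, not part of the patch: the arithmetic asserted in the next
// hunk, worked out as a sketch. An update replaces the old size contribution
// with the new one, and replicated sizes are three times the data size because
// the mocks above return dataSize * 3.
class UpdateSizeMathSketch {
  public static void main(String[] args) {
    long present = 4000L;    // unreplicated size after 5 puts and 1 delete
    long oldValue = 1000L;   // "item1" before the update
    long newValue = 2000L;   // "item1" after the update
    System.out.println(present - oldValue + newValue);              // 5000
    System.out.println((present - oldValue + newValue) * 3);        // 15000
  }
}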
@@ -568,14 +295,12 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
       updateEvents.add(
          getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo));
     }
-
     OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
     omTableInsightTask.process(updateEventBatch);
 
     // After updating "item1", size should be 4000 - 1000 + 2000 = 5000
     // presentValue - oldValue + newValue = updatedValue
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
@@ -588,10 +313,9 @@ public void testProcessForDeletedTable() {
         new ImmutablePair<>(1000L, 3000L);
     ArrayList<OmKeyInfo> omKeyInfoList = new ArrayList<>();
     // Add 5 OmKeyInfo objects to the list
-    for (long i = 0; i < 5; i++) {
+    for (int i = 0; i < 5; i++) {
       OmKeyInfo omKeyInfo =
-          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1,
-              true);
+          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true);
       // Set properties of OmKeyInfo object if needed
       omKeyInfoList.add(omKeyInfo);
     }
@@ -629,14 +353,38 @@ public void testProcessForDeletedTable() {
     // After deleting "item0", size should be 4 * 1000 = 4000
     assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE));
     assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE));
+
+    // Test UPDATE events
+    ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
+    // Update "item1" with new sizes
+    ImmutablePair<Long, Long> newSizesToBeReturned =
+        new ImmutablePair<>(500L, 1500L);
+    RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class);
+    when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned);
+    when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(
+        omKeyInfoList.subList(1, 5));
+    // For "item1", the new unreplicated size is 500 and the key list shrinks
+    // from 5 to 4 entries
+    updateEvents.add(
+        getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE,
+            repeatedOmKeyInfo));
+    OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
+    omTableInsightTask.process(updateEventBatch);
+    // The update drops one key from the list, so the total count of deleted
+    // keys goes from 20 to 19
+    assertEquals(19L, getCountForTable(DELETED_TABLE));
+    // After updating "item1", size should be 4000 - 1000 + 500 = 3500
+    // presentValue - oldValue + newValue = updatedValue
+    assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE));
+    assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE));
   }
 
+
   private OMDBUpdateEvent getOMUpdateEvent(
       String name, Object value, String table,
       OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) {
-    return new OMDBUpdateEvent.OMUpdateEventBuilder()
+    return new OMUpdateEventBuilder()
        .setAction(action)
        .setKey(name)
        .setValue(value)
@@ -661,8 +409,7 @@ private long getReplicatedSizeForTable(String tableName) {
   }
 
   private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
-                                 String keyName, Long objectID,
-                                 boolean isFile) {
+                                 String keyName, boolean isFile) {
     return new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
@@ -671,7 +418,6 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
        .setReplicationConfig(StandaloneReplicationConfig
           .getInstance(HddsProtos.ReplicationFactor.ONE))
        .setDataSize(100L)
-       .setObjectID(objectID)
       .build();
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
index e9fb15e613fe..b79e49f834cb 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.ozone.client;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -61,7 +62,7 @@ public void createVolume(String volumeName) throws IOException {
.setAdmin("root") .setOwner("root") .setQuotaInBytes(Integer.MAX_VALUE) - .build()); + .setAcls(new ArrayList<>()).build()); } @Override
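// Editorial aside, not part of the patch: the hunk above seeds the stub's
// volume args with an empty ACL list instead of leaving it unset. A plausible
// motivation (assumption, not confirmed by the patch) is that downstream code
// iterates or copies the ACL collection and an empty list is safer than null.
// Everything below is an illustrative sketch; the class and field names are
// hypothetical, not the Ozone API.
import java.util.ArrayList;
import java.util.List;

class VolumeArgsSketch {
  private final List<String> acls;

  VolumeArgsSketch(List<String> acls) {
    // Defensive copy; normalizing null to an empty list lets callers iterate
    // the ACLs without a null check.
    this.acls = acls == null ? new ArrayList<>() : new ArrayList<>(acls);
  }

  int aclCount() {
    return acls.size();
  }

  public static void main(String[] args) {
    System.out.println(new VolumeArgsSketch(null).aclCount());            // 0
    System.out.println(new VolumeArgsSketch(List.of("user:root:rw")).aclCount()); // 1
  }
}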