diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 0a38e6604897..3bd0102a9cf5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -166,7 +166,7 @@ private synchronized void connectToDatanode(DatanodeDetails dn)
     // port.
     int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
     if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+      port = config.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,
           OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index aff0aa966a79..d01b7b0cca13 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -83,7 +83,7 @@ public static XceiverClientRatis newXceiverClientRatis(
       org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
       ConfigurationSource ozoneConf, ClientTrustManager trustManager) {
     final String rpcType = ozoneConf
-        .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+        .get(ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY,
             ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
     final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
     final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 69cce8db6d6b..d50c8b121784 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.server.RaftServerConfigKeys;
 
 import static java.util.Collections.unmodifiableSortedSet;
@@ -323,7 +324,35 @@ private static void addDeprecatedKeys() {
         new DeprecationDelta("ozone.scm.chunk.layout",
             ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY),
         new DeprecationDelta("hdds.datanode.replication.work.dir",
-            OZONE_CONTAINER_COPY_WORKDIR)
+            OZONE_CONTAINER_COPY_WORKDIR),
+        new DeprecationDelta("dfs.container.ratis.enabled",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY),
+        new DeprecationDelta("dfs.container.ratis.rpc.type",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY),
+        new DeprecationDelta("dfs.container.ratis.replication.level",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
+        new DeprecationDelta("dfs.container.ratis.num.container.op.executors",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.size",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.preallocated.size",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout",
+            ScmConfigKeys.OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
+        new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration",
+            ScmConfigKeys.OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
+        new DeprecationDelta("dfs.ratis.snapshot.threshold",
+            ScmConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY),
+        new DeprecationDelta("dfs.container.ipc",
+            OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT),
+        new DeprecationDelta("dfs.container.ipc.random.port",
+            OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.ipc",
+            OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_PORT),
+        new DeprecationDelta("dfs.container.ratis.ipc.random.port",
+            OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.datanode.storage.dir",
+            OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
     });
   }
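Aside (editorial, not part of the patch): the DeprecationDeltas above wire the old dfs.* names into Hadoop's configuration deprecation machinery, so existing cluster configs keep working after the rename. A minimal sketch of the resulting behaviour:

    // A value set under the deprecated name stays visible under the new key;
    // Hadoop's Configuration also logs a one-time deprecation warning.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("dfs.container.ratis.rpc.type", "NETTY");        // old key
    String rpcType = conf.get(
        ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY,     // "ozone.container.ratis.rpc.type"
        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
    // rpcType == "NETTY"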
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index cb7f6f8a3b31..6f357bbab309 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -234,7 +234,7 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
 
   private static RpcType getRpcType(ConfigurationSource conf) {
     return SupportedRpcType.valueOfIgnoreCase(conf.get(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+        ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT));
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index c6760451c693..ae86b24db587 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -41,12 +41,12 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_DB_DIRS_PERMISSIONS =
       "ozone.scm.db.dirs.permissions";
 
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
+  public static final String OZONE_CONTAINER_RATIS_ENABLED_KEY
+      = "ozone.container.ratis.enabled";
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
       = false;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
+  public static final String OZONE_CONTAINER_RATIS_RPC_TYPE_KEY
+      = "ozone.container.ratis.rpc.type";
   public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
       = "GRPC";
   public static final String
@@ -55,25 +55,25 @@ public final class ScmConfigKeys {
   public static final int
       DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = "dfs.container.ratis.replication.level";
+  public static final String OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
+      = "ozone.container.ratis.replication.level";
   public static final ReplicationLevel
       DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
-  public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = "dfs.container.ratis.num.container.op.executors";
+  public static final String OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
+      = "ozone.container.ratis.num.container.op.executors";
   public static final int
       DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
+      "ozone.container.ratis.segment.size";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB";
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
+      "ozone.container.ratis.segment.preallocated.size";
   public static final String
       DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
   public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      "dfs.container.ratis.statemachinedata.sync.timeout";
+      OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+      "ozone.container.ratis.statemachinedata.sync.timeout";
   public static final TimeDuration
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
       TimeDuration.valueOf(10, TimeUnit.SECONDS);
@@ -121,14 +121,14 @@ public final class ScmConfigKeys {
       DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
   public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
+      OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+      "ozone.ratis.leader.election.minimum.timeout.duration";
   public static final TimeDuration
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(5, TimeUnit.SECONDS);
 
-  public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      "dfs.ratis.snapshot.threshold";
+  public static final String OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY =
+      "ozone.ratis.snapshot.threshold";
   public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
 
   // TODO : this is copied from OzoneConsts, may need to move to a better place
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 7bfda0184096..f00efb0e3bac 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -36,8 +36,8 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public final class OzoneConfigKeys {
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
+  public static final String OZONE_CONTAINER_IPC_PORT =
+      "ozone.container.ipc.port";
   public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
 
   public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
@@ -56,11 +56,11 @@ public final class OzoneConfigKeys {
    * so that a mini cluster is able to launch multiple containers on a node.
    *
    * When set to false (default), the container port will be specified as
-   * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified
+   * {@link #OZONE_CONTAINER_IPC_PORT} and the default value will be specified
    * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}.
    */
-  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
+  public static final String OZONE_CONTAINER_IPC_RANDOM_PORT =
+      "ozone.container.ipc.random.port";
   public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false;
 
@@ -76,8 +76,8 @@ public final class OzoneConfigKeys {
   /**
    * Ratis Port where containers listen to.
    */
-  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
+  public static final String OZONE_CONTAINER_RATIS_IPC_PORT =
+      "ozone.container.ratis.ipc";
   public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
 
   /**
    * Ratis Port where containers listen to admin requests.
@@ -133,8 +133,8 @@ public final class OzoneConfigKeys {
    * When set to true, allocate a random free port for ozone container, so that
    * a mini cluster is able to launch multiple containers on a node.
    */
-  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
+  public static final String OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT =
+      "ozone.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
       false;
 
@@ -325,12 +325,12 @@ public final class OzoneConfigKeys {
   public static final int
       OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
 
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
+  public static final String OZONE_CONTAINER_RATIS_ENABLED_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY;
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
       = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
+  public static final String OZONE_CONTAINER_RATIS_RPC_TYPE_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY;
   public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
       = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
   public static final String
@@ -340,35 +340,35 @@ public final class OzoneConfigKeys {
       DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
       = ScmConfigKeys.
       DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
+  public static final String OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
   public static final ReplicationLevel
       DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
       = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY;
+  public static final String OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY;
   public static final int
       DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
       = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
       = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
+  public static final String OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
+      = ScmConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
   public static final String
       DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
 
   // config settings to enable stateMachineData write timeout
   public static final String
-      DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
+      OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+      ScmConfigKeys.OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
   public static final TimeDuration
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
 
-  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
+  public static final String OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
+      "ozone.container.ratis.datanode.storage.dir";
   public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
       ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
@@ -407,13 +407,13 @@ public final class OzoneConfigKeys {
       DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT;
   public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
+      OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+      ScmConfigKeys.OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
   public static final TimeDuration
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
       ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY;
+  public static final String OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY =
+      ScmConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY;
   public static final long
       DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT =
       ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT;
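Aside (editorial): the rename is deliberately asymmetric — only the *_KEY constants change, while the *_DEFAULT constants keep their DFS_ prefix. Call sites therefore pair a new key with an old default constant; a sketch of the read pattern used throughout this patch:

    // New key name, unchanged default constant (mirrors XceiverClientGrpc above).
    int containerPort = conf.getInt(
        OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,         // "ozone.container.ipc.port"
        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);  // still 9859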
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index bfb0547caf60..f970c58cdf7e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -46,7 +46,7 @@
   <property>
-    <name>dfs.container.ipc</name>
+    <name>ozone.container.ipc.port</name>
     <value>9859</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>The ipc port number of container.</description>
   </property>
@@ -73,7 +73,7 @@
   <property>
-    <name>dfs.container.ipc.random.port</name>
+    <name>ozone.container.ipc.random.port</name>
     <value>false</value>
     <tag>OZONE, DEBUG, CONTAINER</tag>
     <description>Allocates a random free port for ozone container. This is used
@@ -90,7 +90,7 @@
   <property>
-    <name>dfs.container.ratis.statemachinedata.sync.timeout</name>
+    <name>ozone.container.ratis.statemachinedata.sync.timeout</name>
     <value>10s</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Timeout for StateMachine data writes by Ratis.
@@ -102,7 +102,7 @@
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Number of times the WriteStateMachineData op will be tried
       before failing. If the value is not configured, it will default
-      to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout),
+      to (hdds.ratis.rpc.slowness.timeout / ozone.container.ratis.statemachinedata.sync.timeout),
       which means that the WriteStatMachineData will be retried for every sync
      timeout until the configured slowness timeout is hit, after which the
      StateMachine will close down the pipeline.
@@ -149,7 +149,7 @@
   <property>
-    <name>dfs.container.ratis.datanode.storage.dir</name>
+    <name>ozone.container.ratis.datanode.storage.dir</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
     <description>This directory is used for storing Ratis metadata like logs. If
@@ -223,7 +223,7 @@
   <property>
-    <name>dfs.container.ratis.enabled</name>
+    <name>ozone.container.ratis.enabled</name>
     <value>false</value>
     <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
     <description>Ozone supports different kinds of replication pipelines. Ratis
@@ -232,7 +232,7 @@
   <property>
-    <name>dfs.container.ratis.ipc</name>
+    <name>ozone.container.ratis.ipc</name>
     <value>9858</value>
     <tag>OZONE, CONTAINER, PIPELINE, RATIS</tag>
     <description>The ipc port number of container for clients.</description>
@@ -250,7 +250,7 @@
     <description>The ipc port number of container for server-server communication.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.ipc.random.port</name>
+    <name>ozone.container.ratis.ipc.random.port</name>
     <value>false</value>
     <tag>OZONE,DEBUG</tag>
     <description>Allocates a random free port for ozone ratis port for the
@@ -259,7 +259,7 @@
   <property>
-    <name>dfs.container.ratis.rpc.type</name>
+    <name>ozone.container.ratis.rpc.type</name>
     <value>GRPC</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>Ratis supports different kinds of transports like netty, GRPC,
@@ -268,7 +268,7 @@
   <property>
-    <name>dfs.ratis.snapshot.threshold</name>
+    <name>ozone.ratis.snapshot.threshold</name>
     <value>10000</value>
     <tag>OZONE, RATIS</tag>
     <description>Number of transactions after which a ratis snapshot should be
@@ -281,7 +281,7 @@
     <tag>OZONE, RATIS</tag>
     <description>Maximum number of pending apply transactions in a data
       pipeline. The default value is kept same as default snapshot threshold
-      dfs.ratis.snapshot.threshold.
+      ozone.ratis.snapshot.threshold.
     </description>
@@ -303,7 +303,7 @@
   <property>
-    <name>dfs.container.ratis.replication.level</name>
+    <name>ozone.container.ratis.replication.level</name>
     <value>MAJORITY</value>
     <tag>OZONE, RATIS</tag>
     <description>Replication level to be used by datanode for submitting a
@@ -312,7 +312,7 @@
   <property>
-    <name>dfs.container.ratis.num.container.op.executors</name>
+    <name>ozone.container.ratis.num.container.op.executors</name>
     <value>10</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>Number of executors that will be used by Ratis to execute
@@ -320,7 +320,7 @@
   <property>
-    <name>dfs.container.ratis.segment.size</name>
+    <name>ozone.container.ratis.segment.size</name>
     <value>64MB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The size of the raft segment file used
@@ -328,7 +328,7 @@
   <property>
-    <name>dfs.container.ratis.segment.preallocated.size</name>
+    <name>ozone.container.ratis.segment.preallocated.size</name>
     <value>4MB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The pre-allocated file size for raft segment used
@@ -342,7 +342,7 @@
     <description>Retry Cache entry timeout for ratis server.</description>
   </property>
   <property>
-    <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
+    <name>ozone.ratis.leader.election.minimum.timeout.duration</name>
     <value>5s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>The minimum timeout duration for ratis leader election.
@@ -707,7 +707,7 @@
       For production clusters or any time you care about performance, it is
       recommended that ozone.om.db.dirs, ozone.scm.db.dirs and
-      dfs.container.ratis.datanode.storage.dir be configured separately.
+      ozone.container.ratis.datanode.storage.dir be configured separately.
@@ -2226,14 +2226,6 @@
     <tag>OZONE, SECURITY, KERBEROS</tag>
     <description>The OzoneManager service principal. Ex om/_HOST@REALM.COM</description>
   </property>
-  <property>
-    <name>ozone.om.kerberos.principal.pattern</name>
-    <value>*</value>
-    <description>
-      A client-side RegEx that can be configured to control
-      allowed realms to authenticate with (useful in cross-realm env.)
-    </description>
-  </property>
   <property>
     <name>ozone.om.http.auth.kerberos.principal</name>
     <value>HTTP/_HOST@REALM</value>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 009e6396e0d2..48ab46ed93cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -99,10 +99,10 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails,
     this.id = datanodeDetails.getUuid();
     this.datanodeDetails = datanodeDetails;
-    this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+    this.port = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
 
-    if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
+    if (conf.getBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT,
         OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
       this.port = 0;
     }
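Aside (editorial): the random-port keys above are what mini-cluster tests flip so several datanodes can bind on one host; a test-style sketch mirroring the updated tests later in this patch:

    // Force ephemeral container ports; XceiverServerGrpc/XceiverServerRatis then
    // bind to port 0 and the OS picks a free port.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, true);
    conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true);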
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index e3c2913ec5af..59a10234739b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -238,7 +238,7 @@ public ContainerStateMachine(RaftGroupId gid,
     this.container2BCSIDMap = new ConcurrentHashMap<>();
 
     final int numContainerOpExecutors = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
     int maxPendingApplyTransactions = conf.getInt(
         ScmConfigKeys.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index fcc611ea3f10..564fa8025a8b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -115,7 +115,7 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
 import static org.apache.ratis.util.Preconditions.assertTrue;
 
 /**
@@ -217,7 +217,7 @@ private XceiverServerRatis(DatanodeDetails dd,
 
   private void assignPorts() {
     clientPort = determinePort(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
 
     if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion())
@@ -236,7 +236,7 @@ private void assignPorts() {
 
   private int determinePort(String key, int defaultValue) {
     boolean randomPort = conf.getBoolean(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT);
     return randomPort ? 0 : conf.getInt(key, defaultValue);
   }
@@ -327,7 +327,7 @@ public RaftProperties newRaftProperties() {
     }
 
     long snapshotThreshold =
-        conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY,
+        conf.getLong(OzoneConfigKeys.OZONE_RATIS_SNAPSHOT_THRESHOLD_KEY,
             OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT);
     RaftServerConfigKeys.Snapshot.
         setAutoTriggerEnabled(properties, true);
@@ -378,7 +378,7 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) {
         DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
             .getUnit();
     duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.OZONE_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
         OzoneConfigKeys.
             DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
             .getDuration(), leaderElectionMinTimeoutUnit);
@@ -410,7 +410,7 @@ private void setTimeoutForRetryCache(RaftProperties properties) {
 
   private long setRaftSegmentPreallocatedSize(RaftProperties properties) {
     final long raftSegmentPreallocatedSize = (long) conf.getStorageSize(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
         StorageUnit.BYTES);
     RaftServerConfigKeys.Log.setPreallocatedSize(properties,
@@ -428,7 +428,7 @@ private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) {
         StorageUnit.BYTES);
 
     final long raftSegmentSize = (long) conf.getStorageSize(
-        DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
+        OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
         DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
         StorageUnit.BYTES);
     final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8;
@@ -436,7 +436,7 @@ private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) {
     assertTrue(raftSegmentBufferSize <= raftSegmentSize,
         () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT
             + " = " + logAppenderQueueByteLimit
-            + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8"
+            + " must be <= (" + OZONE_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8"
             + " = " + (raftSegmentSize - 8) + ")");
 
     RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties,
@@ -456,7 +456,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) {
     TimeUnit timeUnit = OzoneConfigKeys.
         DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit();
     long duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
         OzoneConfigKeys.
             DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT
             .getDuration(), timeUnit);
@@ -507,7 +507,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) {
 
   private RpcType setRpcType(RaftProperties properties) {
     final String rpcType = conf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_RPC_TYPE_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
     final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
     RatisHelper.setRpcType(properties, rpc);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 33bc4a851664..8733f4f6cfd6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() {
 
   public static XceiverServerRatis newXceiverServerRatis(
       DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_PORT,
         dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
 
     return XceiverServerRatis.newXceiverServerRatis(dn, conf,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 7917a4ce55cd..8452f233b379 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -155,7 +155,7 @@ public static HddsProtos.ReplicationFactor getReplicationFactor(
 
   private static boolean isUseRatis(ConfigurationSource c) {
     return c.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
+        ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 5738f5c1106e..b81d891e09e3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -82,8 +82,8 @@ void setUp() throws Exception {
     conf = SCMTestUtils.getConf(testRoot);
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
         TimeUnit.MILLISECONDS);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT, true);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
         true);
     conf.setBoolean(
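Aside (editorial): the renamed segment-size key feeds the sanity check in setRaftSegmentAndWriteBufferSize() above; a sketch of the enforced invariant, with illustrative numbers rather than defaults:

    // The appender buffer (queue byte limit + 8 bytes of overhead) must fit
    // inside one raft segment.
    long raftSegmentSize = 64L << 20;            // ozone.container.ratis.segment.size = 64MB
    long logAppenderQueueByteLimit = 32L << 20;  // example value
    long raftSegmentBufferSize = logAppenderQueueByteLimit + 8;
    if (raftSegmentBufferSize > raftSegmentSize) {
      throw new IllegalStateException("log appender queue byte limit must be"
          + " <= (ozone.container.ratis.segment.size - 8)");
    }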
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 565853c22dde..3d189416368c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() {
   public void testConf() throws Exception {
     final OzoneConfiguration conf = new OzoneConfiguration();
     final String dir = "dummy/dir";
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
     final DatanodeRatisServerConfig ratisConf = conf.getObject(
         DatanodeRatisServerConfig.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
index 3859cd47c9b9..d5c7cd05022d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
@@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker {
   public void setup() throws IOException {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString());
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 1159d4277c78..fc499737e3dc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -82,7 +82,7 @@ public void setup() throws Exception {
     volumes.add(volume1);
     volumes.add(volume2);
     conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         dataDirKey);
     initializeVolumeSet();
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index e3c610bfe47a..5d14663c775e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -228,7 +228,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
     for (int i = 0; i < numDirs; ++i) {
       metaDirs.add(new File(dir, randomAlphanumeric(10)).toString());
     }
-    ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+    ozoneConf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         String.join(",", metaDirs));
 
     final List<String> dbDirs = new ArrayList<>();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 7f38eab785b8..56d41c3d93e1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
     BlockUtils.shutdownCache(conf);
 
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString());
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         datanodeDirs.toString());
     MutableVolumeSet volumeSets =
         new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 497418dcdcb9..9223976d2f03 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo)
       throws Exception {
     initTest(versionInfo);
     String path = folder.toString();
-    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         String.join(",",
             path + "/ratis1", path + "/ratis2", path + "ratis3"));
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 70d394e73b31..af06ce421fa3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) {
    * @return port number.
    */
   public static int getContainerPort(ConfigurationSource conf) {
-    return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+    return conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
   }
 
   public static Collection<String> getOzoneDatanodeRatisDirectory(
       ConfigurationSource conf) {
     Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+        OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
 
     if (rawLocations.isEmpty()) {
       rawLocations = new ArrayList<>(1);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index b241ac0f2d28..a84ecb6f7852 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException,
         testDir.getAbsolutePath());
     conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
         SCMContainerPlacementCapacity.class, PlacementPolicy.class);
-    conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
+    conf.setBoolean(ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY, true);
 
     SCMNodeManager scmNodeManager = createNodeManager(conf);
     containerManager = createContainerManager();
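Aside (editorial): a short usage sketch of the two HddsServerUtil helpers touched above (the directory paths are hypothetical):

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OzoneConfigKeys.OZONE_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
        "/data/ratis1,/data/ratis2");                          // hypothetical dirs
    int port = HddsServerUtil.getContainerPort(conf);          // 9859 unless overridden
    Collection<String> ratisDirs =
        HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);   // the two dirs above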
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 58f65df8fd85..6856a6885895 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -179,9 +179,9 @@ public void testGetVersionTask() throws Exception {
    */
   @Test
   public void testDeletedContainersClearedOnStartup() throws Exception {
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
+    ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT,
         true);
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+    ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
     ozoneConf.setFromObject(new ReplicationConfig().setPort(0));
     try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
@@ -217,9 +217,9 @@ public void testDeletedContainersClearedOnStartup() throws Exception {
 
   @Test
   public void testCheckVersionResponse() throws Exception {
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
+    ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_IPC_RANDOM_PORT,
         true);
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+    ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
     ozoneConf.setBoolean(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
@@ -267,7 +267,7 @@ public void testCheckVersionResponse() throws Exception {
    */
   @Test
   public void testDnLayoutVersionFile() throws Exception {
-    ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+    ozoneConf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
     try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
         serverAddress, 1000)) {
@@ -579,7 +579,7 @@ private StateContext heartbeatTaskHelper(
     // Mini Ozone cluster will not come up if the port is not true, since
     // Ratis will exit if the server port cannot be bound. We can remove this
     // hard coding once we fix the Ratis default behaviour.
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
 
     // Create a datanode state machine for stateConext used by endpoint task
     try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index d07e696e7ef0..387e4624bede 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -98,7 +98,7 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException {
     containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
         OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     boolean useRatis = conf.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
+        ScmConfigKeys.OZONE_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
     if (useRatis) {
       replicationFactor = HddsProtos.ReplicationFactor.THREE;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
index a1c9cd55bb3f..9d683c5393c2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
@@ -18,13 +18,10 @@
 package org.apache.hadoop.ozone.client;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -39,8 +36,8 @@ public final class VolumeArgs {
   private final String owner;
   private final long quotaInBytes;
   private final long quotaInNamespace;
-  private final ImmutableList<OzoneAcl> acls;
-  private final ImmutableMap<String, String> metadata;
+  private final List<OzoneAcl> acls;
+  private Map<String, String> metadata;
 
   /**
    * Private constructor, constructed via builder.
@@ -61,8 +58,8 @@ private VolumeArgs(String admin,
     this.owner = owner;
     this.quotaInBytes = quotaInBytes;
     this.quotaInNamespace = quotaInNamespace;
-    this.acls = acls == null ? ImmutableList.of() : ImmutableList.copyOf(acls);
-    this.metadata = metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(metadata);
+    this.acls = acls;
+    this.metadata = metadata;
   }
 
   /**
@@ -110,20 +107,34 @@ public List<OzoneAcl> getAcls() {
     return acls;
   }
 
+  /**
+   * Returns new builder class that builds a OmVolumeArgs.
+   *
+   * @return Builder
+   */
   public static VolumeArgs.Builder newBuilder() {
     return new VolumeArgs.Builder();
   }
 
   /**
-   * Builder for VolumeArgs.
+   * Builder for OmVolumeArgs.
    */
+  @SuppressWarnings("checkstyle:hiddenfield")
   public static class Builder {
     private String adminName;
     private String ownerName;
-    private long quotaInBytes = OzoneConsts.QUOTA_RESET;
-    private long quotaInNamespace = OzoneConsts.QUOTA_RESET;
-    private List<OzoneAcl> acls;
-    private Map<String, String> metadata;
+    private long quotaInBytes;
+    private long quotaInNamespace;
+    private List<OzoneAcl> listOfAcls;
+    private Map<String, String> metadata = new HashMap<>();
+
+    /**
+     * Constructs a builder.
+     */
+    public Builder() {
+      quotaInBytes = OzoneConsts.QUOTA_RESET;
+      quotaInNamespace = OzoneConsts.QUOTA_RESET;
+    }
 
     public VolumeArgs.Builder setAdmin(String admin) {
       this.adminName = admin;
@@ -146,18 +157,12 @@ public VolumeArgs.Builder setQuotaInNamespace(long quota) {
     }
 
     public VolumeArgs.Builder addMetadata(String key, String value) {
-      if (metadata == null) {
-        metadata = new HashMap<>();
-      }
       metadata.put(key, value);
       return this;
     }
 
-    public VolumeArgs.Builder addAcl(OzoneAcl acl)
+    public VolumeArgs.Builder setAcls(List<OzoneAcl> acls)
         throws IOException {
-      if (acls == null) {
-        acls = new ArrayList<>();
-      }
-      acls.add(acl);
+      this.listOfAcls = acls;
       return this;
     }
 
@@ -167,7 +172,7 @@ public VolumeArgs.Builder addAcl(OzoneAcl acl)
      */
     public VolumeArgs build() {
       return new VolumeArgs(adminName, ownerName, quotaInBytes,
-          quotaInNamespace, acls, metadata);
+          quotaInNamespace, listOfAcls, metadata);
     }
   }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 8343b8740169..7e1e6fe45602 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -430,9 +430,8 @@ public void createVolume(String volumeName, VolumeArgs volArgs)
     userGroups.stream().forEach((group) -> listOfAcls.add(
         new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS)));
     //ACLs from VolumeArgs
-    List<OzoneAcl> volumeAcls = volArgs.getAcls();
-    if (volumeAcls != null) {
-      listOfAcls.addAll(volumeAcls);
+    if (volArgs.getAcls() != null) {
+      listOfAcls.addAll(volArgs.getAcls());
     }
 
     OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index faa5096baf98..5dd7579eb916 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -289,8 +289,6 @@ private OMConfigKeys() {
       + "kerberos.keytab.file";
   public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
       + ".kerberos.principal";
-  public static final String OZONE_OM_KERBEROS_PRINCIPAL_PATTERN_KEY =
-      "ozone.om.kerberos.principal.pattern";
   public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE =
       "ozone.om.http.auth.kerberos.keytab";
   public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY
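Aside (editorial): with the Builder change above, ACLs are supplied as a whole list via setAcls(...) instead of one at a time. A client-side sketch — names, metadata, and the ACL values are illustrative, and setAcls declares throws IOException:

    List<OzoneAcl> acls = new ArrayList<>();
    acls.add(new OzoneAcl(ACLIdentityType.USER, "hive", ACLType.ALL, ACCESS));
    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
        .setAdmin("ozone-admin")
        .setOwner("hive")
        .setAcls(acls)
        .addMetadata("team", "data-platform")
        .build();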
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
index 171494aa5dbe..100fd6cfc471 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
@@ -18,7 +18,7 @@
[hunk body not recoverable from the source; only these settings survive the
extraction, apparently as removed XML entries: hdds.heartbeat.interval = 1s,
ozone.scm.heartbeat.thread.interval = 100ms, ozone.scm.ratis.pipeline.limit = 3,
followed by ozone.scm.close.container.wait.duration = 1s]
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index b28b390efd73..e09c3bcef669 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -270,10 +270,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
             keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled());
         checkBucketQuotaInBytes(omMetadataManager, omBucketInfo,
             correctedSpace);
-        // using pseudoObjId as objectId can be same in case of overwrite key
-        long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
         String delKeyName = omMetadataManager.getOzoneDeletePathKey(
-            pseudoObjId, dbOzoneKey);
+            keyToDelete.getObjectID(), dbOzoneKey);
         if (null == oldKeyVersionsToDeleteMap) {
           oldKeyVersionsToDeleteMap = new HashMap<>();
         }
@@ -305,8 +303,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
         if (null == oldKeyVersionsToDeleteMap) {
           oldKeyVersionsToDeleteMap = new HashMap<>();
         }
-        oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName,
-            key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo);
+        oldKeyVersionsToDeleteMap.put(delKeyName,
+            new RepeatedOmKeyInfo(pseudoKeyInfo));
       }
 
       // Add to cache of open key table and key table.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index 704e9e91c47d..f062e71106e0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -203,10 +203,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
             correctedSpace);
         String delKeyName = omMetadataManager
             .getOzoneKey(volumeName, bucketName, fileName);
-        // using pseudoObjId as objectId can be same in case of overwrite key
-        long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
         delKeyName = omMetadataManager.getOzoneDeletePathKey(
-            pseudoObjId, delKeyName);
+            keyToDelete.getObjectID(), delKeyName);
         if (null == oldKeyVersionsToDeleteMap) {
           oldKeyVersionsToDeleteMap = new HashMap<>();
         }
@@ -240,8 +238,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
         if (null == oldKeyVersionsToDeleteMap) {
           oldKeyVersionsToDeleteMap = new HashMap<>();
         }
-        oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName,
-            key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo);
+        oldKeyVersionsToDeleteMap.put(delKeyName,
+            new RepeatedOmKeyInfo(pseudoKeyInfo));
       }
 
       // Add to cache of open key table and key table.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 15af3910e90f..e6debcdc23be 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.ozone.om;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -27,7 +26,6 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
@@ -51,7 +49,6 @@
 import java.io.File;
 import java.time.Duration;
 import java.time.Instant;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
@@ -64,7 +61,6 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD;
@@ -623,10 +619,9 @@ private void testGetExpiredOpenKeys(BucketLayout bucketLayout)
     for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) {
       final long creationTime = i < numExpiredOpenKeys
           ? expiredOpenKeyCreationTime : Time.now();
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
-          volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE))
-          .setCreationTime(creationTime)
-          .build();
+      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+          bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, 0L, creationTime);
 
       final String dbOpenKeyName;
       if (bucketLayout.isFileSystemOptimized()) {
@@ -694,10 +689,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys(
     // Ensure that "expired" MPU-related open keys are not fetched.
     // MPU-related open keys, identified by isMultipartKey = false
     for (int i = 0; i < numExpiredMPUOpenKeys; i++) {
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i,
-          RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .setCreationTime(expiredOpenKeyCreationTime)
-          .build();
+      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+          bucketName, "expired" + i,
+          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+          0L, expiredOpenKeyCreationTime, true);
 
       final String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
       final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils.
@@ -727,10 +722,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys(
     // HDDS-9017. Although these open keys are MPU-related,
     // the isMultipartKey flags are set to false
     for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) {
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
-          volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE))
-          .setCreationTime(expiredOpenKeyCreationTime)
-          .build();
+      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+          bucketName, "expired" + i,
+          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+          0L, expiredOpenKeyCreationTime, false);
 
       final String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
       final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils.
@@ -793,9 +788,8 @@ private void testGetExpiredMPUs() throws Exception {
         String keyName = "expired" + i;
         // Key info to construct the MPU DB key
         final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-            bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-            .setCreationTime(creationTime)
-            .build();
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, 0L, creationTime);
 
         for (int j = 1; j <= numPartsPerMPU; j++) {
@@ -867,10 +861,11 @@ private void addKeysToOM(String volumeName, String bucketName,
     if (i % 2 == 0) {
       OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-          1000L, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+          1000L, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     } else {
       OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName,
-          RatisReplicationConfig.getInstance(ONE),
+          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
           omMetadataManager);
     }
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 21b94ce5f05a..bdc6509247b1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -157,22 +157,23 @@ public static void addVolumeAndBucketToDB(
 
   @SuppressWarnings("parameterNumber")
   public static void addKeyToTableAndCache(String volumeName, String bucketName,
-      String keyName, long clientID, ReplicationConfig replicationConfig, long trxnLogIndex,
+      String keyName, long clientID, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex,
       OMMetadataManager omMetadataManager) throws Exception {
     addKeyToTable(false, true, volumeName, bucketName, keyName, clientID,
-        replicationConfig, trxnLogIndex, omMetadataManager);
+        replicationType, replicationFactor, trxnLogIndex, omMetadataManager);
   }
 
   /**
    * Add key entry to KeyTable. if openKeyTable flag is true, add's entries
    * to openKeyTable, else add's it to keyTable.
-   *
    * @param openKeyTable
    * @param volumeName
    * @param bucketName
    * @param keyName
   * @param clientID
-   * @param replicationConfig
+   * @param replicationType
+   * @param replicationFactor
    * @param omMetadataManager
    * @param locationList
    * @throws Exception
@@ -180,11 +181,12 @@ public static void addKeyToTableAndCache(String volumeName, String bucketName,
   @SuppressWarnings("parameterNumber")
   public static void addKeyToTable(boolean openKeyTable, String volumeName,
       String bucketName, String keyName, long clientID,
-      ReplicationConfig replicationConfig,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor,
       OMMetadataManager omMetadataManager,
       List<OmKeyLocationInfo> locationList, long version) throws Exception {
     addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName,
-        clientID, replicationConfig, 0L, omMetadataManager,
+        clientID, replicationType, replicationFactor, 0L, omMetadataManager,
         locationList, version);
   }
 
@@ -192,23 +194,24 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName,
   /**
    * Add key entry to KeyTable. if openKeyTable flag is true, add's entries
    * to openKeyTable, else add's it to keyTable.
-   *
    * @param openKeyTable
    * @param volumeName
    * @param bucketName
    * @param keyName
    * @param clientID
-   * @param replicationConfig
+   * @param replicationType
+   * @param replicationFactor
    * @param omMetadataManager
    * @throws Exception
    */
   @SuppressWarnings("parameterNumber")
   public static void addKeyToTable(boolean openKeyTable, String volumeName,
       String bucketName, String keyName, long clientID,
-      ReplicationConfig replicationConfig,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor,
       OMMetadataManager omMetadataManager) throws Exception {
     addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName,
-        clientID, replicationConfig, 0L, omMetadataManager);
+        clientID, replicationType, replicationFactor, 0L, omMetadataManager);
   }
 
   /**
@@ -222,17 +225,20 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName,
    * @param bucketName
    * @param keyName
    * @param clientID
-   * @param replicationConfig
+   * @param replicationType
+   * @param replicationFactor
    * @param omMetadataManager
    * @throws Exception
    */
   @SuppressWarnings("parameterNumber")
   public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey,
       String volumeName, String bucketName, String keyName, long clientID,
-      ReplicationConfig replicationConfig,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor,
       OMMetadataManager omMetadataManager) throws Exception {
     addKeyToTable(openKeyTable, isMultipartKey, false,
-        volumeName, bucketName, keyName, clientID, replicationConfig, 0L, omMetadataManager);
+        volumeName, bucketName, keyName, clientID, replicationType,
+        replicationFactor, 0L, omMetadataManager);
   }
 
   /**
@@ -242,20 +248,19 @@ public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey,
    */
   @SuppressWarnings("parameternumber")
   public static void addKeyToTable(boolean openKeyTable, boolean addToCache,
-      String volumeName, String bucketName, String keyName, long clientID, ReplicationConfig replicationConfig,
-      long trxnLogIndex,
+      String volumeName, String bucketName, String keyName, long clientID,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex,
       OMMetadataManager omMetadataManager,
       List<OmKeyLocationInfo> locationList, long version) throws Exception {
 
     OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
-        .setObjectID(trxnLogIndex)
-        .build();
-
+        replicationType, replicationFactor, trxnLogIndex, Time.now(), version,
+        false);
     omKeyInfo.appendNewBlocks(locationList, false);
 
     addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex,
-            omMetadataManager);
+        omMetadataManager);
   }
 
   /**
@@ -266,11 +271,12 @@ replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)
   @SuppressWarnings("parameternumber")
   public static void addKeyToTable(boolean openKeyTable, boolean addToCache,
       String volumeName, String bucketName, String keyName, long clientID,
-      ReplicationConfig replicationConfig, long trxnLogIndex,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex,
      OMMetadataManager omMetadataManager) throws Exception {
 
-    OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig)
-        .setObjectID(trxnLogIndex).build();
+    OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
+        replicationType, replicationFactor, trxnLogIndex);
 
     addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex,
         omMetadataManager);
@@ -284,13 +290,13 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache,
   @SuppressWarnings("parameternumber")
   public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey,
       boolean addToCache, String volumeName, String bucketName, String keyName,
-      long clientID, ReplicationConfig replicationConfig, long trxnLogIndex,
+      long clientID, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex,
       OMMetadataManager omMetadataManager) throws Exception {
 
     OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig, new OmKeyLocationInfoGroup(0, new ArrayList<>(), isMultipartKey))
-        .setObjectID(trxnLogIndex)
-        .build();
+        replicationType, replicationFactor, trxnLogIndex, Time.now(), 0L,
+        isMultipartKey);
 
     addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex,
         omMetadataManager);
@@ -425,22 +431,23 @@ public static void addPart(PartKeyInfo partKeyInfo,
 
   /**
    * Add key entry to key table cache.
-   *
    * @param volumeName
    * @param bucketName
    * @param keyName
-   * @param replicationConfig
+   * @param replicationType
+   * @param replicationFactor
    * @param omMetadataManager
    */
   @SuppressWarnings("parameterNumber")
   public static void addKeyToTableCache(String volumeName,
       String bucketName,
       String keyName,
-      ReplicationConfig replicationConfig,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor,
       OMMetadataManager omMetadataManager) {
 
     OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig).build();
+        replicationType, replicationFactor);
 
     omMetadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(
         new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
@@ -536,42 +543,87 @@ public static void addSnapshotToTable(
 
   /**
    * Create OmKeyInfo.
-   * Initializes most values to a sensible default.
    */
-  public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName,
-      String keyName, ReplicationConfig replicationConfig, OmKeyLocationInfoGroup omKeyLocationInfoGroup) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setFileName(OzoneFSUtils.getFileName(keyName))
-        .setReplicationConfig(replicationConfig)
-        .setObjectID(0L)
-        .setUpdateID(0L)
-        .setCreationTime(Time.now())
-        .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup)
-        .setDataSize(1000L);
-  }
-
-  public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName,
-      String keyName, ReplicationConfig replicationConfig) {
-    return createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig,
-        new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false));
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor) {
+    return createOmKeyInfo(volumeName, bucketName, keyName, replicationType,
+        replicationFactor, 0L);
   }
 
   /**
    * Create OmDirectoryInfo.
    */
   public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
-      long objectID,
-      long parentObjID) {
+                                                      long objectID,
+                                                      long parentObjID) {
     return new OmDirectoryInfo.Builder()
-        .setName(keyName)
-        .setCreationTime(Time.now())
+            .setName(keyName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setObjectID(objectID)
+            .setParentObjectID(parentObjID)
+            .setUpdateID(50)
+            .build();
+  }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID) {
+    return createOmKeyInfo(volumeName, bucketName, keyName, replicationType,
+        replicationFactor, objectID, Time.now());
+  }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long creationTime) {
+    return createOmKeyInfo(volumeName, bucketName, keyName, replicationType,
+        replicationFactor, objectID, creationTime, 0L, false);
+  }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  @SuppressWarnings("parameterNumber")
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long creationTime, boolean isMultipartKey) {
+    return createOmKeyInfo(volumeName, bucketName, keyName, replicationType,
+        replicationFactor, objectID, creationTime, 0L, isMultipartKey);
+  }
+
+  /**
+   * Create OmKeyInfo for LEGACY/OBS bucket.
+ */ + @SuppressWarnings("parameterNumber") + private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime, long version, boolean isMultipartKey) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(version, new ArrayList<>(), + isMultipartKey))) + .setCreationTime(creationTime) .setModificationTime(Time.now()) + .setDataSize(1000L) + .setReplicationConfig( + ReplicationConfig + .fromProtoTypeAndFactor(replicationType, replicationFactor)) .setObjectID(objectID) - .setParentObjectID(parentObjID) - .setUpdateID(50) + .setUpdateID(objectID) .build(); } @@ -579,8 +631,8 @@ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, * Create OmMultipartKeyInfo for OBS/LEGACY bucket. */ public static OmMultipartKeyInfo createOmMultipartKeyInfo(String uploadId, - long creationTime, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { + long creationTime, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { return new OmMultipartKeyInfo.Builder() .setUploadID(uploadId) .setCreationTime(creationTime) @@ -1356,6 +1408,76 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager, CacheValue.get(1L, omVolumeArgs)); } + /** + * Create OmKeyInfo. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime) { + return createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, objectID, + parentID, trxnLogIndex, creationTime, 0L, false); + } + + /** + * Create OmKeyInfo with isMultipartKey flag. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, + boolean isMultipartKey) { + return createOmKeyInfo(volumeName, bucketName, keyName, + replicationType, replicationFactor, objectID, + parentID, trxnLogIndex, creationTime, 0L, isMultipartKey); + } + + /** + * Create OmKeyInfo. + */ + @SuppressWarnings("parameterNumber") + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, long version) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, parentID, trxnLogIndex, creationTime, + version, false); + } + + /** + * Create OmKeyInfo for FSO bucket. 
+ */ + @SuppressWarnings("parameterNumber") + private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long parentID, long trxnLogIndex, long creationTime, long version, + boolean isMultipartKey) { + String fileName = OzoneFSUtils.getFileName(keyName); + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(version, new ArrayList<>(), + isMultipartKey))) + .setCreationTime(creationTime) + .setModificationTime(Time.now()) + .setDataSize(1000L) + .setReplicationConfig(ReplicationConfig + .fromProtoTypeAndFactor(replicationType, replicationFactor)) + .setObjectID(objectID) + .setUpdateID(trxnLogIndex) + .setParentObjectID(parentID) + .setFileName(fileName) + .build(); + } + + /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index fdc13e369c08..34f348a688dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -19,21 +19,16 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; - -import java.util.ArrayList; import java.util.UUID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; @@ -124,10 +119,12 @@ public void testBucketContainsIncompleteMPUs() throws Exception { new OMBucketDeleteRequest(omRequest); // Create a MPU key in the MPU table to simulate incomplete MPU + long creationTime = Time.now(); String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(), - RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) - .build(); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, UUID.randomUUID().toString(), + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + 0L, creationTime, true); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
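The OMRequestTestUtils change above replaces the builder-returning helpers with positional overloads that take the proto type/factor pair. A minimal sketch of how a test creates a key entry after this change; the volume, bucket, and key names are placeholders:

    // Shortest overload: objectID defaults to 0L and creationTime to "now".
    OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
        "vol1", "bucket1", "key1",
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE);

Internally the private overload resolves the pair through ReplicationConfig.fromProtoTypeAndFactor(...), so the resulting OmKeyInfo carries the same replication config as one built from RatisReplicationConfig.getInstance(ONE).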
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
index fdc13e369c08..34f348a688dc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
@@ -19,21 +19,16 @@
 package org.apache.hadoop.ozone.om.request.bucket;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
-
-import java.util.ArrayList;
 import java.util.UUID;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.junit.jupiter.api.Test;
@@ -124,10 +119,12 @@ public void testBucketContainsIncompleteMPUs() throws Exception {
        new OMBucketDeleteRequest(omRequest);
 
    // Create a MPU key in the MPU table to simulate incomplete MPU
+    long creationTime = Time.now();
    String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
-    final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(),
-        RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-        .build();
+    final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, UUID.randomUUID().toString(),
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+        0L, creationTime, true);
    final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils.
        createOmMultipartKeyInfo(uploadId, Time.now(), HddsProtos.ReplicationType.RATIS,
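In the incomplete-MPU setup above, the trailing boolean reaches the OmKeyLocationInfoGroup inside the created key info. A sketch of the same construction in isolation, with placeholder names:

    // The final 'true' marks the key's location group as multipart,
    // which is what testBucketContainsIncompleteMPUs relies on.
    OmKeyInfo mpuKeyInfo = OMRequestTestUtils.createOmKeyInfo(
        "vol1", "bucket1", UUID.randomUUID().toString(),
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
        0L, Time.now(), true);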
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
index 7af60c18d94a..275e8a6f2aae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
@@ -27,7 +27,7 @@
 import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.ResolvedBucket;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -60,7 +60,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -298,7 +297,8 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath()
        omMetadataManager);
 
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+        keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName);
    OMDirectoryCreateRequest omDirectoryCreateRequest =
@@ -340,7 +340,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L,
-        RatisReplicationConfig.getInstance(ONE),
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
        omMetadataManager);
 
    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName);
@@ -383,7 +383,8 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
        omMetadataManager);
 
    // Add a key with first two levels.
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(0, 11), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+        keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName);
    OMDirectoryCreateRequest omDirectoryCreateRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
index e0460ba81a99..0eceb2246ee2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.ozone.om.request.file;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -59,7 +59,6 @@
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -423,7 +422,8 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
    // Add a file into the FileTable, this is to simulate "file exists" check.
    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objID++).build();
+        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, objID++);
 
    final long volumeId = omMetadataManager.getVolumeId(volumeName);
    final long bucketId = omBucketInfo.getObjectID();
@@ -492,22 +492,21 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
    // for index=0, parentID is bucketID
    OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(
-            dirs.get(0), objID++, parentID);
+        dirs.get(0), objID++, parentID);
    OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo,
-            volumeName, bucketName, txnID, omMetadataManager);
+        volumeName, bucketName, txnID, omMetadataManager);
    parentID = omDirInfo.getObjectID();
 
    // Add a key in second level.
-    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(THREE))
-        .setObjectID(objID)
-        .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, objID);
 
    final long volumeId = omMetadataManager.getVolumeId(volumeName);
    final long bucketId = omBucketInfo.getObjectID();
    final String ozoneKey = omMetadataManager.getOzonePathKey(
-            volumeId, bucketId, parentID, dirs.get(1));
+        volumeId, bucketId, parentID, dirs.get(1));
    ++txnID;
    omMetadataManager.getKeyTable(getBucketLayout())
        .addCacheEntry(new CacheKey<>(ozoneKey),
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index 74b067a76a45..b39068fd7341 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -24,7 +24,6 @@
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -191,7 +190,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded()
        .setBucketName(bucketName)
        .setBucketLayout(getBucketLayout())
        .setQuotaInNamespace(1));
-    
+
    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
    OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
@@ -244,17 +243,19 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
    testNonRecursivePath("a/b", false, false, true);
 
-    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
    // Create some child keys for the path
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/d", 0L, replicationConfig, omMetadataManager);
+        "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/", 0L, replicationConfig, omMetadataManager);
+        "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/", 0L, replicationConfig, omMetadataManager);
+        "a/b/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/", 0L, replicationConfig, omMetadataManager);
+        "a/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    // cannot create file if directory of same name exists
    testNonRecursivePath("a/b/c", false, false, true);
@@ -274,14 +275,14 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception {
    // Should be able to create file even if parent directories does not
    // exist and key already exist, as this is with overwrite enabled.
    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
-    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/d/e/f", 0L, replicationConfig, omMetadataManager);
+        "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    testNonRecursivePath("c/d/e/f", true, true, false);
 
    // Create some child keys for the path
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/d", 0L, replicationConfig, omMetadataManager);
+        "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    testNonRecursivePath("a/b/c", false, true, false);
  }
@@ -292,17 +293,16 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
    String key = "c/d/e/f";
    // Should be able to create file even if parent directories does not exist
    testNonRecursivePath(key, false, true, false);
-    
+
    // 3 parent directory created c/d/e
    assertEquals(omMetadataManager.getBucketTable().get(
        omMetadataManager.getBucketKey(volumeName, bucketName))
        .getUsedNamespace(), 3);
-    
+
    // Add the key to key table
-    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        key, 0L, replicationConfig, omMetadataManager);
+        key, 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    // Even if key exists, should be able to create file as overwrite is set
    // to true
@@ -315,21 +315,23 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
      throws Exception {
    String key = "c/d/e/f";
-    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
    // Need to add the path which starts with "c/d/e" to keyTable as this is
    // non-recursive parent should exist.
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/", 0L, replicationConfig, omMetadataManager);
+        "c/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/d/", 0L, replicationConfig, omMetadataManager);
+        "c/d/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "c/d/e/", 0L, replicationConfig, omMetadataManager);
+        "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    testNonRecursivePath(key, false, false, false);
 
    // Add the key to key table
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        key, 0L, replicationConfig, omMetadataManager);
+        key, 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    // Even if key exists, should be able to create file as overwrite is set
    // to true
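The repeated addKeyToTable calls above seed the intermediate path entries one by one; the same setup can be sketched as a loop over the path prefixes (same signature, loop body is illustrative only):

    // Seed "c/", "c/d/", "c/d/e/" as plain keys so the non-recursive
    // parent-exists check passes.
    for (String k : new String[] {"c/", "c/d/", "c/d/e/"}) {
      OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
          k, 0L, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    }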
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
index e988949c5b85..1b7b7452c82c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.request.file;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -29,11 +28,11 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -56,7 +55,8 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
        "a/b/c", omMetadataManager);
    String fileNameD = "d";
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        "a/b/c/" + fileNameD, 0L, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+        "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
    // cannot create file if directory of same name exists
    testNonRecursivePath("a/b/c", false, false, true);
@@ -80,7 +80,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
  public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded()
      throws Exception {
    OMRequest omRequest = createFileRequest(volumeName, bucketName,
-        "/test/a1/a2", ONE,
+        "/test/a1/a2", HddsProtos.ReplicationFactor.ONE,
        HddsProtos.ReplicationType.RATIS, false, true);
 
    // create bucket with quota limit 1
@@ -114,11 +114,11 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
    // Add the key to key table
    OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(omDirInfo.getObjectID() + 10)
-            .setParentObjectID(omDirInfo.getObjectID())
-            .setUpdateID(100)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            omDirInfo.getObjectID() + 10,
+            omDirInfo.getObjectID(), 100, Time.now());
    OMRequestTestUtils.addFileToKeyTable(false, false,
        "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager);
@@ -136,22 +136,23 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
    String fileName = "f";
    String key = parentDir + "/" + fileName;
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-            omMetadataManager, getBucketLayout());
+        omMetadataManager, getBucketLayout());
 
    // Create parent dirs for the path
    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
-            bucketName, parentDir, omMetadataManager);
+        bucketName, parentDir, omMetadataManager);
 
    // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is
    // non-recursive parent should exist.
    testNonRecursivePath(key, false, false, false);
 
    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
-    OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager);
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
+    OMRequestTestUtils.addFileToKeyTable(false, false,
+        fileName, omKeyInfo, -1, 50, omMetadataManager);
 
    // Even if key exists in KeyTable, should be able to create file as
    // overwrite is set to true
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
index 294281555a56..3a1ab92c1b5a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
@@ -23,7 +23,6 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest;
@@ -36,6 +35,7 @@
    .RecoverLeaseRequest;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import org.junit.jupiter.api.Test;
@@ -272,9 +272,8 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
  String addToOpenFileTable(List<OmKeyLocationInfo> locationList)
      throws Exception {
    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
-        .setParentObjectID(parentId)
-        .build();
+        bucketName, keyName, replicationType, replicationFactor, 0, parentId,
+        0, Time.now(), version);
    omKeyInfo.appendNewBlocks(locationList, false);
    omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID,
        String.valueOf(clientID));
@@ -295,9 +294,8 @@ bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new
  String addToFileTable(List<OmKeyLocationInfo> locationList)
      throws Exception {
    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
-        .setParentObjectID(parentId)
-        .build();
+        bucketName, keyName, replicationType, replicationFactor, 0, parentId,
+        0, Time.now(), version);
    omKeyInfo.appendNewBlocks(locationList, false);
 
    OMRequestTestUtils.addFileToKeyTable(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index 9fb0e79953e1..eb99cd932568 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -26,7 +26,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -237,8 +236,7 @@ protected OMRequest createAllocateBlockRequest() {
    KeyArgs keyArgs = KeyArgs.newBuilder()
        .setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyName)
-        .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor())
-        .setType(replicationConfig.getReplicationType())
+        .setFactor(replicationFactor).setType(replicationType)
        .build();
 
    AllocateBlockRequest allocateBlockRequest =
@@ -255,8 +253,8 @@ protected OMRequest createAllocateBlockRequest() {
  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
      throws Exception {
    OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, replicationConfig,
-        omMetadataManager);
+        keyName, clientID, replicationType, replicationFactor,
+        omMetadataManager);
    return "";
  }
 }
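With the fixture fields holding proto enums directly, building KeyArgs no longer needs the RatisReplicationConfig cast that the old code used. A sketch of the resulting builder call, assuming the same fixture fields:

    KeyArgs keyArgs = KeyArgs.newBuilder()
        .setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyName)
        // proto enums drop straight into the proto builder
        .setFactor(replicationFactor)
        .setType(replicationType)
        .build();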
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
index 1ecbfed71624..33512d355c0d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
@@ -20,12 +20,10 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.fail;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -33,6 +31,7 @@
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 
 /**
@@ -66,11 +65,10 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
    long objectId = parentID + 1;
 
    OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now());
 
    // add key to openFileTable
    OMRequestTestUtils.addFileToKeyTable(true, false,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
index cbb782e184fe..f040bd508177 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
@@ -68,7 +68,7 @@ private List<String> createAndDeleteKeys(Integer trxnIndex, String bucket)
    for (int i = 1; i <= numKeys; i++) {
      String key = keyName + "-" + i;
      OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket,
-          key, clientID, replicationConfig, trxnIndex++,
+          key, clientID, replicationType, replicationFactor, trxnIndex++,
          omMetadataManager);
      String ozoneKey = omMetadataManager.getOzoneKey(
          volumeName, bucket, key);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
index b9aa70b4c7e8..c9559ff41e1f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
@@ -20,12 +20,8 @@
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
 import java.util.List;
 import java.util.UUID;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -251,7 +247,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) {
  protected String addKeyToTable() throws Exception {
    OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L,
+        keyName, clientID, replicationType, replicationFactor, 1L,
        omMetadataManager);
 
    return omMetadataManager.getOzoneKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java
index ea9c3223de5a..48d92e608b3e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -26,8 +26,7 @@
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import org.apache.hadoop.util.Time;
 
 /**
  * Test Key ACL requests for prefix layout.
@@ -45,22 +44,20 @@ protected String addKeyToTable() throws Exception {
        .addParentsToDirTable(volumeName, bucketName, parentDir,
            omMetadataManager);
 
-    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils
+        .createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+            parentId + 1, parentId, 100, Time.now());
    OMRequestTestUtils
        .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50,
            omMetadataManager);
 
    final long volumeId = omMetadataManager.getVolumeId(
-            omKeyInfo.getVolumeName());
+        omKeyInfo.getVolumeName());
    final long bucketId = omMetadataManager.getBucketId(
-            omKeyInfo.getVolumeName(), omKeyInfo.getBucketName());
+        omKeyInfo.getVolumeName(), omKeyInfo.getBucketName());
    return omMetadataManager.getOzonePathKey(
-            volumeId, bucketId, omKeyInfo.getParentObjectID(),
-            fileName);
+        volumeId, bucketId, omKeyInfo.getParentObjectID(),
+        fileName);
  }
 
  @Override
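The FSO ACL test resolves its DB key from object IDs rather than the full key name, which is why parentId is threaded through createOmKeyInfo above. A sketch of the lookup, assuming the metadata manager from the test fixture:

    // FSO file table entries are keyed by volumeId/bucketId/parentId/fileName.
    long volumeId = omMetadataManager.getVolumeId(volumeName);
    long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
    String dbKey = omMetadataManager.getOzonePathKey(
        volumeId, bucketId, omKeyInfo.getParentObjectID(), fileName);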
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index 9719865db196..3251fff97490 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -26,11 +26,7 @@
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -60,13 +56,10 @@
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.when;
 
 /**
  * Class tests OMKeyCommitRequest class.
@@ -562,17 +555,16 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
  @Test
  public void testValidateAndUpdateCacheOnOverwrite() throws Exception {
-    when(ozoneManager.getObjectIdFromTxId(anyLong())).thenAnswer(tx ->
-        OmUtils.getObjectIdFromTxId(2, tx.getArgument(0)));
    testValidateAndUpdateCache();
 
    // Become a new client and set next version number
    clientID = Time.now();
    version += 1;
 
-    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest(getKeyLocation(10).subList(4, 10), false));
+    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
 
-    OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest);
+    OMKeyCommitRequest omKeyCommitRequest =
+        getOmKeyCommitRequest(modifiedOmRequest);
 
    KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs();
@@ -584,54 +576,49 @@ public void testValidateAndUpdateCacheOnOverwrite() throws Exception {
    assertNotNull(omKeyInfo);
    // Previously committed version
-    assertEquals(0L, omKeyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(0L,
+        omKeyInfo.getLatestVersionLocations().getVersion());
 
    // Append new blocks
    List<OmKeyLocationInfo> allocatedLocationList =
-        keyArgs.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList());
+        keyArgs.getKeyLocationsList().stream()
+        .map(OmKeyLocationInfo::getFromProtobuf)
+        .collect(Collectors.toList());
    addKeyToOpenKeyTable(allocatedLocationList);
 
    OMClientResponse omClientResponse =
        omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 102L);
 
-    assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus());
+    assertEquals(OzoneManagerProtocolProtos.Status.OK,
+        omClientResponse.getOMResponse().getStatus());
 
    // New entry should be created in key Table.
-    omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()).get(ozoneKey);
+    omKeyInfo =
+        omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout())
+            .get(ozoneKey);
 
    assertNotNull(omKeyInfo);
-    assertEquals(version, omKeyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(version,
+        omKeyInfo.getLatestVersionLocations().getVersion());
    // DB keyInfo format
    verifyKeyName(omKeyInfo);
 
    // Check modification time
    CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest();
-    assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
+    assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(),
+        omKeyInfo.getModificationTime());
 
    // Check block location.
    List<OmKeyLocationInfo> locationInfoListFromCommitKeyRequest =
-        commitKeyRequest.getKeyArgs().getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList());
+        commitKeyRequest.getKeyArgs()
+        .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf)
+        .collect(Collectors.toList());
 
-    assertEquals(locationInfoListFromCommitKeyRequest, omKeyInfo.getLatestVersionLocations().getLocationList());
-    assertEquals(allocatedLocationList, omKeyInfo.getLatestVersionLocations().getLocationList());
+    assertEquals(locationInfoListFromCommitKeyRequest,
+        omKeyInfo.getLatestVersionLocations().getLocationList());
+    assertEquals(allocatedLocationList,
+        omKeyInfo.getLatestVersionLocations().getLocationList());
    assertEquals(1, omKeyInfo.getKeyLocationVersions().size());
-
-    // flush response content to db
-    BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation();
-    ((OMKeyCommitResponse) omClientResponse).addToDBBatch(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // verify deleted key is unique generated
-    String deletedKey = omMetadataManager.getOzoneKey(volumeName, omKeyInfo.getBucketName(), keyName);
-    List<Table.KeyValue<String, RepeatedOmKeyInfo>> rangeKVs
-        = omMetadataManager.getDeletedTable().getRangeKVs(null, 100, deletedKey);
-    assertThat(rangeKVs.size()).isGreaterThan(0);
-    assertEquals(1, rangeKVs.get(0).getValue().getOmKeyInfoList().size());
-    assertFalse(rangeKVs.get(0).getKey().endsWith(rangeKVs.get(0).getValue().getOmKeyInfoList().get(0).getObjectID()
-        + ""));
  }
 
  /**
@@ -699,8 +686,7 @@ private OMRequest createCommitKeyRequest(
    KeyArgs keyArgs =
        KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName)
            .setKeyName(keyName).setBucketName(bucketName)
-            .setType(replicationConfig.getReplicationType())
-            .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor())
+            .setType(replicationType).setFactor(replicationFactor)
            .addAllKeyLocations(keyLocations).build();
 
    CommitKeyRequest commitKeyRequest =
@@ -745,7 +731,7 @@ protected String getOzonePathKey() throws IOException {
  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
      throws Exception {
    OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationConfig, omMetadataManager,
+        clientID, replicationType, replicationFactor, omMetadataManager,
        locationList, version);
 
    return omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
index 48cc52773a33..d258c1cfde43 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@ -19,22 +19,19 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -81,12 +78,10 @@ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
    long objectId = 100;
 
    OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+            Time.now(), version);
    omKeyInfoFSO.appendNewBlocks(locationList, false);
 
    String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 5d79e7771520..12d9d02a72d6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -31,7 +31,6 @@
 import java.util.HashMap;
 
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -547,8 +546,7 @@ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber,
    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
        .setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyName).setIsMultipartKey(isMultipartKey)
-        .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor())
-        .setType(replicationConfig.getReplicationType())
+        .setFactor(replicationFactor).setType(replicationType)
        .setLatestVersionLocation(true);
 
    if (isMultipartKey) {
@@ -795,7 +793,7 @@ private void verifyKeyInheritAcls(List<OzoneAcl> keyAcls,
  protected void addToKeyTable(String keyName) throws Exception {
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(1), 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
  }
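The bare RATIS and THREE in addToKeyTable above presumably come from static imports of the HddsProtos enums elsewhere in the file; spelled out, the call is equivalent to:

    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        keyName.substring(1), 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE, omMetadataManager);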
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
index 2a25a9b09686..0750c9512618 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -31,6 +31,7 @@
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -41,7 +42,6 @@
 import java.util.Arrays;
 import java.util.Collection;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -107,13 +107,12 @@ protected void addToKeyTable(String keyName) throws Exception {
    Path keyPath = Paths.get(keyName);
    long parentId = checkIntermediatePaths(keyPath);
    String fileName = OzoneFSUtils.getFileName(keyName);
-    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName,
-        RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(parentId + 1L)
-        .setParentObjectID(parentId)
-        .setUpdateID(100L)
-        .build();
-    OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager);
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, fileName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100,
+        Time.now());
+    OMRequestTestUtils.addFileToKeyTable(false, false,
+        fileName, omKeyInfo, -1, 50, omMetadataManager);
  }
 
  @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index 9f1bee28c047..00d1883d749c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -192,8 +192,8 @@ protected String addKeyToTable() throws Exception {
  protected String addKeyToTable(String key) throws Exception {
    OMRequestTestUtils.addKeyToTable(false, volumeName,
-        bucketName, key, clientID, replicationConfig,
-        omMetadataManager);
+        bucketName, key, clientID, replicationType, replicationFactor,
+        omMetadataManager);
 
    return omMetadataManager.getOzoneKey(volumeName, bucketName, key);
  }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
index 07094ad2923f..9dafab090295 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -34,6 +33,7 @@
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
@@ -72,11 +72,11 @@ protected String addKeyToTable() throws Exception {
        bucketName, PARENT_DIR, omMetadataManager);
 
    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
    omKeyInfo.setKeyName(FILE_NAME);
    OMRequestTestUtils.addFileToKeyTable(false, false,
        FILE_NAME, omKeyInfo, -1, 50, omMetadataManager);
@@ -96,11 +96,11 @@ protected String addKeyToDirTable(String volumeName, String bucketName,
        bucketName, key, omMetadataManager);
 
    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
    omKeyInfo.setKeyName(key);
    return omKeyInfo.getPath();
  }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
index ff3db1abbe20..a1d616c07563 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
@@ -76,7 +76,7 @@ private List<String> createAndDeleteKeys(Integer trxnIndex, String bucket)
    for (int i = 1; i <= numKeys; i++) {
      String key = keyName + "-" + i;
      OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket,
-          key, clientID, replicationConfig, trxnIndex++,
+          key, clientID, replicationType, replicationFactor, trxnIndex++,
          omMetadataManager);
      ozoneKeyNames.add(omMetadataManager.getOzoneKey(
          volumeName, bucket, key));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
index 0a2dcfd5d67a..a6015870d09b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
@@ -240,7 +240,7 @@ protected OMRequest createRenameKeyRequest(
  protected OmKeyInfo getOmKeyInfo(String keyName) {
    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig).build();
+        replicationType, replicationFactor, 0L);
  }
 
  protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
index 40c5156b5dbe..c91b8e158214 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
@@ -18,14 +18,12 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -39,6 +37,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -180,10 +179,10 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
  @Override
  protected OmKeyInfo getOmKeyInfo(String keyName) {
    long bucketId = random.nextLong();
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(bucketId + 100L)
-        .setParentObjectID(bucketId + 101L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(
+        volumeName, bucketName, keyName,
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+        bucketId + 100L, bucketId + 101L, 0L, Time.now());
  }
 
  @Override
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -110,7 +110,8 @@ public class TestOMKeyRequest { protected String volumeName; protected String bucketName; protected String keyName; - protected ReplicationConfig replicationConfig; + protected HddsProtos.ReplicationType replicationType; + protected HddsProtos.ReplicationFactor replicationFactor; protected long clientID; protected long scmBlockSize = 1000L; protected long dataSize; @@ -208,7 +209,8 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.ONE); + replicationFactor = HddsProtos.ReplicationFactor.ONE; + replicationType = HddsProtos.ReplicationType.RATIS; clientID = Time.now(); dataSize = 1000L; random = new Random(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index d0cfd48e35dc..d48131de4bd3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -31,7 +31,6 @@ import java.util.List; import java.util.UUID; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -146,7 +145,8 @@ protected void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); deleteKeyArgs.addKeys(key); deleteKeyList.add(key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java index 2da80550275a..f28ca2e2685f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.UUID; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; /** @@ -83,13 +83,11 @@ protected void createPreRequisites() throws Exception { long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, dir, omMetadataManager); - OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, dir + "/" + file, - RatisReplicationConfig.getInstance(ONE)) - .setObjectID(parentId + 1L) - .setParentObjectID(parentId) - .setUpdateID(100L) - .build(); + OmKeyInfo omKeyInfo = OMRequestTestUtils + .createOmKeyInfo(volumeName, bucketName, dir + "/" + file, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, + Time.now()); omKeyInfo.setKeyName(file); OMRequestTestUtils .addFileToKeyTable(false, false, file, omKeyInfo, -1, 50, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index 340b6e36eb0b..3d429f4d6847 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -18,14 +18,12 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -129,7 +127,8 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() .setFromKeyName(key) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java index ad834fa556bf..bfae424cc954 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java +++ 
@@ -100,7 +100,7 @@ private OMRequest createSetTimesKeyRequest(long mtime, long atime) {
 
   protected String addKeyToTable() throws Exception {
     OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationConfig, 1L,
+        keyName, clientID, replicationType, replicationFactor, 1L,
         omMetadataManager);
 
     return omMetadataManager.getOzoneKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
index 0960125b0575..2cd9273c25a5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.junit.jupiter.api.Test;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -115,13 +115,10 @@ protected String addKeyToTable() throws Exception {
         .addParentsToDirTable(volumeName, bucketName, PARENT_DIR,
             omMetadataManager);
 
-    OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME,
-            RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1L)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils
+        .createOmKeyInfo(volumeName, bucketName, FILE_NAME,
+            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+            parentId + 1, parentId, 100, Time.now());
     OMRequestTestUtils
         .addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50,
             omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
index f02e1ee23679..25c908b18a2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -28,15 +27,14 @@
 import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -53,6 +51,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
@@ -481,13 +480,10 @@ private List<String> createMPUsWithFSO(String volume, String bucket,
           commitMultipartRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED);
 
       // Add key to open key table to be used in MPU commit processing
-      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName,
-          RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .setObjectID(parentID + j)
-          .setParentObjectID(parentID)
-          .setUpdateID(trxnLogIndex)
-          .build();
-
+      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
+          bucket, keyName, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, parentID + j, parentID,
+          trxnLogIndex, Time.now(), true);
       String fileName = OzoneFSUtils.getFileName(keyName);
       OMRequestTestUtils.addFileToKeyTable(true, false, fileName,
           omKeyInfo, clientID, trxnLogIndex, omMetadataManager);
@@ -567,7 +563,8 @@ private List<String> createMPUs(String volume, String bucket,
       // Add key to open key table to be used in MPU commit processing
       OMRequestTestUtils.addKeyToTable(
           true, true,
-          volume, bucket, keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+          volume, bucket, keyName, clientID, HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
 
       OMClientResponse commitResponse =
           s3MultipartUploadCommitPartRequest.validateAndUpdateCache(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index 014b4e021cb3..61c792a83de3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -24,8 +24,6 @@
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -226,8 +224,9 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception {
   protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
-    OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
-        keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager);
+    OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
+        keyName, clientID, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
   }
 
   protected String getKeyName() {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
index 24480c249cc8..4c8e4881d925 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
@@ -24,17 +24,15 @@
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.UUID;
 
 /**
@@ -70,16 +68,13 @@ protected String getKeyName() {
   protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     long txnLogId = 0L;
-    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
-        new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-        .setObjectID(parentID + 1)
-        .setParentObjectID(parentID)
-        .setUpdateID(txnLogId)
-        .build();
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+        txnLogId, Time.now(), true);
     String fileName = OzoneFSUtils.getFileName(keyName);
     OMRequestTestUtils.addFileToKeyTable(true, false,
-        fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+      fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index 0a1ce8f7246f..733c790bcf17 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -29,7 +28,6 @@
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
@@ -39,6 +37,7 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.junit.jupiter.api.Test;
 
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -316,7 +315,8 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError()
   protected void addKeyToTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName,
-        keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager);
+        keyName, clientID, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
   }
 
   protected String getMultipartKey(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
index 1762f38b44bd..5926b5fd1d9c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
@@ -18,21 +18,18 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.UUID;
 
 /**
@@ -75,12 +72,10 @@ protected void addKeyToTable(String volumeName, String bucketName,
     long objectId = parentID + 1;
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now(), true);
 
     // add key to openFileTable
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
index a3e83986b531..45e5b1007531 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.snapshot;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -52,7 +52,6 @@
 import java.io.IOException;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf;
 import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey;
 import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest;
@@ -322,9 +321,8 @@ private void renameDir(String fromKey, String toKey, long offset)
       throws Exception {
     String fromKeyParentName = UUID.randomUUID().toString();
     OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE))
-        .setObjectID(100L)
-        .build();
+        bucketName, fromKeyParentName, HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, 100L);
 
     OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L);
     OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L);
@@ -383,8 +381,8 @@ public static OMSnapshotCreateRequest doPreExecute(
 
   private OmKeyInfo addKey(String keyName, long objectId) {
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId)
-        .build();
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE,
+        objectId);
   }
 
   protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
index 7d6487493861..811e13ac173e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
@@ -20,8 +20,6 @@
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -84,7 +82,7 @@ public void testAddToDBBatch() throws Exception {
     OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
         bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName),
-        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build();
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
 
     ThreadLocalRandom random = ThreadLocalRandom.current();
     long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
index c639c77c08e3..c7e2c265b7bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -40,11 +41,11 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse {
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig)
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index 88ef2964d17e..e5a6b0ab14f5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -92,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
 
   protected OmKeyInfo createOmKeyInfo() throws Exception {
     return OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig).build();
+        bucketName, keyName, replicationType, replicationFactor);
   }
 
   protected String getOpenKey() throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
index b574b8548132..85e9354ca8c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
@@ -18,19 +18,18 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-
 /**
  * Tests OMAllocateBlockResponse - prefix layout.
  */
@@ -50,11 +49,12 @@ protected OmKeyInfo createOmKeyInfo() throws Exception {
     long txnId = 50;
     long objectId = parentID + 1;
 
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(objectId)
-        .setParentObjectID(parentID)
-        .setUpdateID(txnId)
-        .build();
+    OmKeyInfo omKeyInfoFSO =
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now());
+    return omKeyInfoFSO;
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 89b179391cee..bb95c43107c3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -81,7 +81,7 @@ public void testAddToDBBatch() throws Exception {
   public void testAddToDBBatchNoOp() throws Exception {
 
     OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationConfig).build();
+        bucketName, keyName, replicationType, replicationFactor);
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -135,7 +135,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception {
   @Nonnull
   protected void addKeyToOpenKeyTable() throws Exception {
     OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationConfig, omMetadataManager);
+        clientID, replicationType, replicationFactor, omMetadataManager);
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
index 32d55d3e961c..a1173e554325 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
@@ -18,19 +18,17 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
 import java.util.HashMap;
 import java.util.Map;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -64,11 +62,11 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig)
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
@@ -79,11 +77,11 @@ protected void addKeyToOpenKeyTable() throws Exception {
     long objectId = parentID + 10;
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+            Time.now());
+
     String fileName = OzoneFSUtils.getFileName(keyName);
     OMRequestTestUtils.addFileToKeyTable(true, false, fileName,
         omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
index 53d86e667367..ee83f3671277 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
@@ -18,15 +18,13 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
@@ -52,12 +50,11 @@ protected String getOpenKeyName() throws IOException {
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(omBucketInfo);
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName,
-        RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(omBucketInfo.getObjectID() + 1)
-        .setParentObjectID(omBucketInfo.getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        omBucketInfo.getBucketName(), keyName, replicationType,
+        replicationFactor,
+        omBucketInfo.getObjectID() + 1,
+        omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index 6440edd0327c..4690b6f56f72 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -22,6 +22,7 @@
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -88,7 +89,8 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
     Pipeline pipeline = Pipeline.newBuilder()
         .setState(Pipeline.PipelineState.OPEN)
         .setId(PipelineID.randomId())
-        .setReplicationConfig(replicationConfig)
+        .setReplicationConfig(RatisReplicationConfig
+            .getInstance(replicationFactor))
         .setNodes(new ArrayList<>())
         .build();
 
@@ -165,7 +167,7 @@ protected String addKeyToTable() throws Exception {
         keyName);
 
     OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationConfig, omMetadataManager);
+        clientID, replicationType, replicationFactor, omMetadataManager);
     return ozoneKey;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
index 557839f44f7a..fda72eb91243 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 
 /**
  * Tests OMKeyDeleteResponse - prefix layout.
@@ -51,11 +50,11 @@ protected String addKeyToTable() throws Exception {
         bucketName, "", omMetadataManager);
 
     OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-            .setObjectID(parentId + 1)
-            .setParentObjectID(parentId)
-            .setUpdateID(100L)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
     OMRequestTestUtils.addFileToKeyTable(false, false, keyName,
         omKeyInfo, -1, 50, omMetadataManager);
     return omMetadataManager.getOzonePathKey(
@@ -67,12 +66,11 @@ protected String addKeyToTable() throws Exception {
   @Override
   protected OmKeyInfo getOmKeyInfo() {
     assertNotNull(getOmBucketInfo());
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName,
-        replicationConfig)
-        .setObjectID(getOmBucketInfo().getObjectID() + 1)
-        .setParentObjectID(getOmBucketInfo().getObjectID())
-        .setUpdateID(100L)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName,
+        getOmBucketInfo().getBucketName(), keyName, replicationType,
+        replicationFactor,
+        getOmBucketInfo().getObjectID() + 1,
+        getOmBucketInfo().getObjectID(), 100, Time.now());
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
index 07c094cc98a1..2dcef56330f2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
@@ -154,10 +154,12 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
   }
 
   protected OmKeyInfo getOmKeyInfo(String keyName) {
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+        replicationType, replicationFactor, 0L);
   }
 
-  protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) {
+  protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo,
+      String keyName) {
     return getOmKeyInfo(keyName);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
index edbb50d66f86..f2f9ccaf872e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
@@ -18,17 +18,17 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
+import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 /**
@@ -38,21 +38,19 @@ public class TestOMKeyRenameResponseWithFSO extends TestOMKeyRenameResponse {
   @Override
   protected OmKeyInfo getOmKeyInfo(String keyName) {
     long bucketId = random.nextLong();
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(bucketId + 100)
-        .setParentObjectID(bucketId + 101)
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(
+        volumeName, bucketName, keyName,
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+        bucketId + 100L, bucketId + 101L, 0L, Time.now());
   }
 
   @Override
   protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) {
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-        .setObjectID(toKeyInfo.getObjectID())
-        .setParentObjectID(toKeyInfo.getParentObjectID())
-        .setUpdateID(0L)
-        .setCreationTime(toKeyInfo.getCreationTime())
-        .build();
+    return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(),
+        toKeyInfo.getBucketName(), keyName, replicationType,
+        replicationFactor, toKeyInfo.getObjectID(),
+        toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime());
   }
 
   @Override
@@ -82,12 +80,12 @@ protected void createParent() {
     long bucketId = random.nextLong();
     String fromKeyParentName = UUID.randomUUID().toString();
    String toKeyParentName = UUID.randomUUID().toString();
-    fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig)
-        .setObjectID(bucketId + 100L)
-        .build();
-    toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig)
-        .setObjectID(bucketId + 101L)
-        .build();
+    fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, fromKeyParentName, replicationType, replicationFactor,
+        bucketId + 100L);
+    toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, toKeyParentName, replicationType, replicationFactor,
+        bucketId + 101L);
     fromKeyParent.setParentObjectID(bucketId);
     toKeyParent.setParentObjectID(bucketId);
     fromKeyParent.setFileName(OzoneFSUtils.getFileName(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index bc4c34bd0db3..1cbf5c6d0b2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -24,7 +24,6 @@
 import java.util.Random;
 import java.util.UUID;
 
-import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -60,7 +59,8 @@ public class TestOMKeyResponse {
   protected String volumeName;
   protected String bucketName;
   protected String keyName;
-  protected ReplicationConfig replicationConfig;
+  protected HddsProtos.ReplicationFactor replicationFactor;
+  protected HddsProtos.ReplicationType replicationType;
   protected OmBucketInfo omBucketInfo;
   protected long clientID;
   protected Random random;
@@ -78,18 +78,18 @@ public void setup() throws Exception {
     volumeName = UUID.randomUUID().toString();
     bucketName = UUID.randomUUID().toString();
     keyName = UUID.randomUUID().toString();
-    replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
+    replicationFactor = HddsProtos.ReplicationFactor.ONE;
+    replicationType = HddsProtos.ReplicationType.RATIS;
     clientID = 1000L;
     random = new Random();
    keysToDelete = null;
 
     final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-      .setVolume(volumeName)
-      .setAdminName("admin")
-      .setOwnerName("owner")
-      .setObjectID(System.currentTimeMillis())
-      .build();
+        .setVolume(volumeName)
+        .setAdminName("admin")
+        .setOwnerName("owner")
+        .setObjectID(System.currentTimeMillis())
+        .build();
 
     omMetadataManager.getVolumeTable().addCacheEntry(
         new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)),
@@ -117,7 +117,8 @@ protected String getOpenKeyName() throws IOException {
 
   @Nonnull
   protected OmKeyInfo getOmKeyInfo() {
-    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build();
+    return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+        replicationType, replicationFactor);
   }
 
   @Nonnull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
index 7a14e15a19bd..0c9c725c1b86 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -32,6 +31,7 @@
 import java.util.List;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
@@ -63,7 +63,7 @@ protected void createPreRequisities() throws Exception {
     for (int i = 0; i < 10; i++) {
       keyName = parent.concat(key + i);
       OMRequestTestUtils.addKeyToTable(false, volumeName,
-          bucketName, keyName, 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager);
+          bucketName, keyName, 0L, RATIS, THREE, omMetadataManager);
       ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
       omKeyInfoList
          .add(omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
index 6a3a709c341c..fd70308c43d1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -33,13 +33,13 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -93,11 +93,10 @@ protected void createPreRequisities() throws Exception {
       keyName = keyPrefix + i;
 
       OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(dirId + 1)
-              .setParentObjectID(buckId)
-              .setUpdateID(dirId + 1)
-              .build();
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+              HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.ONE, dirId + 1, buckId,
+              dirId + 1, Time.now());
 
       ozoneDBKey = OMRequestTestUtils.addFileToKeyTable(false, false,
           keyName, omKeyInfo, -1, 50, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
index 72a76a1aca4f..0824f7c33de7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.OmRenameKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -33,6 +32,7 @@
 import java.util.Map;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
@@ -117,8 +117,7 @@ private void createPreRequisities() throws Exception {
       String key = parentDir.concat("/key" + i);
       String toKey = parentDir.concat("/newKey" + i);
       OMRequestTestUtils.addKeyToTable(false, volumeName,
-          bucketName, parentDir.concat("/key" + i), 0L,
-          RatisReplicationConfig.getInstance(THREE),
+          bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE,
           omMetadataManager);
 
       OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout())
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
index c9a4109809ed..f4f0e729f05d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
@@ -208,7 +208,7 @@ private Map<String, OmKeyInfo> addOpenKeysToDB(String volume, int numKeys,
       long parentID = random.nextLong();
 
       OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
-          bucket, key, replicationConfig).build();
+          bucket, key, replicationType, replicationFactor);
 
       if (keyLength > 0) {
         OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
index 35600c331f3f..b356dddd6b57 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
@@ -19,19 +19,15 @@
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartAbortInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -281,10 +277,10 @@ private Map<String, List<String>> addMPUsToDB(
       OmBucketInfo omBucketInfo = OMRequestTestUtils.addBucketToDB(volume,
           bucket, omMetadataManager, getBucketLayout());
 
-      ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE);
-      final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig,
-          new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .build();
+      final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
+          bucket, keyName,
+          HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+          0L, Time.now(), true);
 
       if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
         omKeyInfo.setParentObjectID(omBucketInfo.getObjectID());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
index e7a570350cff..47aa641c1ebb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
@@ -18,17 +18,14 @@
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -88,16 +85,14 @@ public void testAddDBToBatch() throws Exception {
     omMetadataManager.getStore().commitBatchOperation(batchOperation);
 
     String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId,
-        parentID, fileName, clientId);
+      parentID, fileName, clientId);
     String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId,
-        parentID, fileName);
+      parentID, fileName);
 
     OmKeyInfo omKeyInfoFSO =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now(), true);
 
     // add key to openFileTable
     omKeyInfoFSO.setKeyName(fileName);
@@ -180,11 +175,9 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception {
         parentID, fileName);
     OmKeyInfo omKeyInfoFSO =
         OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(objectId)
-            .setParentObjectID(parentID)
-            .setUpdateID(txnId)
-            .build();
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+            Time.now(), true);
 
     // add key to openFileTable
     omKeyInfoFSO.setKeyName(fileName);
@@ -251,20 +244,20 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception {
     String keyName = getKeyName();
 
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
+      omMetadataManager);
     createParentPath(volumeName, bucketName);
 
     // Put an entry to delete table with the same key prior to multipart commit
-    OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-        .setObjectID(parentID + 8)
-        .setParentObjectID(parentID)
-        .setUpdateID(8)
-        .build();
+    OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName,
+        bucketName, keyName,
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE,
+        parentID + 8,
+        parentID, 8, Time.now(), true);
     RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey);
     String ozoneKey = omMetadataManager
-        .getOzoneKey(prevKey.getVolumeName(),
-            prevKey.getBucketName(), prevKey.getFileName());
+      .getOzoneKey(prevKey.getVolumeName(),
+          prevKey.getBucketName(), prevKey.getFileName());
     omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys);
 
     long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1);
@@ -319,12 +312,11 @@ private long runAddDBToBatchWithParts(String volumeName,
         omMetadataManager.getBucketTable().get(bucketKey);
 
     OmKeyInfo omKeyInfo =
-        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-            RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-            .setObjectID(parentID + 9)
-            .setParentObjectID(parentID)
-            .setUpdateID(100)
-            .build();
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentID + 9,
+            parentID, 100, Time.now(), true);
     List<OmKeyInfo> unUsedParts = new ArrayList<>();
     unUsedParts.add(omKeyInfo);
     S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
index 8dcb030d637a..c8a3faae4cca 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
@@ -23,10 +23,9 @@
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
 import org.apache.hadoop.ozone.om.KeyManager;
@@ -48,7 +47,6 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -131,11 +129,10 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception {
     for (int i = 0; i < 2000; ++i) {
       String keyName = "key" + longName + i;
       OmKeyInfo omKeyInfo =
-          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
-              .setObjectID(dir1.getObjectID() + 1 + i)
-              .setParentObjectID(dir1.getObjectID())
-              .setUpdateID(100L)
-              .build();
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName,
+              keyName, HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.ONE, dir1.getObjectID() + 1 + i,
+              dir1.getObjectID(), 100, Time.now());
       OMRequestTestUtils.addFileToKeyTable(false, true, keyName, omKeyInfo,
           1234L, i + 1, om.getMetadataManager());
     }
@@ -146,7 +143,7 @@ public void
testDeleteDirectoryCrossingSizeLimit() throws Exception { .setBucketName(bucketName) .setKeyName("dir" + longName) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - ONE)) + HddsProtos.ReplicationFactor.ONE)) .setDataSize(0).setRecursive(true) .build(); writeClient.deleteKey(delArgs); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java index 1a0db1183311..5ac7835f8ce6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java @@ -19,11 +19,8 @@ package org.apache.hadoop.ozone.om.service; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -51,7 +48,8 @@ public void testQuotaRepair() throws Exception { String parentDir = "/user"; for (int i = 0; i < count; i++) { OMRequestTestUtils.addKeyToTableAndCache(volumeName, bucketName, - parentDir.concat("/key" + i), -1, RatisReplicationConfig.getInstance(THREE), 150 + i, omMetadataManager); + parentDir.concat("/key" + i), -1, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, 150 + i, omMetadataManager); } String fsoBucketName = "fso" + bucketName; @@ -61,13 +59,12 @@ public void testQuotaRepair() throws Exception { fsoBucketName, "c/d/e", omMetadataManager); for (int i = 0; i < count; i++) { String fileName = "file1" + i; - OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, fsoBucketName, fileName, - RatisReplicationConfig.getInstance(ONE)) - .setObjectID(parentId + 1 + i) - .setParentObjectID(parentId) - .setUpdateID(100L + i) - .build(); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, fsoBucketName, fileName, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + parentId + 1 + i, + parentId, 100 + i, Time.now()); omKeyInfo.setKeyName(fileName); OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50 + i, omMetadataManager); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index e6892d9784db..f60ba6ddf42d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -190,7 +190,7 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); this.config = conf; } diff --git 
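Both filesystem adapters resolve the datanode port the same way after this rename. A minimal sketch of that lookup, assuming an OzoneConfiguration handle; note the patch renames only the key constant, while the default value still comes from the old DFS_-prefixed constant:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

final class DnPortLookup {
  private DnPortLookup() { }

  // Reads the renamed ozone.* container IPC port key, falling back to the
  // default constant, whose DFS_ name this patch leaves unchanged.
  static int configuredDnPort(OzoneConfiguration conf) {
    return conf.getInt(
        OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT,
        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
  }
}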
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 880427861205..0297a64277b0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -198,7 +198,7 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, proxy = objectStore.getClientProxy(); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + OzoneConfigKeys.OZONE_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index baa9c522be10..84f55749a68f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -58,7 +58,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -653,36 +652,6 @@ public Response getDeletedDirInfo( return Response.ok(deletedDirInsightInfo).build(); } - /** - * Retrieves the summary of deleted directories. - * - * This method calculates and returns a summary of deleted directories. - * @return The HTTP response body includes a map with the following entries: - * - "totalDeletedDirectories": the total number of deleted directories - * - * Example response: - * { - * "totalDeletedDirectories": 8, - * } - */ - @GET - @Path("/deletePending/dirs/summary") - public Response getDeletedDirectorySummary() { - Map dirSummary = new HashMap<>(); - // Create a keys summary for deleted directories - createSummaryForDeletedDirectories(dirSummary); - return Response.ok(dirSummary).build(); - } - - private void createSummaryForDeletedDirectories( - Map dirSummary) { - // Fetch the necessary metrics for deleted directories. 
- Long deletedDirCount = getValueFromId(globalStatsDao.findById( - OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE))); - // Calculate the total number of deleted directories - dirSummary.put("totalDeletedDirectories", deletedDirCount); - } - private void updateReplicatedAndUnReplicatedTotal( KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java deleted file mode 100644 index 5a6d7a256e49..000000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; - -/** - * Manages records in the Deleted Table, updating counts and sizes of - * pending Key Deletions in the backend. - */ -public class DeletedKeysInsightHandler implements OmTableHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(DeletedKeysInsightHandler.class); - - /** - * Invoked by the process method to add information on those keys that have - * been backlogged in the backend for deletion. - */ - @Override - public void handlePutEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() != null) { - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + result.getLeft()); - replicatedSizeMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + result.getRight()); - } else { - LOG.warn("Put event does not have the Key Info for {}.", - event.getKey()); - } - - } - - /** - * Invoked by the process method to remove information on those keys that have - * been successfully deleted from the backend. - */ - @Override - public void handleDeleteEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() != null) { - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> - count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); - replicatedSizeMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > result.getRight() ? size - result.getRight() : - 0L); - } else { - LOG.warn("Delete event does not have the Key Info for {}.", - event.getKey()); - } - } - - /** - * Invoked by the process method to update the statistics on the keys - * pending to be deleted. 
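The bookkeeping the deleted handler performs, and which this patch inlines into OmTableInsightTask further below, is symmetric: a PUT adds a batch's key count and sizes, while a DELETE subtracts them with a guard so the stats cannot go negative. A self-contained sketch of that pattern, with the map generics (stripped in the text above) assumed to be String keys and Long values:

import java.util.HashMap;
import java.util.Map;

final class StatsBookkeeping {
  private StatsBookkeeping() { }

  // PUT: grow an existing counter by delta. Absent keys stay untouched,
  // mirroring the computeIfPresent calls in the handlers above.
  static void add(Map<String, Long> stats, String key, long delta) {
    stats.computeIfPresent(key, (k, v) -> v + delta);
  }

  // DELETE: shrink the counter, clamping at zero (a simplified, uniform
  // version of the guards used above).
  static void subtractClamped(Map<String, Long> stats, String key, long delta) {
    stats.computeIfPresent(key, (k, v) -> v > delta ? v - delta : 0L);
  }

  public static void main(String[] args) {
    Map<String, Long> stats = new HashMap<>();
    stats.put("deletedTableCount", 0L);
    add(stats, "deletedTableCount", 3L);             // three keys queued
    subtractClamped(stats, "deletedTableCount", 5L); // over-delete clamps to 0
    System.out.println(stats);                       // {deletedTableCount=0}
  }
}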
- */ - @Override - public void handleUpdateEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - // The size of deleted keys cannot change hence no-op. - return; - } - - /** - * Invoked by the reprocess method to calculate the records count of the - * deleted table and the sizes of replicated and unreplicated keys that are - * pending deletion in Ozone. - */ - @Override - public Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv - .getValue(); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSize += result.getRight(); - replicatedSize += result.getLeft(); - count += repeatedOmKeyInfo.getOmKeyInfoList().size(); - } - } - } - return Triple.of(count, unReplicatedSize, replicatedSize); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java deleted file mode 100644 index 5ae23b68a703..000000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; - -import java.io.IOException; -import java.util.HashMap; - -/** - * Interface for handling PUT, DELETE and UPDATE events for size-related - * tables for OM Insights. - */ -public interface OmTableHandler { - - /** - * Handles a PUT event for size-related tables by updating both the data - * sizes and their corresponding record counts in the tables. - * - * @param event The PUT event to be processed. - * @param tableName Table name associated with the event. - * @param objectCountMap A map storing object counts. - * @param unReplicatedSizeMap A map storing unReplicated size counts. - * @param replicatedSizeMap A map storing replicated size counts. - */ - void handlePutEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap); - - - /** - * Handles a DELETE event for size-related tables by updating both the data - * sizes and their corresponding record counts in the tables. - * - * @param event The DELETE event to be processed. - * @param tableName Table name associated with the event. - * @param objectCountMap A map storing object counts. - * @param unReplicatedSizeMap A map storing unReplicated size counts. - * @param replicatedSizeMap A map storing replicated size counts. - */ - void handleDeleteEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap); - - - /** - * Handles an UPDATE event for size-related tables by updating both the data - * sizes and their corresponding record counts in the tables. - * - * @param event The UPDATE event to be processed. - * @param tableName Table name associated with the event. - * @param objectCountMap A map storing object counts. - * @param unReplicatedSizeMap A map storing unReplicated size counts. - * @param replicatedSizeMap A map storing replicated size counts. - */ - void handleUpdateEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap); - - - /** - * Returns a triple with the total count of records (left), total unreplicated - * size (middle), and total replicated size (right) in the given iterator. - * Increments count for each record and adds the dataSize if a record's value - * is an instance of OmKeyInfo,RepeatedOmKeyInfo. - * If the iterator is null, returns (0, 0, 0). - * - * @param iterator The iterator over the table to be iterated. - * @return A Triple with three Long values representing the count, - * unReplicated size and replicated size. - * @throws IOException If an I/O error occurs during the iterator traversal. - */ - Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException; - - - /** - * Returns the count key for the given table. - * - * @param tableName The name of the table. - * @return The count key for the table. 
- */ - default String getTableCountKeyFromTable(String tableName) { - return tableName + "Count"; - } - - /** - * Returns the replicated size key for the given table. - * - * @param tableName The name of the table. - * @return The replicated size key for the table. - */ - default String getReplicatedSizeKeyFromTable(String tableName) { - return tableName + "ReplicatedDataSize"; - } - - /** - * Returns the unreplicated size key for the given table. - * - * @param tableName The name of the table. - * @return The unreplicated size key for the table. - */ - default String getUnReplicatedSizeKeyFromTable(String tableName) { - return tableName + "UnReplicatedDataSize"; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index 3e84f311c942..c814d9d9e33f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -35,20 +37,22 @@ import java.io.IOException; import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; -import java.util.Collection; + + import java.util.Map.Entry; -import java.util.ArrayList; -import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; -import static org.jooq.impl.DSL.currentTimestamp; /** * Class to iterate over the OM DB and store the total counts of volumes, @@ -61,21 +65,14 @@ public class OmTableInsightTask implements ReconOmTask { private GlobalStatsDao globalStatsDao; private Configuration sqlConfiguration; private ReconOMMetadataManager reconOMMetadataManager; - private Map tableHandlers; @Inject public OmTableInsightTask(GlobalStatsDao globalStatsDao, - Configuration sqlConfiguration, - ReconOMMetadataManager reconOMMetadataManager) { + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { this.globalStatsDao = globalStatsDao; this.sqlConfiguration = sqlConfiguration; this.reconOMMetadataManager = reconOMMetadataManager; - - // Initialize table handlers - tableHandlers = new HashMap<>(); - tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler()); - tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler()); - tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler()); } /** @@ -93,8 +90,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao, @Override public Pair reprocess(OMMetadataManager 
omMetadataManager) { HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeMap = initializeSizeMap(false); - HashMap replicatedSizeMap = initializeSizeMap(true); + HashMap unReplicatedSizeCountMap = initializeSizeMap(false); + HashMap replicatedSizeCountMap = initializeSizeMap(true); for (String tableName : getTaskTables()) { Table table = omMetadataManager.getTable(tableName); @@ -103,16 +100,16 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } - try (TableIterator> iterator - = table.iterator()) { - if (tableHandlers.containsKey(tableName)) { - Triple details = - tableHandlers.get(tableName).getTableSizeAndCount(iterator); + try ( + TableIterator> iterator + = table.iterator()) { + if (getTablesToCalculateSize().contains(tableName)) { + Triple details = getTableSizeAndCount(iterator); objectCountMap.put(getTableCountKeyFromTable(tableName), details.getLeft()); - unReplicatedSizeMap.put( + unReplicatedSizeCountMap.put( getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle()); - replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName), + replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName), details.getRight()); } else { long count = Iterators.size(iterator); @@ -127,17 +124,72 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeMap.isEmpty()) { - writeDataToDB(unReplicatedSizeMap); + if (!unReplicatedSizeCountMap.isEmpty()) { + writeDataToDB(unReplicatedSizeCountMap); } - if (!replicatedSizeMap.isEmpty()) { - writeDataToDB(replicatedSizeMap); + if (!replicatedSizeCountMap.isEmpty()) { + writeDataToDB(replicatedSizeCountMap); } LOG.info("Completed a 'reprocess' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } + /** + * Returns a triple with the total count of records (left), total unreplicated + * size (middle), and total replicated size (right) in the given iterator. + * Increments count for each record and adds the dataSize if a record's value + * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0). + * + * @param iterator The iterator over the table to be iterated. + * @return A Triple with three Long values representing the count, + * unreplicated size and replicated size. + * @throws IOException If an I/O error occurs during the iterator traversal. + */ + private Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + if (kv.getValue() instanceof OmKeyInfo) { + OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); + unReplicatedSize += omKeyInfo.getDataSize(); + replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + if (kv.getValue() instanceof RepeatedOmKeyInfo) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSize += result.getRight(); + replicatedSize += result.getLeft(); + // Since we can have multiple deleted keys of same name + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); + } + } + } + } + + return Triple.of(count, unReplicatedSize, replicatedSize); + } + + /** + * Returns a collection of table names that require data size calculation. 
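Each tracked table is keyed into GlobalStats by naming convention: one count row, plus two data-size rows for the size-related tables (open keys, open files, deleted keys). A sketch of that scheme using the suffixes from the defaults deleted above; the "deletedTable" literal is an assumption about the OM table-name constant's value:

final class GlobalStatsKeys {
  private GlobalStatsKeys() { }

  static String countKey(String tableName) {
    return tableName + "Count";
  }

  static String replicatedSizeKey(String tableName) {
    return tableName + "ReplicatedDataSize";
  }

  static String unReplicatedSizeKey(String tableName) {
    return tableName + "UnReplicatedDataSize";
  }

  public static void main(String[] args) {
    // e.g. the three rows maintained for the deleted-keys table:
    System.out.println(countKey("deletedTable"));            // deletedTableCount
    System.out.println(unReplicatedSizeKey("deletedTable")); // deletedTableUnReplicatedDataSize
    System.out.println(replicatedSizeKey("deletedTable"));   // deletedTableReplicatedDataSize
  }
}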
+ */ + public Collection getTablesToCalculateSize() { + List taskTables = new ArrayList<>(); + taskTables.add(OPEN_KEY_TABLE); + taskTables.add(OPEN_FILE_TABLE); + taskTables.add(DELETED_TABLE); + return taskTables; + } + @Override public String getTaskName() { return "OmTableInsightTask"; @@ -159,9 +211,10 @@ public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeMap = initializeSizeMap(false); - HashMap replicatedSizeMap = initializeSizeMap(true); + HashMap unreplicatedSizeCountMap = initializeSizeMap(false); + HashMap replicatedSizeCountMap = initializeSizeMap(true); final Collection taskTables = getTaskTables(); + final Collection sizeRelatedTables = getTablesToCalculateSize(); // Process each update event while (eventIterator.hasNext()) { @@ -170,21 +223,22 @@ public Pair process(OMUpdateEventBatch events) { if (!taskTables.contains(tableName)) { continue; } + try { switch (omdbUpdateEvent.getAction()) { case PUT: - handlePutEvent(omdbUpdateEvent, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); + handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; case DELETE: - handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); + handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; case UPDATE: - handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); + handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; default: @@ -202,11 +256,11 @@ public Pair process(OMUpdateEventBatch events) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeMap.isEmpty()) { - writeDataToDB(unReplicatedSizeMap); + if (!unreplicatedSizeCountMap.isEmpty()) { + writeDataToDB(unreplicatedSizeCountMap); } - if (!replicatedSizeMap.isEmpty()) { - writeDataToDB(replicatedSizeMap); + if (!replicatedSizeCountMap.isEmpty()) { + writeDataToDB(replicatedSizeCountMap); } LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); @@ -214,34 +268,65 @@ public Pair process(OMUpdateEventBatch events) { private void handlePutEvent(OMDBUpdateEvent event, String tableName, + Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) - throws IOException { - OmTableHandler tableHandler = tableHandlers.get(tableName); - if (event.getValue() != null) { - if (tableHandler != null) { - tableHandler.handlePutEvent(event, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); - } else { - String countKey = getTableCountKeyFromTable(tableName); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - } + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + + if (sizeRelatedTables.contains(tableName)) { + handleSizeRelatedTablePutEvent(event, tableName, objectCountMap, + unreplicatedSizeCountMap, replicatedSizeCountMap); + } else { + String countKey = getTableCountKeyFromTable(tableName); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } + } + + private void handleSizeRelatedTablePutEvent( + 
OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() instanceof OmKeyInfo) { + // Handle PUT for OpenKeyTable & OpenFileTable + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else if (event.getValue() instanceof RepeatedOmKeyInfo) { + // Handle PUT for DeletedTable + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); } } private void handleDeleteEvent(OMDBUpdateEvent event, String tableName, + Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) - throws IOException { - OmTableHandler tableHandler = tableHandlers.get(tableName); + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + if (event.getValue() != null) { - if (tableHandler != null) { - tableHandler.handleDeleteEvent(event, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); + if (sizeRelatedTables.contains(tableName)) { + handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap, + unreplicatedSizeCountMap, replicatedSizeCountMap); } else { String countKey = getTableCountKeyFromTable(tableName); objectCountMap.computeIfPresent(countKey, @@ -250,28 +335,109 @@ private void handleDeleteEvent(OMDBUpdateEvent event, } } + private void handleSizeRelatedTableDeleteEvent( + OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() instanceof OmKeyInfo) { + // Handle DELETE for OpenKeyTable & OpenFileTable + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else if (event.getValue() instanceof RepeatedOmKeyInfo) { + // Handle DELETE for DeletedTable + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ? 
+ count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } + } private void handleUpdateEvent(OMDBUpdateEvent event, String tableName, + Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { - OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (tableHandler != null) { + if (sizeRelatedTables.contains(tableName)) { // Handle update for only size related tables - tableHandler.handleUpdateEvent(event, tableName, objectCountMap, - unReplicatedSizeMap, replicatedSizeMap); + handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap, + unreplicatedSizeCountMap, replicatedSizeCountMap); } } } - /** - * Write the updated count and size information to the database. - * - * @param dataMap Map containing the updated count and size information. - */ + + private void handleSizeRelatedTableUpdateEvent( + OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + + if (event.getOldValue() == null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + return; + } + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + // In Update event the count for the open table will not change. So we don't + // need to update the count. Except for RepeatedOmKeyInfo, for which the + // size of omKeyInfoList can change + if (event.getValue() instanceof OmKeyInfo) { + // Handle UPDATE for OpenKeyTable & OpenFileTable + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else if (event.getValue() instanceof RepeatedOmKeyInfo) { + // Handle UPDATE for DeletedTable + RepeatedOmKeyInfo oldRepeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getOldValue(); + RepeatedOmKeyInfo newRepeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? 
+ count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + + newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); + Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldSize.getRight() + newSize.getRight()); + } + } + + private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); List updateGlobalStats = new ArrayList<>(); @@ -295,11 +461,6 @@ private void writeDataToDB(Map dataMap) { globalStatsDao.update(updateGlobalStats); } - /** - * Initializes and returns a count map with the counts for the tables. - * - * @return The count map containing the counts for each table. - */ private HashMap initializeCountMap() { Collection tables = getTaskTables(); HashMap objectCountMap = new HashMap<>(tables.size()); @@ -317,13 +478,11 @@ private HashMap initializeCountMap() { * @return The size map containing the size counts for each table. */ private HashMap initializeSizeMap(boolean replicated) { - HashMap sizeCountMap = new HashMap<>(); - for (Map.Entry entry : tableHandlers.entrySet()) { - String tableName = entry.getKey(); - OmTableHandler tableHandler = entry.getValue(); - String key = - replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) : - tableHandler.getUnReplicatedSizeKeyFromTable(tableName); + Collection tables = getTablesToCalculateSize(); + HashMap sizeCountMap = new HashMap<>(tables.size()); + for (String tableName : tables) { + String key = replicated ? getReplicatedSizeKeyFromTable(tableName) : + getUnReplicatedSizeKeyFromTable(tableName); sizeCountMap.put(key, getValueForKey(key)); } return sizeCountMap; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java deleted file mode 100644 index 7a27d29d8f28..000000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; - -/** - * Manages records in the OpenKey Table, updating counts and sizes of - * open keys in the backend. - */ -public class OpenKeysInsightHandler implements OmTableHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(OpenKeysInsightHandler.class); - - /** - * Invoked by the process method to add information on those keys that have - * been open in the backend. - */ - @Override - public void handlePutEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() != null) { - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } else { - LOG.warn("Put event does not have the Key Info for {}.", - event.getKey()); - } - } - - /** - * Invoked by the process method to delete information on those keys that are - * no longer closed in the backend. - */ - @Override - public void handleDeleteEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() != null) { - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > omKeyInfo.getReplicatedSize() ? - size - omKeyInfo.getReplicatedSize() : 0L); - } else { - LOG.warn("Delete event does not have the Key Info for {}.", - event.getKey()); - } - } - - /** - * Invoked by the process method to update information on those open keys that - * have been updated in the backend. 
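On an UPDATE the key count is unchanged for the open-key tables, so only the size rows move: the old value's contribution is subtracted and the new value's added inside one computeIfPresent lambda. A sketch of that delta, reusing the String-to-Long stat maps assumed earlier; the OmKeyInfo getters are the ones visible in the hunks:

import java.util.Map;

import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class UpdateDelta {
  private UpdateDelta() { }

  // Replace oldInfo's contribution with newInfo's in a single adjustment;
  // counts are untouched because an update neither adds nor removes a key.
  static void applyUpdate(Map<String, Long> unReplicatedSize,
      Map<String, Long> replicatedSize, String unReplKey, String replKey,
      OmKeyInfo oldInfo, OmKeyInfo newInfo) {
    unReplicatedSize.computeIfPresent(unReplKey,
        (k, size) -> size - oldInfo.getDataSize() + newInfo.getDataSize());
    replicatedSize.computeIfPresent(replKey,
        (k, size) -> size - oldInfo.getReplicatedSize()
            + newInfo.getReplicatedSize());
  }
}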
- */ - @Override - public void handleUpdateEvent(OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unReplicatedSizeMap, - HashMap replicatedSizeMap) { - - if (event.getValue() != null) { - if (event.getOldValue() == null) { - LOG.warn("Update event does not have the old Key Info for {}.", - event.getKey()); - return; - } - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - // In Update event the count for the open table will not change. So we - // don't need to update the count. - OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); - unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else { - LOG.warn("Update event does not have the Key Info for {}.", - event.getKey()); - } - } - - /** - * This method is called by the reprocess method. It calculates the record - * counts for both the open key table and the open file table. Additionally, - * it computes the sizes of both replicated and unreplicated keys - * that are currently open in the backend. - */ - @Override - public Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); - unReplicatedSize += omKeyInfo.getDataSize(); - replicatedSize += omKeyInfo.getReplicatedSize(); - count++; - } - } - } - return Triple.of(count, unReplicatedSize, replicatedSize); - } - -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index b1aecc9a4f4e..42d69e030f31 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -397,31 +397,23 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager, .build()); } - @SuppressWarnings("parameternumber") public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager, String bucketName, String volumeName, String dirName, long parentObjectId, long bucketObjectId, - long volumeObjectId, - long objectId) + long volumeObjectId) throws IOException { - // DB key in DeletedDirectoryTable => - // "volumeID/bucketID/parentId/dirName/dirObjectId" - - String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, dirName); - String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey( - objectId, ozoneDbKey); - + // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName" + String omKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, dirName); - omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey, + omMetadataManager.getDeletedDirTable().put(omKey, new OmKeyInfo.Builder() .setBucketName(bucketName) .setVolumeName(volumeName) .setKeyName(dirName) - .setObjectID(objectId) 
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 42aabef0cf15..05d9927d6c93 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -288,9 +288,8 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = - new OmTableInsightTask(globalStatsDao, sqlConfiguration, - reconOMMetadataManager); + omTableInsightTask = new OmTableInsightTask( + globalStatsDao, sqlConfiguration, reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -516,11 +515,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L, 23L); + 3L, 2L, 1L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L, 22L); + 6L, 5L, 4L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L, 21L); + 9L, 8L, 7L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -595,7 +594,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -700,7 +699,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 56d8fe213152..df014f4276fa 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,28 +21,20 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import 
org.apache.hadoop.ozone.recon.ReconTestInjector; -import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; +import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; + import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -52,20 +44,18 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -76,83 +66,29 @@ import static org.mockito.Mockito.when; /** - * This test class is designed for the OM Table Insight Task. It conducts tests - * for tables that require both Size and Count, as well as for those that only - * require Count. + * Unit test for Object Count Task. 
*/ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private static GlobalStatsDao globalStatsDao; - private static OmTableInsightTask omTableInsightTask; - private static DSLContext dslContext; + private GlobalStatsDao globalStatsDao; + private OmTableInsightTask omTableInsightTask; + private DSLContext dslContext; private boolean isSetupDone = false; - private static ReconOMMetadataManager reconOMMetadataManager; - private static NSSummaryTaskWithFSO nSSummaryTaskWithFso; - private static OzoneConfiguration ozoneConfiguration; - private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; - - // Object names in FSO-enabled format - private static final String VOL = "volume1"; - private static final String BUCKET_ONE = "bucket1"; - private static final String BUCKET_TWO = "bucket2"; - private static final String KEY_ONE = "file1"; - private static final String KEY_TWO = "file2"; - private static final String KEY_THREE = "dir1/dir2/file3"; - private static final String FILE_ONE = "file1"; - private static final String FILE_TWO = "file2"; - private static final String FILE_THREE = "file3"; - private static final String DIR_ONE = "dir1"; - private static final String DIR_TWO = "dir2"; - private static final String DIR_THREE = "dir3"; - - - private static final long VOL_OBJECT_ID = 0L; - private static final long BUCKET_ONE_OBJECT_ID = 1L; - private static final long BUCKET_TWO_OBJECT_ID = 2L; - private static final long KEY_ONE_OBJECT_ID = 3L; - private static final long DIR_ONE_OBJECT_ID = 14L; - private static final long KEY_TWO_OBJECT_ID = 5L; - private static final long DIR_TWO_OBJECT_ID = 17L; - private static final long KEY_THREE_OBJECT_ID = 8L; - private static final long DIR_THREE_OBJECT_ID = 10L; - - private static final long KEY_ONE_SIZE = 500L; - private static final long KEY_TWO_SIZE = 1025L; - private static final long KEY_THREE_SIZE = 2000L; - - // mock client's path requests - private static final String TEST_USER = "TestUser"; - - @Mock - private Table nsSummaryTable; + private ReconOMMetadataManager reconOMMetadataManager; public TestOmTableInsightTask() { super(); } private void initializeInjector() throws IOException { - ozoneConfiguration = new OzoneConfiguration(); reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( temporaryFolder.resolve("JunitOmDBDir")).toFile()), Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); globalStatsDao = getDao(GlobalStatsDao.class); - - ReconTestInjector reconTestInjector = - new ReconTestInjector.Builder(temporaryFolder.toFile()) - .withReconSqlDb() - .withReconOm(reconOMMetadataManager) - .withContainerDB() - .build(); - reconNamespaceSummaryManager = reconTestInjector.getInstance( - ReconNamespaceSummaryManagerImpl.class); - omTableInsightTask = new OmTableInsightTask( globalStatsDao, getConfiguration(), reconOMMetadataManager); - nSSummaryTaskWithFso = new NSSummaryTaskWithFSO( - reconNamespaceSummaryManager, reconOMMetadataManager, - ozoneConfiguration); dslContext = getDslContext(); } @@ -163,182 +99,10 @@ public void setUp() throws IOException { initializeInjector(); isSetupDone = true; } - MockitoAnnotations.openMocks(this); // Truncate table before running each test dslContext.truncate(GLOBAL_STATS); } - /** - * Populate OM-DB with the following structure. 
-   * volume1
-   * |      \
-   * bucket1  bucket2
-   * /    \       \
-   * dir1  dir2    dir3
-   * /  \      \
-   * file1  file2  file3
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-
-    // Create 2 Buckets bucket1 and bucket2
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .build();
-    String bucketKey = reconOMMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .build();
-    bucketKey = reconOMMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2);
-
-    // Create a single volume named volume1
-    String volumeKey = reconOMMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    // Generate keys for the File Table
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        FILE_ONE,
-        KEY_ONE_OBJECT_ID,
-        DIR_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_ONE,
-        VOL,
-        FILE_TWO,
-        KEY_TWO_OBJECT_ID,
-        DIR_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        FILE_THREE,
-        KEY_THREE_OBJECT_ID,
-        DIR_TWO_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-
-    // Generate Deleted Directories in OM
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_ONE,
-        VOL,
-        DIR_ONE,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_ONE_OBJECT_ID);
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_ONE,
-        VOL,
-        DIR_TWO,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_TWO_OBJECT_ID);
-    writeDeletedDirToOm(reconOMMetadataManager,
-        BUCKET_TWO,
-        VOL,
-        DIR_THREE,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        DIR_THREE_OBJECT_ID);
-  }
-
-  @Test
-  public void testReprocessForDeletedDirectory() throws Exception {
-    // Create keys and deleted directories
-    populateOMDB();
-
-    // Generate NamespaceSummary for the OM DB
-    nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
-
-    Pair<String, Boolean> result =
-        omTableInsightTask.reprocess(reconOMMetadataManager);
-    assertTrue(result.getRight());
-    assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
-  }
-
-  @Test
-  public void testProcessForDeletedDirectoryTable() throws IOException {
-    // Prepare mock data size
-    Long expectedSize1 = 1000L;
-    Long expectedSize2 = 2000L;
-    NSSummary nsSummary1 = new NSSummary();
-    NSSummary nsSummary2 = new NSSummary();
-    nsSummary1.setSizeOfFiles(expectedSize1);
-    nsSummary2.setSizeOfFiles(expectedSize2);
-    when(nsSummaryTable.get(1L)).thenReturn(nsSummary1);
-    when(nsSummaryTable.get(2L)).thenReturn(nsSummary1);
-    when(nsSummaryTable.get(3L)).thenReturn(nsSummary2);
-    when(nsSummaryTable.get(4L)).thenReturn(nsSummary2);
-    when(nsSummaryTable.get(5L)).thenReturn(nsSummary2);
-
-    /* DB key in DeletedDirectoryTable =>
-       "/volumeId/bucketId/parentId/dirName/dirObjectId" */
-    List<String> paths = Arrays.asList(
-        "/18/28/22/dir1/1",
-        "/18/26/23/dir1/2",
-        "/18/20/24/dir1/3",
-        "/18/21/25/dir1/4",
-        "/18/27/26/dir1/5"
-    );
-
-    // Testing PUT events
-    // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory paths
-    ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
-    for (long i = 0L; i < 5L; i++) {
-      putEvents.add(getOMUpdateEvent(paths.get((int) i),
-          getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false),
-          DELETED_DIR_TABLE, PUT, null));
-    }
-    OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
-    omTableInsightTask.process(putEventBatch);
-    assertEquals(5, getCountForTable(DELETED_DIR_TABLE));
-
-
-    // Testing DELETE events
-    // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory paths
-    ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
-    deleteEvents.add(getOMUpdateEvent(paths.get(0),
-        getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE,
-        DELETE, null));
-    deleteEvents.add(getOMUpdateEvent(paths.get(2),
-        getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE,
-        DELETE, null));
-    OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
-    omTableInsightTask.process(deleteEventBatch);
-    assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
-  }
-
   @Test
   public void testReprocessForCount() throws Exception {
     OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
@@ -346,32 +110,27 @@ public void testReprocessForCount() throws Exception {
     // Mock 5 rows in each table and test the count
     for (String tableName : omTableInsightTask.getTaskTables()) {
       TypedTable table = mock(TypedTable.class);
-      TypedTable.TypedTableIterator mockIter =
-          mock(TypedTable.TypedTableIterator.class);
+      TypedTable.TypedTableIterator mockIter = mock(TypedTable
+          .TypedTableIterator.class);
       when(table.iterator()).thenReturn(mockIter);
       when(omMetadataManager.getTable(tableName)).thenReturn(table);
-      when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false);
-
+      when(mockIter.hasNext())
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(true)
+          .thenReturn(false);
       TypedTable.TypedKeyValue mockKeyValue =
          mock(TypedTable.TypedKeyValue.class);
-
-      if (tableName.equals(DELETED_TABLE)) {
-        RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class);
-        when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L));
-        when(keyInfo.getOmKeyInfoList()).thenReturn(
-            Arrays.asList(mock(OmKeyInfo.class)));
-        when(mockKeyValue.getValue()).thenReturn(keyInfo);
-      } else {
-        when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
-      }
-
+      when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
      when(mockIter.next()).thenReturn(mockKeyValue);
    }
 
     Pair<String, Boolean> result =
         omTableInsightTask.reprocess(omMetadataManager);
-
     assertTrue(result.getRight());
+
     assertEquals(5L, getCountForTable(KEY_TABLE));
     assertEquals(5L, getCountForTable(VOLUME_TABLE));
     assertEquals(5L, getCountForTable(BUCKET_TABLE));
@@ -379,6 +138,7 @@ public void testReprocessForCount() throws Exception {
     assertEquals(5L, getCountForTable(DELETED_TABLE));
   }
 
+
   @Test
   public void testReprocessForOpenKeyTable() throws Exception {
     // Populate the OpenKeys table in OM DB
@@ -443,73 +203,44 @@ public void testReprocessForDeletedTable() throws Exception {
 
   @Test
   public void testProcessForCount() {
-    List<OMDBUpdateEvent> initialEvents = new ArrayList<>();
-
-    // Creating events for each table except the deleted table
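+    // Counter semantics: a PUT increments the table's counter, a DELETE with
+    // a non-null value decrements it, and an UPDATE leaves it unchanged.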
+    ArrayList<OMDBUpdateEvent> events = new ArrayList<>();
+    // Create 5 put, 1 delete and 1 update event for each table
     for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue; // Skipping deleted table as it has a separate test
-      }
-
-      // Adding 5 PUT events per table
       for (int i = 0; i < 5; i++) {
-        initialEvents.add(
-            getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT,
-                null));
+        events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null));
       }
-
-      // Adding 1 DELETE event where value is null, indicating non-existence
-      // in the database.
-      initialEvents.add(
-          getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE,
-              null));
-      // Adding 1 UPDATE event. This should not affect the count.
-      initialEvents.add(
-          getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE,
-              mock(OmKeyInfo.class)));
+      // For a DELETE event, the counter is only decremented when the value
+      // is non-null; a null value means the item does not exist in the
+      // database, so there is nothing to delete.
+      events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName,
+          DELETE, null));
+      events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null));
     }
+    OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events);
+    omTableInsightTask.process(omUpdateEventBatch);
 
-    // Processing the initial batch of events
-    OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents);
-    omTableInsightTask.process(initialBatch);
+    // Verify 4 items in each table. (5 puts - 1 delete + 0 update)
+    assertEquals(4L, getCountForTable(KEY_TABLE));
+    assertEquals(4L, getCountForTable(VOLUME_TABLE));
+    assertEquals(4L, getCountForTable(BUCKET_TABLE));
+    assertEquals(4L, getCountForTable(FILE_TABLE));
 
-    // Verifying the count in each table
+    // Add a new key and simulate a delete on a non-existing item (value: null)
+    ArrayList<OMDBUpdateEvent> newEvents = new ArrayList<>();
     for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      assertEquals(4L, getCountForTable(
-          tableName)); // 4 items expected after processing (5 puts - 1 delete)
+      newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null));
+      // This delete event should be a no-op since the value is null
+      newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null));
     }
 
-    List<OMDBUpdateEvent> additionalEvents = new ArrayList<>();
-    // Simulating new PUT and DELETE events
-    for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      // Adding 1 new PUT event
-      additionalEvents.add(
-          getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT,
-              null));
-      // Attempting to delete a non-existing item (value: null)
-      additionalEvents.add(
-          getOMUpdateEvent("item0", null, tableName, DELETE, null));
-    }
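+    // Replay the second batch; only the PUT should change the counters.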
+    omUpdateEventBatch = new OMUpdateEventBatch(newEvents);
+    omTableInsightTask.process(omUpdateEventBatch);
 
-    // Processing the additional events
-    OMUpdateEventBatch additionalBatch =
-        new OMUpdateEventBatch(additionalEvents);
-    omTableInsightTask.process(additionalBatch);
-    // Verifying the final count in each table
-    for (String tableName : omTableInsightTask.getTaskTables()) {
-      if (tableName.equals(DELETED_TABLE)) {
-        continue;
-      }
-      // 5 items expected after processing the additional events.
-      assertEquals(5L, getCountForTable(
-          tableName));
-    }
+    // Verify 5 items in each table. (1 new put + 0 delete)
+    assertEquals(5L, getCountForTable(KEY_TABLE));
+    assertEquals(5L, getCountForTable(VOLUME_TABLE));
+    assertEquals(5L, getCountForTable(BUCKET_TABLE));
+    assertEquals(5L, getCountForTable(FILE_TABLE));
   }
 
   @Test
@@ -520,38 +251,35 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned);
     when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3);
 
-    // Test PUT events.
-    // Add 5 PUT events for OpenKeyTable and OpenFileTable.
+    // Test PUT events
     ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE;
-      putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null));
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+      for (int i = 0; i < 5; i++) {
+        putEvents.add(
+            getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null));
+      }
     }
-
     OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
     omTableInsightTask.process(putEventBatch);
 
-    // After 5 PUTs, size should be 5 * 1000 = 5000
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
 
     // Test DELETE events
     ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
-    // Delete "item0" for OpenKeyTable and OpenFileTable.
-    deleteEvents.add(
-        getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null));
-    deleteEvents.add(
-        getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null));
-
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+      // Delete "item0"
+      deleteEvents.add(
+          getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null));
+    }
     OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
     omTableInsightTask.process(deleteEventBatch);
 
     // After deleting "item0", size should be 4 * 1000 = 4000
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
      assertEquals(4000L, getUnReplicatedSizeForTable(tableName));
      assertEquals(12000L, getReplicatedSizeForTable(tableName));
    }
@@ -559,8 +287,7 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     // Test UPDATE events
     ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
     Long newSizeToBeReturned = 2000L;
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
       // Update "item1" with a new size
       OmKeyInfo newKeyInfo = mock(OmKeyInfo.class);
       when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned);
@@ -568,14 +295,12 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
       updateEvents.add(
          getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo));
     }
-
     OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
     omTableInsightTask.process(updateEventBatch);
 
     // After updating "item1", size should be 4000 - 1000 + 2000 = 5000
     // presentValue - oldValue + newValue = updatedValue
-    for (String tableName : new ArrayList<>(
-        Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
+    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
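+      // Sizes are recomputed as presentValue - oldValue + newValue, and the
+      // replicated size stays at three times the unreplicated size.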
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
@@ -588,10 +313,9 @@ public void testProcessForDeletedTable() {
         new ImmutablePair<>(1000L, 3000L);
     ArrayList<OmKeyInfo> omKeyInfoList = new ArrayList<>();
     // Add 5 OmKeyInfo objects to the list
-    for (long i = 0; i < 5; i++) {
+    for (int i = 0; i < 5; i++) {
       OmKeyInfo omKeyInfo =
-          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1,
-              true);
+          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true);
       // Set properties of OmKeyInfo object if needed
       omKeyInfoList.add(omKeyInfo);
     }
@@ -629,14 +353,38 @@ public void testProcessForDeletedTable() {
     // After deleting "item0", size should be 4 * 1000 = 4000
     assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE));
     assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE));
+
+
+    // Test UPDATE events
+    ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
+    // Update "item1" with new sizes
+    ImmutablePair<Long, Long> newSizesToBeReturned =
+        new ImmutablePair<>(500L, 1500L);
+    RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class);
+    when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned);
+    when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(
+        omKeyInfoList.subList(1, 5));
+    // For item1, newSize=500 and the new value now holds 4 deleted keys
+    updateEvents.add(
+        getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE,
+            repeatedOmKeyInfo));
+    OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
+    omTableInsightTask.process(updateEventBatch);
+    // item1 now contributes 4 keys instead of 5, so the deleted-key count
+    // drops from 20 to 19
+    assertEquals(19L, getCountForTable(DELETED_TABLE));
+    // After updating "item1", size should be 4000 - 1000 + 500 = 3500
+    // presentValue - oldValue + newValue = updatedValue
+    assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE));
+    assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE));
   }
 
+
   private OMDBUpdateEvent getOMUpdateEvent(
       String name, Object value, String table,
       OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) {
-    return new OMDBUpdateEvent.OMUpdateEventBuilder()
+    return new OMUpdateEventBuilder()
       .setAction(action)
      .setKey(name)
      .setValue(value)
@@ -661,8 +409,7 @@ private long getReplicatedSizeForTable(String tableName) {
   }
 
   private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
-                                 String keyName, Long objectID,
-                                 boolean isFile) {
+                                 String keyName, boolean isFile) {
     return new OmKeyInfo.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
@@ -671,7 +418,6 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
         .setReplicationConfig(StandaloneReplicationConfig
            .getInstance(HddsProtos.ReplicationFactor.ONE))
         .setDataSize(100L)
-        .setObjectID(objectID)
        .build();
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
index e9fb15e613fe..b79e49f834cb 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.ozone.client;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -61,7 +62,7 @@ public void createVolume(String volumeName) throws IOException {
         .setAdmin("root")
         .setOwner("root")
         .setQuotaInBytes(Integer.MAX_VALUE)
-        .build());
+        .setAcls(new ArrayList<>()).build());
   }
 
   @Override