diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java similarity index 95% rename from hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java index 6daec6c80e4b..d3990e609823 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java @@ -28,8 +28,8 @@ /** * Upgrade related test utility methods. */ -public final class TestUpgradeUtils { - private TestUpgradeUtils() { } +public final class UpgradeTestUtils { + private UpgradeTestUtils() { } /** * Creates a VERSION file for the specified node type under the directory diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index ebcdfee551c9..33395406438b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Rule; @@ -61,7 +61,7 @@ public void testStartupSlvLessThanMlv() throws Exception { // Create version file with MLV > SLV, which should fail the // DataNodeStateMachine construction. 
- TestUpgradeUtils.createVersionFile(datanodeSubdir, + UpgradeTestUtils.createVersionFile(datanodeSubdir, HddsProtos.NodeType.DATANODE, mlv); try { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index d4d11ffd6218..1fc4f7625ef7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -17,36 +17,699 @@ */ package org.apache.hadoop.hdds.scm; -import java.util.ArrayList; -import java.util.List; - +import com.google.common.base.Preconditions; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.CRLStatusReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.PipelineAction; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; + .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.PipelineReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.SCMConfigurator; +import org.apache.hadoop.hdds.scm.server + .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; +import org.apache.hadoop.hdds.scm.server + .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.StorageTypeProto; +import 
org.apache.hadoop.hdds.scm.node.SCMNodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.common.Storage; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; +import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; +import org.apache.hadoop.security.authentication.client .AuthenticationException; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; /** * Stateless helper functions for Hdds tests. */ public final class HddsTestUtils { + private static ThreadLocalRandom random = ThreadLocalRandom.current(); + private static PipelineID randomPipelineID = PipelineID.randomId(); + private HddsTestUtils() { } + /** + * Generates DatanodeDetails from RegisteredCommand. + * + * @param registeredCommand registration response from SCM + * + * @return DatanodeDetails + */ + public static DatanodeDetails getDatanodeDetails( + RegisteredCommand registeredCommand) { + return MockDatanodeDetails.createDatanodeDetails( + registeredCommand.getDatanode().getUuidString(), + registeredCommand.getDatanode().getHostName(), + registeredCommand.getDatanode().getIpAddress(), + null); + } + + /** + * Creates a random DatanodeDetails and registers it with the given + * NodeManager. + * + * @param nodeManager NodeManager + * + * @return DatanodeDetails + */ + public static DatanodeDetails createRandomDatanodeAndRegister( + SCMNodeManager nodeManager) { + return getDatanodeDetails( + nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null, + getRandomPipelineReports())); + } + + /** + * Gets the specified number of DatanodeDetails and registers them with + * the node manager. + * + * @param nodeManager node manager to register the datanode ids. + * @param count number of DatanodeDetails needed. + * + * @return list of DatanodeDetails + */ + public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails( + SCMNodeManager nodeManager, int count) { + ArrayList<DatanodeDetails> datanodes = new ArrayList<>(); + for (int i = 0; i < count; i++) { + datanodes.add(createRandomDatanodeAndRegister(nodeManager)); + } + return datanodes; + } + + /** + * Generates a random NodeReport. + * + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport() { + return getRandomNodeReport(1, 1); + } + + /** + * Generates random NodeReport with the given number of storage reports in it. + * + * @param numberOfStorageReport number of storage reports this node report + * should have + * @param numberOfMetadataStorageReport number of metadata storage reports + * this node report should have + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport(int numberOfStorageReport, + int numberOfMetadataStorageReport) { + UUID nodeId = UUID.randomUUID(); + return getRandomNodeReport(nodeId, File.separator + nodeId, + numberOfStorageReport, numberOfMetadataStorageReport); + } + + /** + * Generates random NodeReport for the given nodeId with the given + * base path and number of storage reports in it.
+ * + * @param nodeId datanode id + * @param basePath base path of storage directory + * @param numberOfStorageReport number of storage reports + * @param numberOfMetadataStorageReport number of metadata storage reports + * + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport(UUID nodeId, + String basePath, int numberOfStorageReport, + int numberOfMetadataStorageReport) { + List<StorageReportProto> storageReports = new ArrayList<>(); + for (int i = 0; i < numberOfStorageReport; i++) { + storageReports.add(getRandomStorageReport(nodeId, + basePath + File.separator + "data-" + i)); + } + List<MetadataStorageReportProto> metadataStorageReports = + new ArrayList<>(); + for (int i = 0; i < numberOfMetadataStorageReport; i++) { + metadataStorageReports.add(getRandomMetadataStorageReport( + basePath + File.separator + "metadata-" + i)); + } + return createNodeReport(storageReports, metadataStorageReports); + } + + /** + * Creates NodeReport with the given storage reports. + * + * @param reports storage reports to be included in the node report. + * @param metaReports metadata storage reports to be included + * in the node report. + * @return NodeReportProto + */ + public static NodeReportProto createNodeReport( + List<StorageReportProto> reports, + List<MetadataStorageReportProto> metaReports) { + NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); + nodeReport.addAllStorageReport(reports); + nodeReport.addAllMetadataStorageReport(metaReports); + return nodeReport.build(); + } + + /** + * Generates random storage report. + * + * @param nodeId datanode id to which the storage report belongs + * @param path path of the storage + * + * @return StorageReportProto + */ + public static StorageReportProto getRandomStorageReport(UUID nodeId, + String path) { + return createStorageReport(nodeId, path, + random.nextInt(1000), + random.nextInt(500), + random.nextInt(500), + StorageTypeProto.DISK); + } + + /** + * Generates random metadata storage report. + * + * @param path path of the storage + * + * @return MetadataStorageReportProto + */ + public static MetadataStorageReportProto getRandomMetadataStorageReport( + String path) { + return createMetadataStorageReport(path, + random.nextInt(1000), + random.nextInt(500), + random.nextInt(500), + StorageTypeProto.DISK); + } + + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity) { + return createStorageReport(nodeId, path, + capacity, + 0, + capacity, + StorageTypeProto.DISK); + } + + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type) { + return createStorageReport(nodeId, path, capacity, used, remaining, + type, false); + } + /** + * Creates storage report with the given information. + * + * @param nodeId datanode id + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return StorageReportProto + */ + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type, + boolean failed) { + Preconditions.checkNotNull(nodeId); + Preconditions.checkNotNull(path); + StorageReportProto.Builder srb = StorageReportProto.newBuilder(); + srb.setStorageUuid(nodeId.toString()) + .setStorageLocation(path) + .setCapacity(capacity) + .setScmUsed(used) + .setFailed(failed) + .setRemaining(remaining); + StorageTypeProto storageTypeProto = + type == null ?
StorageTypeProto.DISK : type; + srb.setStorageType(storageTypeProto); + return srb.build(); + } + + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity) { + return createMetadataStorageReport(path, + capacity, + 0, + capacity, + StorageTypeProto.DISK, false); + } + + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity, long used, long remaining, + StorageTypeProto type) { + return createMetadataStorageReport(path, capacity, used, remaining, + type, false); + } + + /** + * Creates metadata storage report with the given information. + * + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return MetadataStorageReportProto + */ + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity, long used, long remaining, + StorageTypeProto type, boolean failed) { + Preconditions.checkNotNull(path); + MetadataStorageReportProto.Builder srb = MetadataStorageReportProto + .newBuilder(); + srb.setStorageLocation(path) + .setCapacity(capacity) + .setScmUsed(used) + .setFailed(failed) + .setRemaining(remaining); + StorageTypeProto storageTypeProto = + type == null ? StorageTypeProto.DISK : type; + srb.setStorageType(storageTypeProto); + return srb.build(); + } + + /** + * Generates random container reports. + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getRandomContainerReports() { + return getRandomContainerReports(1); + } + + /** + * Generates random container report with the given number of containers. + * + * @param numberOfContainers number of containers to be in container report + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getRandomContainerReports( + int numberOfContainers) { + List<ContainerReplicaProto> containerInfos = new ArrayList<>(); + for (int i = 0; i < numberOfContainers; i++) { + containerInfos.add(getRandomContainerInfo(i)); + } + return getContainerReports(containerInfos); + } + + + public static PipelineReportsProto getRandomPipelineReports() { + return PipelineReportsProto.newBuilder().build(); + } + + public static PipelineReportFromDatanode getPipelineReportFromDatanode( + DatanodeDetails dn, PipelineID...
pipelineIDs) { + PipelineReportsProto.Builder reportBuilder = + PipelineReportsProto.newBuilder(); + for (PipelineID pipelineID : pipelineIDs) { + reportBuilder.addPipelineReport( + PipelineReport.newBuilder() + .setPipelineID(pipelineID.getProtobuf()) + .setIsLeader(false)); + } + return new PipelineReportFromDatanode(dn, reportBuilder.build()); + } + + public static PipelineReportFromDatanode getPipelineReportFromDatanode( + DatanodeDetails dn, PipelineID pipelineID, boolean isLeader) { + PipelineReportsProto.Builder reportBuilder = + PipelineReportsProto.newBuilder(); + reportBuilder.addPipelineReport(PipelineReport.newBuilder() + .setPipelineID(pipelineID.getProtobuf()).setIsLeader(isLeader)); + return new PipelineReportFromDatanode(dn, reportBuilder.build()); + } + + public static void openAllRatisPipelines(PipelineManager pipelineManager) + throws IOException { + // Pipeline is created by background thread + for (ReplicationFactor factor : ReplicationFactor.values()) { + // Trigger the processed pipeline report event + for (Pipeline pipeline : pipelineManager + .getPipelines(new RatisReplicationConfig(factor))) { + pipelineManager.openPipeline(pipeline.getId()); + } + } + } + + public static PipelineActionsFromDatanode getPipelineActionFromDatanode( + DatanodeDetails dn, PipelineID... pipelineIDs) { + PipelineActionsProto.Builder actionsProtoBuilder = + PipelineActionsProto.newBuilder(); + for (PipelineID pipelineID : pipelineIDs) { + ClosePipelineInfo closePipelineInfo = + ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf()) + .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED) + .setDetailedReason("").build(); + actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder() + .setClosePipeline(closePipelineInfo) + .setAction(PipelineAction.Action.CLOSE) + .build()); + } + return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build()); + } + + /** + * Creates container report with the given ContainerInfo(s). + * + * @param containerInfos one or more ContainerInfo + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getContainerReports( + ContainerReplicaProto... containerInfos) { + return getContainerReports(Arrays.asList(containerInfos)); + } + + /** + * Creates container report with the given ContainerInfo(s). + * + * @param containerInfos list of ContainerInfo + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getContainerReports( + List<ContainerReplicaProto> containerInfos) { + ContainerReportsProto.Builder + reportsBuilder = ContainerReportsProto.newBuilder(); + for (ContainerReplicaProto containerInfo : containerInfos) { + reportsBuilder.addReports(containerInfo); + } + return reportsBuilder.build(); + } + + /** + * Generates random ContainerInfo. + * + * @param containerId container id of the ContainerInfo + * + * @return ContainerReplicaProto + */ + public static ContainerReplicaProto getRandomContainerInfo( + long containerId) { + return createContainerInfo(containerId, + OzoneConsts.GB * 5, + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(5), + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(2), + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(5)); + } + + /** + * Creates ContainerInfo with the given details.
+ * + * @param containerId id of the container + * @param size size of container + * @param keyCount number of keys + * @param bytesUsed bytes used by the container + * @param readCount number of reads + * @param readBytes bytes read + * @param writeCount number of writes + * @param writeBytes bytes written + * + * @return ContainerReplicaProto + */ + @SuppressWarnings("parameternumber") + public static ContainerReplicaProto createContainerInfo( + long containerId, long size, long keyCount, long bytesUsed, + long readCount, long readBytes, long writeCount, long writeBytes) { + return ContainerReplicaProto.newBuilder() + .setContainerID(containerId) + .setState(ContainerReplicaProto.State.OPEN) + .setSize(size) + .setKeyCount(keyCount) + .setUsed(bytesUsed) + .setReadCount(readCount) + .setReadBytes(readBytes) + .setWriteCount(writeCount) + .setWriteBytes(writeBytes) + .build(); + } + + /** + * Create Command Status report object. + * @return CommandStatusReportsProto + */ + public static CommandStatusReportsProto createCommandStatusReport( + List<CommandStatus> reports) { + CommandStatusReportsProto.Builder report = CommandStatusReportsProto + .newBuilder(); + report.addAllCmdStatus(reports); + return report.build(); + } + + /** + * Create CRL Status report object. + * @param pendingCRLIds List of Pending CRL Ids in the report. + * @param receivedCRLId Latest received CRL Id in the report. + * @return {@link CRLStatusReport} + */ + public static CRLStatusReport createCRLStatusReport( + List<Long> pendingCRLIds, long receivedCRLId) { + CRLStatusReport.Builder report = CRLStatusReport.newBuilder(); + report.addAllPendingCrlIds(pendingCRLIds); + report.setReceivedCrlId(receivedCRLId); + return report.build(); + } + + public static org.apache.hadoop.hdds.scm.container.ContainerInfo + allocateContainer(ContainerManager containerManager) + throws IOException { + return containerManager + .allocateContainer(new RatisReplicationConfig(ReplicationFactor.THREE), + "root"); + + } + + public static void closeContainer(ContainerManager containerManager, + ContainerID id) throws IOException, InvalidStateTransitionException { + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.FINALIZE); + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.CLOSE); + + } + + /** + * Moves the container to the quasi-closed state. + * @param containerManager container manager + * @param id container id + * @throws IOException + */ + public static void quasiCloseContainer(ContainerManager containerManager, + ContainerID id) throws IOException, InvalidStateTransitionException { + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.FINALIZE); + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.QUASI_CLOSE); + + } + + /** + * Constructs and returns a StorageContainerManager instance using the given + * configuration. + * + * @param conf OzoneConfiguration + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScmSimple(OzoneConfiguration conf) + throws IOException, AuthenticationException { + SCMConfigurator configurator = new SCMConfigurator(); + // The default behaviour, whether Ratis will be enabled or not + // in SCM, is inferred from ozone-default.xml. + // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + return StorageContainerManager.createSCM(conf, configurator); + } + + /** + * Constructs and returns a StorageContainerManager instance using the given + * configuration.
The ports used by this StorageContainerManager are + * randomly selected from free ports available. + * + * @param conf OzoneConfiguration + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScm(OzoneConfiguration conf) + throws IOException, AuthenticationException { + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); + configurator.setScmContext(SCMContext.emptyContext()); + return getScm(conf, configurator); + } + + /** + * Constructs and returns a StorageContainerManager instance using the given + * configuration and the configurator. The ports used by this + * StorageContainerManager are randomly selected from free ports available. + * + * @param conf OzoneConfiguration + * @param configurator SCMConfigurator + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScm(OzoneConfiguration conf, + SCMConfigurator configurator) + throws IOException, AuthenticationException { + conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + if (scmStore.getState() != Storage.StorageState.INITIALIZED) { + String clusterId = UUID.randomUUID().toString(); + String scmId = UUID.randomUUID().toString(); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + } + return StorageContainerManager.createSCM(conf, configurator); + } + + private static ContainerInfo.Builder getDefaultContainerInfoBuilder( + final HddsProtos.LifeCycleState state) { + return new ContainerInfo.Builder() + .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) + .setState(state) + .setSequenceId(10000L) + .setOwner("TEST"); + } + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(randomPipelineID) + .build(); + } + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state, PipelineID pipelineID) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(pipelineID) + .build(); + } + + public static Set<ContainerReplica> getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final DatanodeDetails... datanodeDetails) { + return getReplicas(containerId, state, 10000L, datanodeDetails); + } + + public static Set<ContainerReplica> getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final long sequenceId, + final DatanodeDetails...
datanodeDetails) { + Set<ContainerReplica> replicas = new HashSet<>(); + for (DatanodeDetails datanode : datanodeDetails) { + replicas.add(getReplicas(containerId, state, + sequenceId, datanode.getUuid(), datanode)); + } + return replicas; + } + + public static ContainerReplica getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final long sequenceId, + final UUID originNodeId, + final DatanodeDetails datanodeDetails) { + return ContainerReplica.newBuilder() + .setContainerID(containerId) + .setContainerState(state) + .setDatanodeDetails(datanodeDetails) + .setOriginNodeId(originNodeId) + .setSequenceId(sequenceId) + .setBytesUsed(100) + .build(); + } + + public static Pipeline getRandomPipeline() { + List<DatanodeDetails> nodes = new ArrayList<>(); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + return Pipeline.newBuilder() + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) + .setId(PipelineID.randomId()) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .build(); + } + /** * Create Command Status report object. * * @param numOfContainers number of containers to be included in report. * @return CommandStatusReportsProto */ - public static NodeRegistrationContainerReport + public static SCMDatanodeProtocolServer.NodeRegistrationContainerReport createNodeRegistrationContainerReport(int numOfContainers) { - return new NodeRegistrationContainerReport( + return new SCMDatanodeProtocolServer.NodeRegistrationContainerReport( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.getRandomContainerReports(numOfContainers)); + getRandomContainerReports(numOfContainers)); } /** @@ -55,16 +718,16 @@ private HddsTestUtils() { * @param dnContainers List of containers to be included in report * @return NodeRegistrationContainerReport */ - public static NodeRegistrationContainerReport + public static SCMDatanodeProtocolServer.NodeRegistrationContainerReport createNodeRegistrationContainerReport(List<ContainerInfo> dnContainers) { - List<StorageContainerDatanodeProtocolProtos.ContainerReplicaProto> + List<ContainerReplicaProto> containers = new ArrayList<>(); dnContainers.forEach(c -> { - containers.add(TestUtils.getRandomContainerInfo(c.getContainerID())); + containers.add(getRandomContainerInfo(c.getContainerID())); }); - return new NodeRegistrationContainerReport( + return new SCMDatanodeProtocolServer.NodeRegistrationContainerReport( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.getContainerReports(containers)); + getContainerReports(containers)); } /** @@ -83,5 +746,4 @@ public static List<ContainerInfo> getContainerInfo(int numContainers) { } return containerInfoList; } - } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java deleted file mode 100644 index 068f3d774c22..000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CRLStatusReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import 
org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.security.authentication.client - .AuthenticationException; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ThreadLocalRandom; - -/** - * Stateless helper functions to handler scm/datanode connection. - */ -public final class TestUtils { - - private static ThreadLocalRandom random = ThreadLocalRandom.current(); - private static PipelineID randomPipelineID = PipelineID.randomId(); - - private TestUtils() { - } - - /** - * Generates DatanodeDetails from RegisteredCommand. - * - * @param registeredCommand registration response from SCM - * - * @return DatanodeDetails - */ - public static DatanodeDetails getDatanodeDetails( - RegisteredCommand registeredCommand) { - return MockDatanodeDetails.createDatanodeDetails( - registeredCommand.getDatanode().getUuidString(), - registeredCommand.getDatanode().getHostName(), - registeredCommand.getDatanode().getIpAddress(), - null); - } - - /** - * Creates a random DatanodeDetails and register it with the given - * NodeManager. - * - * @param nodeManager NodeManager - * - * @return DatanodeDetails - */ - public static DatanodeDetails createRandomDatanodeAndRegister( - SCMNodeManager nodeManager) { - return getDatanodeDetails( - nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null, - getRandomPipelineReports())); - } - - /** - * Get specified number of DatanodeDetails and register them with node - * manager. - * - * @param nodeManager node manager to register the datanode ids. - * @param count number of DatanodeDetails needed. - * - * @return list of DatanodeDetails - */ - public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails( - SCMNodeManager nodeManager, int count) { - ArrayList<DatanodeDetails> datanodes = new ArrayList<>(); - for (int i = 0; i < count; i++) { - datanodes.add(createRandomDatanodeAndRegister(nodeManager)); - } - return datanodes; - } - - /** - * Generates a random NodeReport. - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport() { - return getRandomNodeReport(1, 1); - } - - /** - * Generates random NodeReport with the given number of storage report in it. - * - * @param numberOfStorageReport number of storage report this node report - * should have - * @param numberOfMetadataStorageReport number of metadata storage report - * this node report should have - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(int numberOfStorageReport, - int numberOfMetadataStorageReport) { - UUID nodeId = UUID.randomUUID(); - return getRandomNodeReport(nodeId, File.separator + nodeId, - numberOfStorageReport, numberOfMetadataStorageReport); - } - - /** - * Generates random NodeReport for the given nodeId with the given - * base path and number of storage report in it.
- * - * @param nodeId datanode id - * @param basePath base path of storage directory - * @param numberOfStorageReport number of storage report - * @param numberOfMetadataStorageReport number of metadata storage report - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(UUID nodeId, - String basePath, int numberOfStorageReport, - int numberOfMetadataStorageReport) { - List<StorageReportProto> storageReports = new ArrayList<>(); - for (int i = 0; i < numberOfStorageReport; i++) { - storageReports.add(getRandomStorageReport(nodeId, - basePath + File.separator + "data-" + i)); - } - List<MetadataStorageReportProto> metadataStorageReports = - new ArrayList<>(); - for (int i = 0; i < numberOfMetadataStorageReport; i++) { - metadataStorageReports.add(getRandomMetadataStorageReport( - basePath + File.separator + "metadata-" + i)); - } - return createNodeReport(storageReports, metadataStorageReports); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports storage reports to be included in the node report. - * @param metaReports metadata storage reports to be included - * in the node report. - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - List<StorageReportProto> reports, - List<MetadataStorageReportProto> metaReports) { - NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); - nodeReport.addAllStorageReport(reports); - nodeReport.addAllMetadataStorageReport(metaReports); - return nodeReport.build(); - } - - /** - * Generates random storage report. - * - * @param nodeId datanode id for which the storage report belongs to - * @param path path of the storage - * - * @return StorageReportProto - */ - public static StorageReportProto getRandomStorageReport(UUID nodeId, - String path) { - return createStorageReport(nodeId, path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - /** - * Generates random metadata storage report. - * - * @param path path of the storage - * - * @return MetadataStorageReportProto - */ - public static MetadataStorageReportProto getRandomMetadataStorageReport( - String path) { - return createMetadataStorageReport(path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity) { - return createStorageReport(nodeId, path, - capacity, - 0, - capacity, - StorageTypeProto.DISK); - } - - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { - return createStorageReport(nodeId, path, capacity, used, remaining, - type, false); - } - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type, - boolean failed) { - Preconditions.checkNotNull(nodeId); - Preconditions.checkNotNull(path); - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - srb.setStorageUuid(nodeId.toString()) - .setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setFailed(failed) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ?
StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity) { - return createMetadataStorageReport(path, - capacity, - 0, - capacity, - StorageTypeProto.DISK, false); - } - - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity, long used, long remaining, - StorageTypeProto type) { - return createMetadataStorageReport(path, capacity, used, remaining, - type, false); - } - - /** - * Creates metadata storage report with the given information. - * - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity, long used, long remaining, - StorageTypeProto type, boolean failed) { - Preconditions.checkNotNull(path); - MetadataStorageReportProto.Builder srb = MetadataStorageReportProto - .newBuilder(); - srb.setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setFailed(failed) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ? StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - /** - * Generates random container reports. - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports() { - return getRandomContainerReports(1); - } - - /** - * Generates random container report with the given number of containers. - * - * @param numberOfContainers number of containers to be in container report - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports( - int numberOfContainers) { - List<ContainerReplicaProto> containerInfos = new ArrayList<>(); - for (int i = 0; i < numberOfContainers; i++) { - containerInfos.add(getRandomContainerInfo(i)); - } - return getContainerReports(containerInfos); - } - - - public static PipelineReportsProto getRandomPipelineReports() { - return PipelineReportsProto.newBuilder().build(); - } - - public static PipelineReportFromDatanode getPipelineReportFromDatanode( - DatanodeDetails dn, PipelineID...
pipelineIDs) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - reportBuilder.addPipelineReport( - PipelineReport.newBuilder() - .setPipelineID(pipelineID.getProtobuf()) - .setIsLeader(false)); - } - return new PipelineReportFromDatanode(dn, reportBuilder.build()); - } - - public static PipelineReportFromDatanode getPipelineReportFromDatanode( - DatanodeDetails dn, PipelineID pipelineID, boolean isLeader) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipelineID.getProtobuf()).setIsLeader(isLeader)); - return new PipelineReportFromDatanode(dn, reportBuilder.build()); - } - - public static void openAllRatisPipelines(PipelineManager pipelineManager) - throws IOException { - // Pipeline is created by background thread - for (ReplicationFactor factor : ReplicationFactor.values()) { - // Trigger the processed pipeline report event - for (Pipeline pipeline : pipelineManager - .getPipelines(new RatisReplicationConfig(factor))) { - pipelineManager.openPipeline(pipeline.getId()); - } - } - } - - public static PipelineActionsFromDatanode getPipelineActionFromDatanode( - DatanodeDetails dn, PipelineID... pipelineIDs) { - PipelineActionsProto.Builder actionsProtoBuilder = - PipelineActionsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - ClosePipelineInfo closePipelineInfo = - ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf()) - .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED) - .setDetailedReason("").build(); - actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder() - .setClosePipeline(closePipelineInfo) - .setAction(PipelineAction.Action.CLOSE) - .build()); - } - return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build()); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos one or more ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - ContainerReplicaProto... containerInfos) { - return getContainerReports(Arrays.asList(containerInfos)); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos list of ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - List<ContainerReplicaProto> containerInfos) { - ContainerReportsProto.Builder - reportsBuilder = ContainerReportsProto.newBuilder(); - for (ContainerReplicaProto containerInfo : containerInfos) { - reportsBuilder.addReports(containerInfo); - } - return reportsBuilder.build(); - } - - /** - * Generates random ContainerInfo. - * - * @param containerId container id of the ContainerInfo - * - * @return ContainerInfo - */ - public static ContainerReplicaProto getRandomContainerInfo( - long containerId) { - return createContainerInfo(containerId, - OzoneConsts.GB * 5, - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(2), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5)); - } - - /** - * Creates ContainerInfo with the given details.
- * - * @param containerId id of the container - * @param size size of container - * @param keyCount number of keys - * @param bytesUsed bytes used by the container - * @param readCount number of reads - * @param readBytes bytes read - * @param writeCount number of writes - * @param writeBytes bytes written - * - * @return ContainerInfo - */ - @SuppressWarnings("parameternumber") - public static ContainerReplicaProto createContainerInfo( - long containerId, long size, long keyCount, long bytesUsed, - long readCount, long readBytes, long writeCount, long writeBytes) { - return ContainerReplicaProto.newBuilder() - .setContainerID(containerId) - .setState(ContainerReplicaProto.State.OPEN) - .setSize(size) - .setKeyCount(keyCount) - .setUsed(bytesUsed) - .setReadCount(readCount) - .setReadBytes(readBytes) - .setWriteCount(writeCount) - .setWriteBytes(writeBytes) - .build(); - } - - /** - * Create Command Status report object. - * @return CommandStatusReportsProto - */ - public static CommandStatusReportsProto createCommandStatusReport( - List<CommandStatus> reports) { - CommandStatusReportsProto.Builder report = CommandStatusReportsProto - .newBuilder(); - report.addAllCmdStatus(reports); - return report.build(); - } - - /** - * Create CRL Status report object. - * @param pendingCRLIds List of Pending CRL Ids in the report. - * @param receivedCRLId Latest received CRL Id in the report. - * @return {@link CRLStatusReport} - */ - public static CRLStatusReport createCRLStatusReport( - List<Long> pendingCRLIds, long receivedCRLId) { - CRLStatusReport.Builder report = CRLStatusReport.newBuilder(); - report.addAllPendingCrlIds(pendingCRLIds); - report.setReceivedCrlId(receivedCRLId); - return report.build(); - } - - public static org.apache.hadoop.hdds.scm.container.ContainerInfo - allocateContainer(ContainerManager containerManager) - throws IOException { - return containerManager - .allocateContainer(new RatisReplicationConfig(ReplicationFactor.THREE), - "root"); - - } - - public static void closeContainer(ContainerManager containerManager, - ContainerID id) throws IOException, InvalidStateTransitionException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.CLOSE); - - } - - /** - * Move the container to Quaise close state. - * @param containerManager - * @param id - * @throws IOException - */ - public static void quasiCloseContainer(ContainerManager containerManager, - ContainerID id) throws IOException, InvalidStateTransitionException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.QUASI_CLOSE); - - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration. - * - * @param conf OzoneConfiguration - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScmSimple(OzoneConfiguration conf) - throws IOException, AuthenticationException { - SCMConfigurator configurator = new SCMConfigurator(); - // The default behaviour whether ratis will be enabled or not - // in SCM will be inferred from ozone-default.xml. - // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - return StorageContainerManager.createSCM(conf, configurator); - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration.
The ports used by this StorageContainerManager are - * randomly selected from free ports available. - * - * @param conf OzoneConfiguration - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - SCMConfigurator configurator = new SCMConfigurator(); - configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); - configurator.setScmContext(SCMContext.emptyContext()); - return getScm(conf, configurator); - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration and the configurator. The ports used by this - * StorageContainerManager are randomly selected from free ports available. - * - * @param conf OzoneConfiguration - * @param configurator SCMConfigurator - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException, AuthenticationException { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(conf, configurator); - } - - private static ContainerInfo.Builder getDefaultContainerInfoBuilder( - final HddsProtos.LifeCycleState state) { - return new ContainerInfo.Builder() - .setContainerID(RandomUtils.nextLong()) - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .setState(state) - .setSequenceId(10000L) - .setOwner("TEST"); - } - - public static ContainerInfo getContainer( - final HddsProtos.LifeCycleState state) { - return getDefaultContainerInfoBuilder(state) - .setPipelineID(randomPipelineID) - .build(); - } - - public static ContainerInfo getContainer( - final HddsProtos.LifeCycleState state, PipelineID pipelineID) { - return getDefaultContainerInfoBuilder(state) - .setPipelineID(pipelineID) - .build(); - } - - public static Set<ContainerReplica> getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final DatanodeDetails... datanodeDetails) { - return getReplicas(containerId, state, 10000L, datanodeDetails); - } - - public static Set<ContainerReplica> getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final DatanodeDetails...
datanodeDetails) { - Set<ContainerReplica> replicas = new HashSet<>(); - for (DatanodeDetails datanode : datanodeDetails) { - replicas.add(getReplicas(containerId, state, - sequenceId, datanode.getUuid(), datanode)); - } - return replicas; - } - - public static ContainerReplica getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final UUID originNodeId, - final DatanodeDetails datanodeDetails) { - return ContainerReplica.newBuilder() - .setContainerID(containerId) - .setContainerState(state) - .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(originNodeId) - .setSequenceId(sequenceId) - .setBytesUsed(100) - .build(); - } - - public static Pipeline getRandomPipeline() { - List<DatanodeDetails> nodes = new ArrayList<>(); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - return Pipeline.newBuilder() - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .setId(PipelineID.randomId()) - .setNodes(nodes) - .setState(Pipeline.PipelineState.OPEN) - .build(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index b6f537c1732c..e078b1f30a31 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; @@ -172,7 +172,7 @@ public void emitSafeModeStatus() { configurator.setMetadataStore(scmMetadataStore); configurator.setSCMHAManager(scmHAManager); configurator.setScmContext(scmContext); - scm = TestUtils.getScm(conf, configurator); + scm = HddsTestUtils.getScm(conf, configurator); // Initialize these fields so that the tests can pass.
mapping = scm.getContainerManager(); @@ -198,7 +198,7 @@ public void cleanup() throws Exception { @Test public void testAllocateBlock() throws Exception { pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, new ExcludeList()); Assert.assertNotNull(block); @@ -212,7 +212,7 @@ public void testAllocateBlockWithExclusion() throws Exception { } } catch (IOException e) { } - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); ExcludeList excludeList = new ExcludeList(); excludeList .addPipeline(pipelineManager.getPipelines(replicationConfig) @@ -280,7 +280,7 @@ public void testBlockDistribution() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map<Long, List<AllocatedBlock>> allocatedBlockMap = new ConcurrentHashMap<>(); List<CompletableFuture<Void>> futureList = @@ -342,7 +342,7 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map<Long, List<AllocatedBlock>> allocatedBlockMap = new ConcurrentHashMap<>(); List<CompletableFuture<Void>> futureList = @@ -410,7 +410,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map<Long, List<AllocatedBlock>> allocatedBlockMap = new ConcurrentHashMap<>(); List<CompletableFuture<Void>> futureList = @@ -497,7 +497,7 @@ public void testMultipleBlockAllocation() pipelineManager.createPipeline(replicationConfig); pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock allocatedBlock = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, @@ -544,7 +544,7 @@ public void testMultipleBlockAllocationWithClosedContainer() / replicationConfig.getRequiredNodes(); i++) { pipelineManager.createPipeline(replicationConfig); } - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); // wait till each pipeline has the configured number of containers.
// After this each pipeline has numContainerPerOwnerInPipeline containers diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 297b6119bd46..f3b3ccbd3ce1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -101,7 +101,7 @@ public void setup() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - scm = TestUtils.getScm(conf); + scm = HddsTestUtils.getScm(conf); containerManager = Mockito.mock(ContainerManager.class); containerTable = scm.getScmMetadataStore().getContainerTable(); scmHADBTransactionBuffer = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java index 37bb4570c5e6..cfdb2c30980f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java @@ -26,7 +26,7 @@ .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .CommandStatusReportFromDatanode; @@ -82,7 +82,7 @@ public void testCommandStatusReport() { private CommandStatusReportFromDatanode getStatusReport( List<CommandStatus> reports) { - CommandStatusReportsProto report = TestUtils.createCommandStatusReport( + CommandStatusReportsProto report = HddsTestUtils.createCommandStatusReport( reports); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 83a6b6243f9d..f6cab87daa1a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import
org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -283,11 +283,11 @@ public List getNodes( long capacity = nodeMetricMap.get(dd).getCapacity().get(); long used = nodeMetricMap.get(dd).getScmUsed().get(); long remaining = nodeMetricMap.get(dd).getRemaining().get(); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( di.getUuid(), "/data1-" + di.getUuidString(), capacity, used, remaining, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + di.getUuidString(), capacity, used, remaining, null); di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1))); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index 3a04adfe2a17..1a86150448d9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMService.Event; @@ -123,7 +123,7 @@ public static void setUp() throws Exception { eventQueue.addHandler(DATANODE_COMMAND, nodeManager); // Move all pipelines created by background from ALLOCATED to OPEN state Thread.sleep(2000); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); } @AfterClass diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index cbaf3bf79387..b4cbf27349c6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -60,8 +60,8 @@ import java.util.stream.Stream; import static junit.framework.TestCase.assertEquals; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; /** * Test the behaviour of the ContainerReportHandler. 
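The container-report tests above switch their static imports of getContainer and getReplicas from TestUtils to HddsTestUtils. A minimal sketch of the getContainer usage, based on the two-argument overload that appears verbatim in the TestPipelineManagerImpl hunk later in this patch; the pipeline is assumed to come from the test's PipelineManager:

  // Fabricate a CLOSED container attached to an existing pipeline, then
  // take its ContainerID for registration in the container map.
  ContainerInfo containerInfo = HddsTestUtils.getContainer(
      HddsProtos.LifeCycleState.CLOSED, pipeline.getId());
  ContainerID containerID = containerInfo.containerID();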
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index e8d4b38650ea..29cae36a0aa3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -69,8 +69,8 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas; import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion; /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index cfaabd74ad68..f6c47d320494 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -91,8 +91,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 3bb926324658..9333b172ac07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index 1f0853d25c78..4ed2887ccb66 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; @@ -97,11 +97,11 @@ public void testRackAwarePolicy() throws IOException { datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -114,19 +114,19 @@ public void testRackAwarePolicy() throws IOException { dnInfos.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index bc88e9763fbd..ffe658e9c9e4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; @@ -63,11 +63,11 @@ public void chooseDatanodes() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = 
TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -78,19 +78,19 @@ public void chooseDatanodes() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( datanodes.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), 100L, 90L, 10L, null); datanodes.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( datanodes.get(3).getUuid(), "/data1-" + datanodes.get(3).getUuidString(), 100L, 80L, 20L, null); datanodes.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( datanodes.get(4).getUuid(), "/data1-" + datanodes.get(4).getUuidString(), 100L, 70L, 30L, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index eba7703b46fa..37f4594a844d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -119,11 +119,11 @@ public void setup() { datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -137,39 +137,39 @@ public void setup() { } if (datanodeCount > 4) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 
= TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null); dnInfos.get(4).updateStorageReports( new ArrayList<>(Arrays.asList(storage4))); } else if (datanodeCount > 3) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); } else if (datanodeCount > 2) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 84L, 16L, null); @@ -438,11 +438,11 @@ public void testDatanodeWithDefaultNetworkLocation() throws SCMException { dn, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); dnInfo.updateStorageReports( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index c941111704a6..9ad03d1647da 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -63,11 +63,11 @@ public void chooseDatanodes() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = 
HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -78,7 +78,7 @@ public void chooseDatanodes() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( datanodes.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), 100L, 90L, 10L, null); @@ -172,11 +172,11 @@ public void testIsValidNode() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -187,7 +187,7 @@ public void testIsValidNode() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodes.get(1).getUuid(), "/data1-" + datanodes.get(1).getUuidString(), 100L, 90L, 10L, null); @@ -195,7 +195,7 @@ public void testIsValidNode() throws SCMException { new ArrayList<>(Arrays.asList(storage1))); MetadataStorageReportProto metaStorage2 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodes.get(2).getUuidString(), 100L, 90, 10L, null); datanodes.get(2).updateMetaDataStorageReports( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java index 1fda9c1d58d5..ab32e3688e80 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CRLStatusReport; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.server.SCMCertStore; @@ -125,7 +125,7 @@ private CRLStatusReportFromDatanode getCRLStatusReport( List pendingCRLIds, long receivedCRLId) { CRLStatusReport crlStatusReportProto = - TestUtils.createCRLStatusReport(pendingCRLIds, receivedCRLId); + HddsTestUtils.createCRLStatusReport(pendingCRLIds, receivedCRLId); return new CRLStatusReportFromDatanode(dn, crlStatusReportProto); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 773713f3dbed..b307b576c030 
100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -34,7 +34,7 @@ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -189,8 +189,8 @@ public void testContainerPlacementCapacity() throws IOException, SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); - List datanodes = - TestUtils.getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount); + List datanodes = HddsTestUtils + .getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount); XceiverClientManager xceiverClientManager = null; LayoutVersionManager versionManager = scmNodeManager.getLayoutVersionManager(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index b25b1e1a248f..64279bc76ba8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -101,7 +101,7 @@ public void setup() throws IOException, AuthenticationException { TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); eventQueue = new EventQueue(); - scm = TestUtils.getScm(conf); + scm = HddsTestUtils.getScm(conf); nodeManager = (SCMNodeManager) scm.getScmNodeManager(); scmContext = new SCMContext.Builder().setIsInSafeMode(true) .setLeader(true).setIsPreCheckComplete(true) @@ -143,11 +143,11 @@ public void testOnMessage() throws Exception { String metaStoragePath = GenericTestUtils.getRandomizedTempPath() .concat("/metadata-" + datanode1.getUuidString()); - StorageReportProto storageOne = TestUtils.createStorageReport( + StorageReportProto storageOne = HddsTestUtils.createStorageReport( datanode1.getUuid(), storagePath, 100 * OzoneConsts.TB, 10 * OzoneConsts.TB, 90 * OzoneConsts.TB, null); MetadataStorageReportProto metaStorageOne = - TestUtils.createMetadataStorageReport(metaStoragePath, + HddsTestUtils.createMetadataStorageReport(metaStoragePath, 100 * OzoneConsts.GB, 10 * OzoneConsts.GB, 90 * OzoneConsts.GB, null); @@ -160,48 +160,48 @@ public void testOnMessage() throws Exception { // test case happy. 
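The TestDeadNodeHandler hunk above (continuing below) repeats one report-building idiom for every registered node. A sketch of that idiom, using only signatures shown in this patch; the nodeManager comes from the test fixture, and the sizes are the hunk's own values:

  // Build a data storage report and a metadata storage report for one node.
  DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
  StorageReportProto storageOne = HddsTestUtils.createStorageReport(
      dn.getUuid(), "/data1-" + dn.getUuidString(),
      100 * OzoneConsts.TB, 10 * OzoneConsts.TB, 90 * OzoneConsts.TB, null);
  MetadataStorageReportProto metaStorageOne =
      HddsTestUtils.createMetadataStorageReport(
          "/metadata1-" + dn.getUuidString(),
          100 * OzoneConsts.GB, 10 * OzoneConsts.GB, 90 * OzoneConsts.GB,
          null);
  // Register the node with both reports wrapped in a single node report.
  nodeManager.register(dn,
      HddsTestUtils.createNodeReport(
          Arrays.asList(storageOne), Arrays.asList(metaStorageOne)),
      null);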
nodeManager.register(datanode1, - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(datanode2, - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(datanode3, - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Arrays.asList(metaStorageOne)), null); LambdaTestUtils.await(120000, 1000, () -> pipelineManager.getPipelines(new RatisReplicationConfig(THREE)) .size() > 3); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); ContainerInfo container1 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container2 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container3 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container4 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); registerContainers(datanode1, container1, container2, container4); registerContainers(datanode2, container1, container2); @@ -212,9 +212,10 @@ public void testOnMessage() throws Exception { registerReplicas(containerManager, container3, datanode3); registerReplicas(containerManager, container4, datanode1); - TestUtils.closeContainer(containerManager, container1.containerID()); - TestUtils.closeContainer(containerManager, container2.containerID()); - TestUtils.quasiCloseContainer(containerManager, container3.containerID()); + HddsTestUtils.closeContainer(containerManager, container1.containerID()); + HddsTestUtils.closeContainer(containerManager, container2.containerID()); + HddsTestUtils.quasiCloseContainer(containerManager, + container3.containerID()); // First set the node to IN_MAINTENANCE and ensure the container replicas // are not removed on the dead event diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 1b84be7b3077..4373238409ac 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -282,7 +282,7 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception { private SCMNodeManager createNodeManager(OzoneConfiguration config) throws IOException, AuthenticationException { - scm = TestUtils.getScm(config); + scm = HddsTestUtils.getScm(config); return (SCMNodeManager) scm.getScmNodeManager(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 436a1e8aa7f5..41b759c58cb4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -84,9 +84,9 @@ public void resetEventCollector() throws IOException { @Test public void testNodeReport() throws IOException { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); - StorageReportProto storageOne = TestUtils + StorageReportProto storageOne = HddsTestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - MetadataStorageReportProto metaStorageOne = TestUtils + MetadataStorageReportProto metaStorageOne = HddsTestUtils .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null); SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); @@ -100,7 +100,7 @@ public void testNodeReport() throws IOException { Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90); Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10); - StorageReportProto storageTwo = TestUtils + StorageReportProto storageTwo = HddsTestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); nodeReportHandler.onMessage( getNodeReport(dn, Arrays.asList(storageOne, storageTwo), @@ -117,7 +117,7 @@ private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, List reports, List metaReports) { NodeReportProto nodeReportProto = - TestUtils.createNodeReport(reports, 
metaReports); + HddsTestUtils.createNodeReport(reports, metaReports); return new NodeReportFromDatanode(dn, nodeReportProto); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index efef7fcd8755..ab87a59e27e3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -98,7 +98,7 @@ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; -import static org.apache.hadoop.hdds.scm.TestUtils.getRandomPipelineReports; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getRandomPipelineReports; import static org.apache.hadoop.hdds.scm.events.SCMEvents.*; import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto; @@ -189,7 +189,7 @@ OzoneConfiguration getConf() { SCMNodeManager createNodeManager(OzoneConfiguration config) throws IOException, AuthenticationException { - scm = TestUtils.getScm(config); + scm = HddsTestUtils.getScm(config); scmContext = new SCMContext.Builder().setIsInSafeMode(true) .setLeader(true).setIsPreCheckComplete(true) .setSCM(scm).build(); @@ -220,7 +220,7 @@ public void testScmHeartbeat() int registeredNodes = 5; // Send some heartbeats from different nodes. 
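The TestSCMNodeManager hunks above (the heartbeat loop follows below) register random datanodes and heartbeat them with a layout version derived from the node manager's own version manager; toLayoutVersionProto is the static import from UpgradeUtils that this patch keeps. A sketch of that loop, assuming the fixture's nodeManager and registeredNodes:

  // Derive the layout proto that every heartbeat must carry.
  LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
  LayoutVersionProto layoutInfo = toLayoutVersionProto(
      versionManager.getMetadataLayoutVersion(),
      versionManager.getSoftwareLayoutVersion());
  // Register a handful of nodes and send one heartbeat from each.
  for (int x = 0; x < registeredNodes; x++) {
    DatanodeDetails datanodeDetails =
        HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
    nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
  }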
for (int x = 0; x < registeredNodes; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(datanodeDetails, layoutInfo); } @@ -290,15 +290,15 @@ private DatanodeDetails registerWithCapacity(SCMNodeManager nodeManager, DatanodeDetails details = MockDatanodeDetails.randomDatanodeDetails(); StorageReportProto storageReport = - TestUtils.createStorageReport(details.getUuid(), + HddsTestUtils.createStorageReport(details.getUuid(), details.getNetworkFullPath(), Long.MAX_VALUE); MetadataStorageReportProto metadataStorageReport = - TestUtils.createMetadataStorageReport(details.getNetworkFullPath(), + HddsTestUtils.createMetadataStorageReport(details.getNetworkFullPath(), Long.MAX_VALUE); RegisteredCommand cmd = nodeManager.register( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageReport), + HddsTestUtils.createNodeReport(Arrays.asList(storageReport), Arrays.asList(metadataStorageReport)), getRandomPipelineReports(), layout); @@ -510,7 +510,7 @@ public void testScmShutdown() conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); SCMNodeManager nodeManager = createNodeManager(conf); - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); LayoutVersionProto layoutInfo = toLayoutVersionProto( @@ -549,7 +549,7 @@ public void testScmHealthyNodeCount() versionManager.getSoftwareLayoutVersion()); for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(datanodeDetails, layoutInfo); } @@ -609,7 +609,7 @@ public void testSetNodeOpStateAndCommandFired() conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails dn = TestUtils.createRandomDatanodeAndRegister( + DatanodeDetails dn = HddsTestUtils.createRandomDatanodeAndRegister( nodeManager); LayoutVersionManager versionManager = @@ -677,7 +677,7 @@ public void testScmDetectStaleAndDeadNode() List nodeList = createNodeSet(nodeManager, nodeCount); - DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister( + DatanodeDetails staleNode = HddsTestUtils.createRandomDatanodeAndRegister( nodeManager); // Heartbeat once @@ -781,9 +781,9 @@ public void testScmHandleJvmPause() versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion()); DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails node2 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(node1, layoutInfo); nodeManager.processHeartbeat(node2, layoutInfo); @@ -857,7 +857,7 @@ public void testProcessLayoutVersionReportHigherMlv() throws IOException, try (SCMNodeManager nodeManager = createNodeManager(conf)) { DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(SCMNodeManager.LOG); int scmMlv = 
@@ -884,7 +884,7 @@ public void testProcessLayoutVersionLowerMlv() throws IOException { scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), SCMContext.emptyContext(), lvm); DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); verify(eventPublisher, times(1)).fireEvent(NEW_NODE, node1); int scmMlv = @@ -990,11 +990,11 @@ public void testScmClusterIsInExpectedState1() versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion()); DatanodeDetails healthyNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails staleNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails deadNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(healthyNode, layoutInfo); nodeManager.processHeartbeat(staleNode, layoutInfo); nodeManager.processHeartbeat(deadNode, layoutInfo); @@ -1130,7 +1130,7 @@ private List createNodeSet(SCMNodeManager nodeManager, int count) { List list = new ArrayList<>(); for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); list.add(datanodeDetails); } @@ -1346,9 +1346,9 @@ public void testScmStatsFromNodeReport() UUID dnId = dn.getUuid(); long free = capacity - used; String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils + StorageReportProto report = HddsTestUtils .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.register(dn, TestUtils.createNodeReport( + nodeManager.register(dn, HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()), null); nodeManager.processHeartbeat(dn, layoutInfo); } @@ -1396,12 +1396,12 @@ public void tesVolumeInfoFromNodeReport() boolean failed = true; for (int x = 0; x < volumeCount; x++) { String storagePath = testDir.getAbsolutePath() + "/" + dnId; - reports.add(TestUtils + reports.add(HddsTestUtils .createStorageReport(dnId, storagePath, capacity, used, free, null, failed)); failed = !failed; } - nodeManager.register(dn, TestUtils.createNodeReport(reports, + nodeManager.register(dn, HddsTestUtils.createNodeReport(reports, Collections.emptyList()), null); LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); @@ -1445,7 +1445,7 @@ public void testScmNodeReportUpdate() try (SCMNodeManager nodeManager = createNodeManager(conf)) { DatanodeDetails datanodeDetails = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); NodeReportHandler nodeReportHandler = new NodeReportHandler(nodeManager); EventPublisher publisher = mock(EventPublisher.class); final long capacity = 2000; @@ -1455,10 +1455,10 @@ public void testScmNodeReportUpdate() long scmUsed = x * usedPerHeartbeat; long remaining = capacity - scmUsed; String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils + StorageReportProto report = HddsTestUtils .createStorageReport(dnId, storagePath, capacity, scmUsed, remaining, null); - NodeReportProto nodeReportProto = TestUtils.createNodeReport( + NodeReportProto nodeReportProto = HddsTestUtils.createNodeReport( 
Arrays.asList(report), Collections.emptyList()); nodeReportHandler.onMessage( new NodeReportFromDatanode(datanodeDetails, nodeReportProto), @@ -1583,16 +1583,16 @@ public void testHandlingSCMCommandEvent() UUID dnId = datanodeDetails.getUuid(); String storagePath = testDir.getAbsolutePath() + "/" + dnId; StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); + HddsTestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); EventQueue eq = new EventQueue(); try (SCMNodeManager nodemanager = createNodeManager(conf)) { eq.addHandler(DATANODE_COMMAND, nodemanager); nodemanager - .register(datanodeDetails, TestUtils.createNodeReport( + .register(datanodeDetails, HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()), - TestUtils.getRandomPipelineReports()); + HddsTestUtils.getRandomPipelineReports()); eq.fireEvent(DATANODE_COMMAND, new CommandForDatanode<>(datanodeDetails.getUuid(), new CloseContainerCommand(1L, @@ -1770,13 +1770,13 @@ public void testGetNodeInfo() final long remaining = 1900; UUID dnId = datanodeDetails.getUuid(); String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils + StorageReportProto report = HddsTestUtils .createStorageReport(dnId, storagePath, capacity, used, remaining, null); - nodeManager.register(datanodeDetails, TestUtils.createNodeReport( + nodeManager.register(datanodeDetails, HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()), - TestUtils.getRandomPipelineReports()); + HddsTestUtils.getRandomPipelineReports()); LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); @@ -1784,9 +1784,9 @@ public void testGetNodeInfo() versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion()); nodeManager.register(datanodeDetails, - TestUtils.createNodeReport(Arrays.asList(report), + HddsTestUtils.createNodeReport(Arrays.asList(report), Collections.emptyList()), - TestUtils.getRandomPipelineReports(), layoutInfo); + HddsTestUtils.getRandomPipelineReports(), layoutInfo); nodeManager.processHeartbeat(datanodeDetails, layoutInfo); if (i == 5) { nodeManager.setNodeOperationalState(datanodeDetails, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java index bceed4210275..39a72db2702a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java @@ -24,7 +24,7 @@ StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto. 
StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; @@ -148,10 +148,11 @@ public void testProcessNodeReportCheckOneNode() throws IOException { long reportCapacity = report.getCapacity(); long reportScmUsed = report.getScmUsed(); long reportRemaining = report.getRemaining(); - StorageReportProto storageReport = TestUtils.createStorageReport(storageId, - path, reportCapacity, reportScmUsed, reportRemaining, null); + StorageReportProto storageReport = HddsTestUtils.createStorageReport( + storageId, path, reportCapacity, reportScmUsed, reportRemaining, + null); StorageReportResult result = - map.processNodeReport(key, TestUtils.createNodeReport( + map.processNodeReport(key, HddsTestUtils.createNodeReport( Arrays.asList(storageReport), Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, result.getStatus()); @@ -159,15 +160,15 @@ public void testProcessNodeReportCheckOneNode() throws IOException { NodeReportProto.newBuilder(); StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); reportList.add(srb); - result = map.processNodeReport(key, TestUtils.createNodeReport( + result = map.processNodeReport(key, HddsTestUtils.createNodeReport( reportList, Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, result.getStatus()); - reportList.add(TestUtils + reportList.add(HddsTestUtils .createStorageReport(UUID.randomUUID(), path, reportCapacity, reportCapacity, 0, null)); - result = map.processNodeReport(key, TestUtils.createNodeReport( + result = map.processNodeReport(key, HddsTestUtils.createNodeReport( reportList, Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE, result.getStatus()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java index 93e227d51cf4..d60a368c5fba 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -72,7 +72,7 @@ public void setup() throws IOException, AuthenticationException { conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "2s"); final EventQueue eventQueue = new EventQueue(); - final StorageContainerManager scm = TestUtils.getScm(conf); + final StorageContainerManager scm = HddsTestUtils.getScm(conf); nodeManager = scm.getScmNodeManager(); final DeadNodeHandler deadNodeHandler = new DeadNodeHandler( nodeManager, Mockito.mock(PipelineManager.class), @@ -92,21 +92,21 @@ public void testStatisticsUpdate() throws 
Exception { String storagePath2 = GenericTestUtils.getRandomizedTempPath() .concat("/" + datanode2.getUuidString()); - StorageReportProto storageOne = TestUtils.createStorageReport( + StorageReportProto storageOne = HddsTestUtils.createStorageReport( datanode1.getUuid(), storagePath1, 100, 10, 90, null); - StorageReportProto storageTwo = TestUtils.createStorageReport( + StorageReportProto storageTwo = HddsTestUtils.createStorageReport( datanode2.getUuid(), storagePath2, 200, 20, 180, null); nodeManager.register(datanode1, - TestUtils.createNodeReport(Arrays.asList(storageOne), + HddsTestUtils.createNodeReport(Arrays.asList(storageOne), Collections.emptyList()), null); nodeManager.register(datanode2, - TestUtils.createNodeReport(Arrays.asList(storageTwo), + HddsTestUtils.createNodeReport(Arrays.asList(storageTwo), Collections.emptyList()), null); - NodeReportProto nodeReportProto1 = TestUtils.createNodeReport( + NodeReportProto nodeReportProto1 = HddsTestUtils.createNodeReport( Arrays.asList(storageOne), Collections.emptyList()); - NodeReportProto nodeReportProto2 = TestUtils.createNodeReport( + NodeReportProto nodeReportProto2 = HddsTestUtils.createNodeReport( Arrays.asList(storageTwo), Collections.emptyList()); nodeReportHandler.onMessage( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 5133d5820015..538656eaf74a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -90,7 +90,7 @@ public void init() throws Exception { TestPipelineManagerImpl.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils.getRandomizedTempPath()); - scm = TestUtils.getScm(conf); + scm = HddsTestUtils.getScm(conf); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); nodeManager = new MockNodeManager(true, 20); @@ -307,7 +307,7 @@ public void testRemovePipeline() throws Exception { // Open the pipeline pipelineManager.openPipeline(pipeline.getId()); ContainerManager containerManager = scm.getContainerManager(); - ContainerInfo containerInfo = TestUtils. + ContainerInfo containerInfo = HddsTestUtils. getContainer(HddsProtos.LifeCycleState.CLOSED, pipeline.getId()); ContainerID containerID = containerInfo.containerID(); //Add Container to ContainerMap @@ -681,7 +681,7 @@ public void testPipelineCloseFlow() throws IOException { new RatisReplicationConfig(HddsProtos.ReplicationFactor.THREE)); PipelineID pipelineID = pipeline.getId(); ContainerManager containerManager = scm.getContainerManager(); - ContainerInfo containerInfo = TestUtils. + ContainerInfo containerInfo = HddsTestUtils. 
getContainer(HddsProtos.LifeCycleState.CLOSED, pipelineID); ContainerID containerID = containerInfo.containerID(); //Add Container to ContainerMap @@ -708,7 +708,8 @@ private void sendPipelineReport( PipelineReportHandler pipelineReportHandler, boolean isLeader) { SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode report = - TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), isLeader); + HddsTestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), + isLeader); pipelineReportHandler.onMessage(report, new EventQueue()); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index dac832d7ed36..5849398a971c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -63,7 +63,7 @@ public void setUp() throws Exception { SCMConfigurator configurator = new SCMConfigurator(); configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); configurator.setScmContext(SCMContext.emptyContext()); - scm = TestUtils.getScm(config, configurator); + scm = HddsTestUtils.getScm(config, configurator); scm.start(); scm.exitSafeMode(); // add nodes to scm node manager diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java index 64b81d465960..17c6dee1f471 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -105,7 +105,7 @@ public void testUpgrade() throws Exception { // Set up new pre-finalized SCM. 
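The TestSCMBlockProtocolServer hunk above boots a test SCM through the renamed HddsTestUtils.getScm(config, configurator) overload. The full bootstrap, reassembled from that hunk; config is the OzoneConfiguration the test prepares beforehand:

  // Stand up an SCM with a mocked HA manager and an empty SCM context,
  // then leave safe mode so block-protocol calls are accepted.
  SCMConfigurator configurator = new SCMConfigurator();
  configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
  configurator.setScmContext(SCMContext.emptyContext());
  StorageContainerManager scm = HddsTestUtils.getScm(config, configurator);
  scm.start();
  scm.exitSafeMode();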
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledPreFinalized); - StorageContainerManager scm = TestUtils.getScm(conf); + StorageContainerManager scm = HddsTestUtils.getScm(conf); Assert.assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED, scm.getUpgradeFinalizer().getStatus()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java index ee6608e5201e..9cdad811905b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.upgrade.LayoutFeature; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Rule; @@ -61,7 +61,7 @@ public void testStartupSlvLessThanMlv() throws Exception { // Create version file with MLV > SLV, which should fail the SCM // construction. - TestUpgradeUtils.createVersionFile(scmSubdir, HddsProtos.NodeType.SCM, mlv); + UpgradeTestUtils.createVersionFile(scmSubdir, HddsProtos.NodeType.SCM, mlv); try { new StorageContainerManager(conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index dc11ecef98cf..c0ff64669360 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; @@ -275,14 +275,14 @@ public void testRegister() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint( SCMTestUtils.getConf(), serverAddress, 1000)) { SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() - .register(nodeToRegister.getExtendedProtoBufMessage(), TestUtils + .register(nodeToRegister.getExtendedProtoBufMessage(), HddsTestUtils .createNodeReport( Arrays.asList(getStorageReports( nodeToRegister.getUuid())), Arrays.asList(getMetadataStorageReports( nodeToRegister.getUuid()))), - TestUtils.getRandomContainerReports(10), - TestUtils.getRandomPipelineReports(), + HddsTestUtils.getRandomContainerReports(10), + HddsTestUtils.getRandomPipelineReports(), defaultLayoutVersionProto()); Assert.assertNotNull(responseProto); Assert.assertEquals(nodeToRegister.getUuidString(), @@ -296,12 +296,13 @@ public void testRegister() throws Exception { private StorageReportProto getStorageReports(UUID 
id) { String storagePath = testDir.getAbsolutePath() + "/data-" + id; - return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null); + return HddsTestUtils.createStorageReport(id, storagePath, 100, 10, 90, + null); } private MetadataStorageReportProto getMetadataStorageReports(UUID id) { String storagePath = testDir.getAbsolutePath() + "/metadata-" + id; - return TestUtils.createMetadataStorageReport(storagePath, 100, 10, 90, + return HddsTestUtils.createMetadataStorageReport(storagePath, 100, 10, 90, null); } @@ -315,15 +316,15 @@ private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER); OzoneContainer ozoneContainer = mock(OzoneContainer.class); UUID datanodeID = UUID.randomUUID(); - when(ozoneContainer.getNodeReport()).thenReturn(TestUtils + when(ozoneContainer.getNodeReport()).thenReturn(HddsTestUtils .createNodeReport(Arrays.asList(getStorageReports(datanodeID)), Arrays.asList(getMetadataStorageReports(datanodeID)))); ContainerController controller = Mockito.mock(ContainerController.class); when(controller.getContainerReport()).thenReturn( - TestUtils.getRandomContainerReports(10)); + HddsTestUtils.getRandomContainerReports(10)); when(ozoneContainer.getController()).thenReturn(controller); when(ozoneContainer.getPipelineReport()).thenReturn( - TestUtils.getRandomPipelineReports()); + HddsTestUtils.getRandomPipelineReports()); HDDSLayoutVersionManager versionManager = Mockito.mock(HDDSLayoutVersionManager.class); when(versionManager.getMetadataLayoutVersion()) @@ -393,7 +394,7 @@ public void testHeartbeat() throws Exception { serverAddress, 1000)) { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( + .setNodeReport(HddsTestUtils.createNodeReport( Arrays.asList(getStorageReports(dataNode.getUuid())), Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); @@ -416,7 +417,7 @@ public void testHeartbeatWithCommandStatusReport() throws Exception { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( + .setNodeReport(HddsTestUtils.createNodeReport( Arrays.asList(getStorageReports(dataNode.getUuid())), Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java index ef79f7c6d132..f9391c7dd856 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; @@ -149,9 +149,8 @@ public void testNodeReportProcessing() 
throws InterruptedException { long nrProcessed = getCounter("NumNodeReportProcessed"); StorageReportProto storageReport = - TestUtils.createStorageReport(registeredDatanode.getUuid(), "/tmp", 100, - 10, 90, - null); + HddsTestUtils.createStorageReport(registeredDatanode.getUuid(), "/tmp", + 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() .addStorageReport(storageReport).build(); @@ -170,7 +169,7 @@ public void testNodeReportProcessingFailure() { DatanodeDetails randomDatanode = MockDatanodeDetails.randomDatanodeDetails(); - StorageReportProto storageReport = TestUtils.createStorageReport( + StorageReportProto storageReport = HddsTestUtils.createStorageReport( randomDatanode.getUuid(), "/tmp", 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() @@ -188,7 +187,7 @@ public void testNodeReportProcessingFailure() { @Test public void testNodeCountAndInfoMetricsReported() throws Exception { - StorageReportProto storageReport = TestUtils.createStorageReport( + StorageReportProto storageReport = HddsTestUtils.createStorageReport( registeredDatanode.getUuid(), "/tmp", 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() .addStorageReport(storageReport).build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index cb868083d587..137a408ba31c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -174,7 +174,7 @@ public void testPipelineCloseWithOpenContainer() public void testPipelineCloseWithPipelineAction() throws Exception { List dns = ratisContainer.getPipeline().getNodes(); PipelineActionsFromDatanode - pipelineActionsFromDatanode = TestUtils + pipelineActionsFromDatanode = HddsTestUtils .getPipelineActionFromDatanode(dns.get(0), ratisContainer.getPipeline().getId()); // send closing action for pipeline diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index b2a71076c9d6..6e56a4d34b07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import 
org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; @@ -345,7 +345,7 @@ public void restartStorageContainerManager(boolean waitForDatanode) LOG.info("Restarting SCM in cluster " + this.getClass()); scm.stop(); scm.join(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); if (waitForDatanode) { waitForClusterToBeReady(); @@ -703,7 +703,7 @@ protected StorageContainerManager createSCM() scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); - StorageContainerManager scm = TestUtils.getScmSimple(conf); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); HealthyPipelineSafeModeRule rule = scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule(); if (rule != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index e0d061f723dd..440f5ca7574e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.CheckedConsumer; import org.apache.hadoop.hdds.scm.safemode.HealthyPipelineSafeModeRule; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -260,7 +260,7 @@ public void restartStorageContainerManager( OzoneConfiguration scmConf = scm.getConfiguration(); shutdownStorageContainerManager(scm); scm.join(); - scm = TestUtils.getScmSimple(scmConf); + scm = HddsTestUtils.getScmSimple(scmConf); scmhaService.activate(scm); scm.start(); if (waitForSCM) { @@ -583,7 +583,7 @@ protected SCMHAService createSCMService() } else { StorageContainerManager.scmBootstrap(scmConfig); } - StorageContainerManager scm = TestUtils.getScmSimple(scmConfig); + StorageContainerManager scm = HddsTestUtils.getScmSimple(scmConfig); HealthyPipelineSafeModeRule rule = scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule(); if (rule != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index a9a991a3e760..494f74c04706 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; @@ -274,7 +274,7 @@ private void setSecureConfig() throws IOException { public void testSecureScmStartupSuccess() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); 
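For the cluster restart path above, the only contract the rename must preserve is that getScmSimple builds a fresh StorageContainerManager from an existing OzoneConfiguration. Condensed from MiniOzoneClusterImpl#restartStorageContainerManager in this diff (scm, conf and waitForClusterToBeReady() are the surrounding class's members; comments are inferred from the names):

    // Stop-rebuild-start sequence for the SCM under test.
    void restartScm(boolean waitForDatanode) throws Exception {
      scm.stop();
      scm.join();                              // wait for the old instance to exit
      scm = HddsTestUtils.getScmSimple(conf);  // same config, fresh instance
      scm.start();
      if (waitForDatanode) {
        waitForClusterToBeReady();             // cluster readiness check
      }
    }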
assertEquals(clusterId, scmInfo.getClusterId()); @@ -285,7 +285,7 @@ public void testSecureScmStartupSuccess() throws Exception { public void testSCMSecurityProtocol() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -328,7 +328,7 @@ public void testSCMSecurityProtocol() throws Exception { @Test public void testAdminAccessControlException() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -391,7 +391,7 @@ public void testSecureScmStartupFailure() throws Exception { LambdaTestUtils.intercept(IOException.class, "Running in secure mode, but config doesn't have a keytab", - () -> TestUtils.getScmSimple(conf)); + () -> HddsTestUtils.getScmSimple(conf)); conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/_HOST@EXAMPLE.com"); @@ -399,7 +399,7 @@ public void testSecureScmStartupFailure() throws Exception { "/etc/security/keytabs/scm.keytab"); testCommonKerberosFailures( - () -> TestUtils.getScmSimple(conf)); + () -> HddsTestUtils.getScmSimple(conf)); } @@ -428,7 +428,7 @@ private void testCommonKerberosFailures(Callable test) throws Exception { public void testSecureOMInitializationFailure() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); setupOm(conf); conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "non-existent-user@EXAMPLE.com"); @@ -442,7 +442,7 @@ public void testSecureOMInitializationFailure() throws Exception { public void testSecureOmInitializationSuccess() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); @@ -460,7 +460,7 @@ public void testSecureOmInitializationSuccess() throws Exception { public void testAccessControlExceptionOnClient() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); setupOm(conf); @@ -714,7 +714,7 @@ public void testSecureOmReInit() throws Exception { initSCM(); try { - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false); OMStorage omStore = new OMStorage(conf); @@ -760,7 +760,7 @@ public void testSecureOmInitSuccess() throws Exception { omLogs.clearOutput(); initSCM(); try { - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); OMStorage omStore = new OMStorage(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 6116212deadc..175537d10a7d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -55,7 +55,7 @@ import java.util.Arrays; import 
com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; @@ -649,7 +649,7 @@ public void testSCMInitializationFailure() exception.expect(SCMException.class); exception.expectMessage( "SCM not initialized due to storage config failure"); - TestUtils.getScmSimple(conf); + HddsTestUtils.getScmSimple(conf); } @Test @@ -667,7 +667,7 @@ public void testScmInfo() throws Exception { scmStore.setScmId(scmId); // writes the version file properties scmStore.initialize(); - StorageContainerManager scm = TestUtils.getScmSimple(conf); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); Assert.assertEquals(clusterId, scmInfo.getClusterId()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index d772a3f20f84..e9f6a0681645 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -50,7 +50,7 @@ import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; @@ -107,7 +107,7 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeClass public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 2e369b4d5912..751fd26ab81a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -55,7 +55,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.After; import org.junit.Assert; @@ -117,7 +117,7 @@ public static Collection data() { public void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - TestOMRequestUtils.configureFSOptimizedPaths(conf, + OMRequestTestUtils.configureFSOptimizedPaths(conf, true, BucketLayout.fromString(bucketLayout)); cluster = MiniOzoneCluster.newBuilder(conf) 
.setNumDatanodes(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 61559fb08474..b6a5a0366e9a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -113,7 +112,7 @@ public void testContainerMetrics() throws Exception { Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, - TestHddsDispatcher.NO_OP_ICR_SENDER)); + c -> {})); } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 29f19eba0d70..a2131a66efe6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; @@ -213,7 +212,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, - TestHddsDispatcher.NO_OP_ICR_SENDER)); + c -> {})); } HddsDispatcher dispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index b15bb67fad81..a5fcbb9ff678 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -56,7 +56,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import 
org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; @@ -195,7 +194,7 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), containerSet, volumeSet, metrics, - TestHddsDispatcher.NO_OP_ICR_SENDER)); + c -> {})); } HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java index 497cdc1eb4f0..42905e2a4087 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.freon; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; /** * Test for HadoopDirTreeGenerator - prefix layout. @@ -27,7 +27,7 @@ public class TestHadoopDirTreeGeneratorWithFSO protected OzoneConfiguration getOzoneConfiguration() { OzoneConfiguration conf = new OzoneConfiguration(); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 2ddac31a4c17..d691f2c324dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -84,7 +84,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; @@ -185,7 +185,7 @@ public static void setUp() throws Exception { configurator.setNetworkTopology(clusterMap); 
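The three container-server tests above previously reached into TestHddsDispatcher for its NO_OP_ICR_SENDER constant; inlining the lambda c -> {} removes that dependency on another test class while passing the same no-op behaviour to Handler.getHandlerForContainerType. The substitution works because the parameter is a functional interface; the actual interface name is not visible in this diff, so the sketch below uses a hypothetical stand-in purely to illustrate the equivalence:

    // Hypothetical stand-in for the ICR-sender parameter type (illustration only;
    // the real interface lives in the container-service module).
    @FunctionalInterface
    interface ReportSender<C> {
      void send(C container);
    }

    final class NoOpSenderExample {
      // A named no-op constant and an inline lambda are interchangeable:
      static final ReportSender<Object> NO_OP = container -> { };

      static void use(ReportSender<Object> sender) {
        sender.send(new Object()); // does nothing for either form
      }

      public static void main(String[] args) {
        use(NO_OP);
        use(c -> { });   // the form this patch inlines at each call site
      }
    }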
configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); configurator.setScmContext(SCMContext.emptyContext()); - scm = TestUtils.getScm(conf, configurator); + scm = HddsTestUtils.getScm(conf, configurator); scm.start(); scm.exitSafeMode(); scmBlockSize = (long) conf @@ -268,7 +268,7 @@ private static void createBucket(String volumeName, String bucketName, .setIsVersionEnabled(isVersionEnabled) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } private static void createVolume(String volumeName) throws IOException { @@ -277,7 +277,7 @@ private static void createVolume(String volumeName) throws IOException { .setAdminName("bilbo") .setOwnerName("bilbo") .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); } @Test @@ -951,12 +951,12 @@ public void testListStatusWithTableCache() throws Exception { // Add a total of 100 key entries to DB and TableCache (50 entries each) for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { // Add to DB - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } else { // Add to TableCache - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); @@ -1022,13 +1022,13 @@ public void testListStatusWithTableCacheRecursive() throws Exception { String prefixKeyInCache = "key-c"; for (int i = 1; i <= 10; i++) { if (i % 2 == 0) { // Add to DB - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } else { // Add to TableCache - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, HddsProtos.ReplicationType.RATIS, ONE, @@ -1067,13 +1067,13 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); existKeySet.add(prefixKey + i); } else { - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); @@ -1289,7 +1289,7 @@ public void testRefreshPipeline() throws Exception { ScmClient scmClientMock = mock(ScmClient.class); when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1", + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", "b1", "k1", ReplicationType.RATIS, ReplicationFactor.THREE); @@ -1343,7 +1343,7 @@ public void testRefreshPipelineException() throws Exception { ScmClient scmClientMock = mock(ScmClient.class); when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1", + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", "b1", "k1", ReplicationType.RATIS, ReplicationFactor.THREE); diff 
--git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index b8f37ce4ac8c..6b93ff0af198 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -45,7 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; @@ -704,7 +704,7 @@ private void createTestKey(OzoneBucket bucket, String keyName, private OmDirectoryInfo getDirInfo(String parentKey) throws Exception { OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager(); - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetadataManager); String[] pathComponents = StringUtils.split(parentKey, '/'); long parentId = bucketId; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java index 402745516fde..a59983fa3fa3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.debug.DBScanner; import org.apache.hadoop.ozone.debug.RDBParser; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -89,7 +89,7 @@ public void testOMDB() throws Exception { .build(); // insert 5 keys for (int i = 0; i<5; i++) { - OmKeyInfo value = TestOMRequestUtils.createOmKeyInfo("sampleVol", + OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("sampleVol", "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE); String key = "key"+ (i); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java index 79197bba7289..adccc7aabded 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.upgrade.LayoutFeature; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; @@ -60,7 +60,7 @@ public void testStartupSlvLessThanMlv() throws Exception { int mlv = largestSlv + 1; // Create version file with MLV > SLV, which should fail the cluster 
build. - TestUpgradeUtils.createVersionFile(omSubdir, HddsProtos.NodeType.OM, mlv); + UpgradeTestUtils.createVersionFile(omSubdir, HddsProtos.NodeType.OM, mlv); MiniOzoneCluster.Builder clusterBuilder = MiniOzoneCluster.newBuilder(conf) .setClusterId(UUID.randomUUID().toString()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index b432826660c6..391f6619b1bc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -255,7 +255,7 @@ private void startCluster() throws Exception { // Note: OM doesn't support live config reloading conf.setBoolean(OZONE_ACL_ENABLED, true); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java index 0c5431e11ea6..63a4b4038328 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.Assert; @@ -58,7 +58,7 @@ public void testRequestWithNonExistentBucket() String volumeName = "vol1"; String bucketName = "invalidBuck"; - OzoneManagerProtocolProtos.OMRequest omRequest = TestOMRequestUtils + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils .createCompleteMPURequest(volumeName, bucketName, "mpuKey", "mpuKeyID", new ArrayList<>()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 56233bd91cad..2818ed0ab52e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; @@ -351,7 +351,7 @@ private LinkedTreeMap getContainerResponseMap(String containerResponse, */ private void addKeys(int start, int end) throws Exception { for(int i = start; i < end; i++) { - Pipeline pipeline = TestUtils.getRandomPipeline(); + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); List omKeyLocationInfoList = new ArrayList<>(); BlockID blockID = new BlockID(i, 1); OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java index 10ab52858eec..f0c3b1df101c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java @@ -45,7 +45,7 @@ import java.util.Map; import java.util.UUID; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java index c84f6b98bec5..60fffddbf0ac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -54,7 +54,7 @@ public class TestNSSummaryAdmin extends TestStandardOutputUtil { @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); conf.set(OZONE_RECON_ADDRESS_KEY, "localhost:9888"); cluster = MiniOzoneCluster.newBuilder(conf) .withoutDatanodes().includeRecon(true).build(); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java index 1786c2a88365..23cc4662f5e3 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import 
org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -50,7 +50,7 @@ public class TestOmKeyInfoCodec { private OmKeyInfo getKeyInfo(int chunkNum) { List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = TestUtils.getRandomPipeline(); + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); for (int i = 0; i < chunkNum; i++) { BlockID blockID = new BlockID(i, i); OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java index d356552352b7..6dae8f1e4dfd 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -55,7 +55,7 @@ public class TestRepeatedOmKeyInfoCodec { private OmKeyInfo getKeyInfo(int chunkNum) { List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = TestUtils.getRandomPipeline(); + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); for (int i = 0; i < chunkNum; i++) { BlockID blockID = new BlockID(i, i); OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java deleted file mode 100644 index 3758122d2385..000000000000 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; - - -/** - * Test TestInstanceHelper. - * - * Utility methods to create test instances of protobuf related classes - */ -public final class TestInstanceHelper { - - private TestInstanceHelper(){ - super(); - } - - public static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( - String aclString){ - OzoneAcl oacl = OzoneAcl.parseAcl(aclString); - ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); - return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() - .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) - .setName(oacl.getName()) - .setRights(rights) - .setAclScope(OzoneManagerStorageProtos. - OzoneAclInfo.OzoneAclScope.ACCESS) - .build(); - } - - public static HddsProtos.KeyValue getDefaultTestMetadata( - String key, String value) { - return HddsProtos.KeyValue.newBuilder() - .setKey(key) - .setValue(value) - .build(); - } - - public static OzoneManagerStorageProtos.PersistedPrefixInfo - getDefaultTestPrefixInfo(String name, String aclString, - HddsProtos.KeyValue metadata) { - return OzoneManagerStorageProtos.PersistedPrefixInfo.newBuilder() - .setName(name) - .addAcls(buildTestOzoneAclInfo(aclString)) - .addMetadata(metadata) - .build(); - } -} diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 8f00736030a3..fc16b3c88662 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.helpers; +import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; @@ -36,7 +37,38 @@ */ public class TestOmPrefixInfo { - public OmPrefixInfo getOmPrefixInfoForTest(String path, + private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( + String aclString){ + OzoneAcl oacl = OzoneAcl.parseAcl(aclString); + ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); + return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() + .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) + .setName(oacl.getName()) + .setRights(rights) + .setAclScope(OzoneManagerStorageProtos. 
+ OzoneAclInfo.OzoneAclScope.ACCESS) + .build(); + } + + private static HddsProtos.KeyValue getDefaultTestMetadata( + String key, String value) { + return HddsProtos.KeyValue.newBuilder() + .setKey(key) + .setValue(value) + .build(); + } + + private static OzoneManagerStorageProtos.PersistedPrefixInfo + getDefaultTestPrefixInfo(String name, String aclString, + HddsProtos.KeyValue metadata) { + return OzoneManagerStorageProtos.PersistedPrefixInfo.newBuilder() + .setName(name) + .addAcls(buildTestOzoneAclInfo(aclString)) + .addMetadata(metadata) + .build(); + } + + private OmPrefixInfo getOmPrefixInfoForTest(String path, IAccessAuthorizer.ACLIdentityType identityType, String identityString, IAccessAuthorizer.ACLType aclType, @@ -77,10 +109,9 @@ public void testgetFromProtobufOneMetadataOneAcl() { String aclString = "user:myuser:rw"; String metakey = "metakey"; String metaval = "metaval"; - HddsProtos.KeyValue metadata = TestInstanceHelper - .getDefaultTestMetadata(metakey, metaval); + HddsProtos.KeyValue metadata = getDefaultTestMetadata(metakey, metaval); OzoneManagerStorageProtos.PersistedPrefixInfo prefixInfo = - TestInstanceHelper.getDefaultTestPrefixInfo(prefixInfoPath, + getDefaultTestPrefixInfo(prefixInfoPath, aclString, metadata); OmPrefixInfo ompri = OmPrefixInfo.getFromProtobuf(prefixInfo); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index ae263b6c3bd9..542c77492210 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.*; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -223,7 +223,7 @@ public void testGetBucketInfo() throws Exception { .setAdminName("bilbo") .setOwnerName("bilbo") .build(); - TestOMRequestUtils.addVolumeToOM(metaMgr, args); + OMRequestTestUtils.addVolumeToOM(metaMgr, args); // Create bucket createBucket(metaMgr, bucketInfo); // Check exception thrown when bucket does not exist @@ -245,7 +245,7 @@ public void testGetBucketInfo() throws Exception { private void createBucket(OMMetadataManager metadataManager, OmBucketInfo bucketInfo) throws IOException { - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java index 2860828223c6..9f17464fbc2c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java @@ -25,6 +25,8 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,7 +41,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import 
org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; @@ -215,14 +216,14 @@ private void createAndDeleteKeys(KeyManager keyManager, int keyCount, // cheat here, just create a volume and bucket entry so that we can // create the keys, we put the same data for key and value since the // system does not decode the object - TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() .setOwnerName("o") .setAdminName("a") .setVolume(volumeName) .build()); - TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName) .build()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index c1094f72b4f1..6a2cf5890cec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -65,7 +65,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; @@ -292,7 +292,7 @@ private void createBucket(OMMetadataManager omMetadataManager, .setIsVersionEnabled(false) .setAcls(new ArrayList<>()) .build(); - TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo); + OMRequestTestUtils.addBucketToOM(omMetadataManager, omBucketInfo); } private OmMultipartInfo initMultipartUpload(OzoneManagerProtocol omtest, @@ -382,13 +382,13 @@ public void testLookupFileWithDnFailure() throws IOException { .setAdminName("admin") .setOwnerName("admin") .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("volumeOne") .setBucketName("bucketOne") .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() .setBlockID(new BlockID(1L, 1L)) @@ -411,7 +411,7 @@ public void testLookupFileWithDnFailure() throws IOException { new RatisReplicationConfig(ReplicationFactor.THREE)) .setAcls(Collections.emptyList()) .build(); - TestOMRequestUtils.addKeyToOM(metadataManager, keyInfo); + OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder() .setVolumeName("volumeOne") @@ -444,10 +444,10 @@ public void listStatus() throws Exception { String keyPrefix = "key"; String client = "client.host"; - TestOMRequestUtils.addVolumeToDB(volume, OzoneConsts.OZONE, + 
OMRequestTestUtils.addVolumeToDB(volume, OzoneConsts.OZONE, metadataManager); - TestOMRequestUtils.addBucketToDB(volume, bucket, metadataManager); + OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager); final Pipeline pipeline = MockPipeline.createPipeline(3); final List nodes = pipeline.getNodes().stream() @@ -484,7 +484,7 @@ public void listStatus() throws Exception { .setUpdateID(i) .build(); keyInfo.appendNewBlocks(singletonList(keyLocationInfo), false); - TestOMRequestUtils.addKeyToOM(metadataManager, keyInfo); + OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); } when(containerClient.getContainerWithPipelineBatch(containerIDs)) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index dbe5497ac1d2..2ad44d163afa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -105,8 +105,8 @@ public void testListVolumes() throws Exception { .setVolume(volName) .build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } // Test list volumes with setting startVolume that @@ -133,16 +133,16 @@ public void testListAllVolumes() throws Exception { volName = "vola" + i; OmVolumeArgs omVolumeArgs = argsBuilder. setOwnerName(ownerName).setVolume(volName).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } for (int i = 0; i < 50; i++) { ownerName = "owner" + i; volName = "volb" + i; OmVolumeArgs omVolumeArgs = argsBuilder. 
setOwnerName(ownerName).setVolume(volName).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } String prefix = ""; @@ -177,7 +177,7 @@ public void testListBuckets() throws Exception { String prefixBucketNameWithOzoneOwner = "ozoneBucket"; String prefixBucketNameWithHadoopOwner = "hadoopBucket"; - TestOMRequestUtils.addVolumeToDB(volumeName1, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName1, omMetadataManager); TreeSet volumeABucketsPrefixWithOzoneOwner = new TreeSet<>(); @@ -201,7 +201,7 @@ public void testListBuckets() throws Exception { String volumeName2 = "volumeB"; TreeSet volumeBBucketsPrefixWithOzoneOwner = new TreeSet<>(); TreeSet volumeBBucketsPrefixWithHadoopOwner = new TreeSet<>(); - TestOMRequestUtils.addVolumeToDB(volumeName2, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName2, omMetadataManager); // Add exact name in prefixBucketNameWithOzoneOwner without postfix. volumeBBucketsPrefixWithOzoneOwner.add(prefixBucketNameWithOzoneOwner); @@ -329,8 +329,8 @@ public void testListKeys() throws Exception { String ozoneTestBucket = "ozoneBucket-Test"; // Create volumes and buckets. - TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeNameB, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameB, omMetadataManager); addBucketsToCache(volumeNameA, ozoneBucket); addBucketsToCache(volumeNameB, hadoopBucket); addBucketsToCache(volumeNameA, ozoneTestBucket); @@ -463,7 +463,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { String ozoneBucket = "ozoneBucket"; // Create volumes and bucket. - TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameA, omMetadataManager); addBucketsToCache(volumeNameA, ozoneBucket); @@ -565,11 +565,11 @@ public void testGetExpiredOpenKeys() throws Exception { // cache, since they will be picked up once the cache is flushed. Set expiredKeys = new HashSet<>(); for (int i = 0; i < numExpiredOpenKeys; i++) { - OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, 0L, expiredAgeMillis); - TestOMRequestUtils.addKeyToTable(true, false, + OMRequestTestUtils.addKeyToTable(true, false, keyInfo, clientID, 0L, omMetadataManager); String groupID = omMetadataManager.getOpenKey(volumeName, bucketName, @@ -579,11 +579,11 @@ public void testGetExpiredOpenKeys() throws Exception { // Add unexpired keys to open key table. 
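testGetExpiredOpenKeys above is the caller of the seven-argument createOmKeyInfo overload, which back-dates the key so it already counts as expired when the open-key table is scanned. Condensed from the hunk above; the meaning of the trailing long arguments is inferred from the parameter names, so treat the comments as assumptions:

    // Seed one already-expired entry in the open key table.
    OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
        bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, 0L /* object/txn id? */,
        expiredAgeMillis /* age beyond the expiry window */);
    OMRequestTestUtils.addKeyToTable(true /* open key table */,
        false /* cache flag? */, keyInfo, clientID, 0L, omMetadataManager);

The unexpired counterpart below uses the shorter overload, so the key keeps its current creation time.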
     for (int i = 0; i < numUnexpiredOpenKeys; i++) {
-      OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+      OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
           bucketName, "unexpired" + i, HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.ONE);
-      TestOMRequestUtils.addKeyToTable(true, false,
+      OMRequestTestUtils.addKeyToTable(true, false,
          keyInfo, clientID, 0L, omMetadataManager);
     }
@@ -619,11 +619,11 @@ private void addKeysToOM(String volumeName, String bucketName,
       String keyName, int i) throws Exception {
     if (i%2== 0) {
-      TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+      OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
           1000L, HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     } else {
-      TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, keyName,
+      OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName,
           HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     }
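
The TestOmMetadataManager hunks above all migrate the same DB-seeding pattern. A minimal sketch of that pattern follows (the class name and the "vol1"/"bucket1"/"key1" literals are illustrative, not part of the patch; the helper calls and signatures are the ones visible in the hunks, and the leading boolean of addKeyToTable appears to select the open-key table, cf. addKeyToOpenKeyTable further below):

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;

    /** Sketch: seed one volume, bucket and key directly into the OM DB. */
    public final class MetadataSeedingSketch {
      private MetadataSeedingSketch() { }

      public static void seed(OMMetadataManager omMetadataManager)
          throws Exception {
        OMRequestTestUtils.addVolumeToDB("vol1", omMetadataManager);
        OMRequestTestUtils.addBucketToDB("vol1", "bucket1", omMetadataManager);
        // false: write to the key table rather than the open-key table.
        OMRequestTestUtils.addKeyToTable(false, "vol1", "bucket1", "key1",
            1000L, HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
      }
    }
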
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
index 2700ae5c1b74..5804a9a39cc6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.junit.After;
 import org.junit.Assert;
@@ -102,14 +102,14 @@ public void testRecoverTrash() throws IOException {
   private void createAndDeleteKey(String keyName) throws IOException {
-    TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(),
+    OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(),
         OmVolumeArgs.newBuilder()
             .setOwnerName("owner")
             .setAdminName("admin")
             .setVolume(volumeName)
             .build());
-    TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(),
+    OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(),
         OmBucketInfo.newBuilder()
             .setVolumeName(volumeName)
             .setBucketName(bucketName)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index 3dc3b97fec39..050417aa2e8d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
@@ -304,7 +304,7 @@ private void doMixTransactions(String volumeName, int bucketCount,
   private OMClientResponse deleteBucket(String volumeName, String bucketName,
       long transactionID) {
     OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createDeleteBucketRequest(volumeName, bucketName);
+        OMRequestTestUtils.createDeleteBucketRequest(volumeName, bucketName);
     OMBucketDeleteRequest omBucketDeleteRequest =
         new OMBucketDeleteRequest(omRequest);
@@ -450,7 +450,7 @@ private OMClientResponse createVolume(String volumeName,
     String admin = OzoneConsts.OZONE;
     String owner = UUID.randomUUID().toString();
     OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createVolumeRequest(volumeName, admin, owner);
+        OMRequestTestUtils.createVolumeRequest(volumeName, admin, owner);
     OMVolumeCreateRequest omVolumeCreateRequest =
         new OMVolumeCreateRequest(omRequest);
@@ -467,7 +467,7 @@ private OMBucketCreateResponse createBucket(String volumeName,
       String bucketName, long transactionID) {
     OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
+        OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false,
             OzoneManagerProtocolProtos.StorageTypeProto.DISK);
     OMBucketCreateRequest omBucketCreateRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
similarity index 99%
rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index bec1587411bb..204b4aaa433a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -81,9 +81,9 @@
 /**
  * Helper class to test OMClientRequest classes.
  */
-public final class TestOMRequestUtils {
+public final class OMRequestTestUtils {
-  private TestOMRequestUtils() {
+  private OMRequestTestUtils() {
     //Do nothing
   }
@@ -1042,7 +1042,7 @@ public static long getBucketId(String volumeName, String bucketName,
   public static long addParentsToDirTable(String volumeName, String bucketName,
       String key, OMMetadataManager omMetaMgr) throws Exception {
-    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+    long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName,
         omMetaMgr);
     if (org.apache.commons.lang3.StringUtils.isBlank(key)) {
       return bucketId;
@@ -1053,9 +1053,9 @@ public static long addParentsToDirTable(String volumeName, String bucketName,
     long txnID = 50;
     for (String pathElement : pathComponents) {
       OmDirectoryInfo omDirInfo =
-          TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+          OMRequestTestUtils.createOmDirectoryInfo(pathElement, ++objectId,
               parentId);
-      TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+      OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo,
           txnID, omMetaMgr);
       parentId = omDirInfo.getObjectID();
     }
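
The addParentsToDirTable hunk above is the building block most FSO tests below rely on: it walks the path and creates one directory-table entry per element. A minimal usage sketch (the volume, bucket and path literals are illustrative; omMetadataManager is assumed to come from the test fixture):

    // Seed the directory table with "a", "a/b" and "a/b/c"; the returned
    // value is the object ID of "c", i.e. the parent ID under which a file
    // would subsequently be created.
    long parentId = OMRequestTestUtils.addParentsToDirTable(
        "vol1", "bucket1", "a/b/c", omMetadataManager);
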
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
index 3f6bc154685c..c31bf0093341 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
@@ -94,7 +94,7 @@ public void testUserInfo() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     String volumeName = UUID.randomUUID().toString();
     OzoneManagerProtocolProtos.OMRequest omRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true,
+        OMRequestTestUtils.createBucketRequest(bucketName, volumeName, true,
             OzoneManagerProtocolProtos.StorageTypeProto.DISK);
     OMBucketCreateRequest omBucketCreateRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 4ccf4206339a..83b34006a7bc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -35,7 +35,7 @@
     .StorageTypeProto;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
@@ -74,7 +74,7 @@ public void testValidateAndUpdateCacheWithNoVolume() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    OMRequest originalRequest = TestOMRequestUtils.createBucketRequest(
+    OMRequest originalRequest = OMRequestTestUtils.createBucketRequest(
         bucketName, volumeName, false, StorageTypeProto.SSD);
     OMBucketCreateRequest omBucketCreateRequest =
@@ -129,7 +129,7 @@ private OMBucketCreateRequest doPreExecute(String volumeName,
       String bucketName) throws Exception {
     addCreateVolumeToTable(volumeName, omMetadataManager);
     OMRequest originalRequest =
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
+        OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false,
             StorageTypeProto.SSD);
     OMBucketCreateRequest omBucketCreateRequest =
@@ -217,6 +217,6 @@ public static void addCreateVolumeToTable(String volumeName,
         OmVolumeArgs.newBuilder().setCreationTime(Time.now())
             .setVolume(volumeName).setAdminName(UUID.randomUUID().toString())
             .setOwnerName(UUID.randomUUID().toString()).build();
-    TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs);
+    OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
index 28ba8defa18e..f3dd34ca07ad 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
@@ -56,7 +56,7 @@ private OMBucketCreateRequest doPreExecute(String volumeName,
       String bucketName) throws Exception {
     addCreateVolumeToTable(volumeName, omMetadataManager);
     OMRequest originalRequest =
-        TestOMRequestUtils.createBucketReqFSO(bucketName, volumeName,
+        OMRequestTestUtils.createBucketReqFSO(bucketName, volumeName,
             false, StorageTypeProto.SSD);
     OMBucketCreateRequest omBucketCreateRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
index 1037baa8eaf9..090d3fd12c8e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
@@ -21,9 +21,9 @@
 import java.util.UUID;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -61,7 +61,7 @@ public void testValidateAndUpdateCache() throws Exception {
         new OMBucketDeleteRequest(omRequest);
     // Create Volume and bucket entries in DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
@@ -93,7 +93,7 @@ public void testValidateAndUpdateCacheFailure() throws Exception {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
index c2a18acedbbb..243adfe0c49b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
@@ -22,11 +22,11 @@
 import java.util.UUID;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
@@ -80,7 +80,7 @@ public void testValidateAndUpdateCache() throws Exception {
         bucketName, true, Long.MAX_VALUE);
     // Create with default BucketInfo values
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OMBucketSetPropertyRequest omBucketSetPropertyRequest =
@@ -142,9 +142,9 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    TestOMRequestUtils.addVolumeToDB(
+    OMRequestTestUtils.addVolumeToDB(
         volumeName, omMetadataManager, 10 * GB);
-    TestOMRequestUtils.addBucketToDB(
+    OMRequestTestUtils.addBucketToDB(
         volumeName, bucketName, omMetadataManager, 8 * GB);
     OMRequest omRequest = createSetBucketPropertyRequest(volumeName,
         bucketName, true, 20 * GB);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java
index c4d4cc2d8a7b..3c3e55e1e08c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -42,7 +42,7 @@ public void testPreExecute() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketAddAclRequest(volumeName, bucketName, acl);
     long originModTime = originalRequest.getAddAclRequest()
         .getModificationTime();
@@ -66,13 +66,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     String ownerName = "testUser";
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils.
+    OMRequest originalRequest = OMRequestTestUtils.
         createBucketAddAclRequest(volumeName, bucketName, acl);
     OMBucketAddAclRequest omBucketAddAclRequest =
         new OMBucketAddAclRequest(originalRequest);
@@ -101,7 +101,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketAddAclRequest(volumeName, bucketName, acl);
     OMBucketAddAclRequest omBucketAddAclRequest =
         new OMBucketAddAclRequest(originalRequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java
index eca281ca977f..f39c052cd18e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -41,7 +41,7 @@ public void testPreExecute() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketRemoveAclRequest(volumeName, bucketName, acl);
     long originModTime = originalRequest.getRemoveAclRequest()
         .getModificationTime();
@@ -65,14 +65,14 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     String ownerName = "testUser";
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw");
     // Add acl
-    OMRequest addAclRequest = TestOMRequestUtils
+    OMRequest addAclRequest = OMRequestTestUtils
        .createBucketAddAclRequest(volumeName, bucketName, acl);
     OMBucketAddAclRequest omBucketAddAclRequest =
         new OMBucketAddAclRequest(addAclRequest);
@@ -93,7 +93,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     Assert.assertEquals(acl, bucketAcls.get(0));
     // Remove acl.
-    OMRequest removeAclRequest = TestOMRequestUtils
+    OMRequest removeAclRequest = OMRequestTestUtils
         .createBucketRemoveAclRequest(volumeName, bucketName, acl);
     OMBucketRemoveAclRequest omBucketRemoveAclRequest =
         new OMBucketRemoveAclRequest(removeAclRequest);
@@ -118,7 +118,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketRemoveAclRequest(volumeName, bucketName, acl);
     OMBucketRemoveAclRequest omBucketRemoveAclRequest =
         new OMBucketRemoveAclRequest(originalRequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java
index 519d1ddbd85d..53a9d80917a6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java
@@ -20,7 +20,7 @@
 import com.google.common.collect.Lists;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -42,7 +42,7 @@ public void testPreExecute() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketSetAclRequest(volumeName, bucketName,
             Lists.newArrayList(acl));
     long originModTime = originalRequest.getSetAclRequest()
@@ -67,15 +67,15 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     String ownerName = "owner";
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OzoneAcl userAcl = OzoneAcl.parseAcl("user:newUser:rw");
     OzoneAcl groupAcl = OzoneAcl.parseAcl("group:newGroup:rw");
     List<OzoneAcl> acls = Lists.newArrayList(userAcl, groupAcl);
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
         .createBucketSetAclRequest(volumeName, bucketName, acls);
     OMBucketSetAclRequest omBucketSetAclRequest =
         new OMBucketSetAclRequest(originalRequest);
@@ -106,7 +106,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw");
-    OMRequest originalRequest = TestOMRequestUtils
+    OMRequest originalRequest = OMRequestTestUtils
        .createBucketSetAclRequest(volumeName, bucketName,
            Lists.newArrayList(acl));
     OMBucketSetAclRequest omBucketSetAclRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
index 66dddb7a9f6a..46bdb5eaf785 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
@@ -46,7 +46,7 @@
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateDirectoryRequest;
@@ -108,7 +108,7 @@ public void testPreExecute() throws Exception {
     String bucketName = "bucket1";
     String keyName = "a/b/c";
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -132,7 +132,7 @@ public void testValidateAndUpdateCache() throws Exception {
     String keyName = genRandomKeyName();
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -201,7 +201,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
         omDirectoryCreateRequest.preExecute(ozoneManager);
     omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+    OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager);
     OMClientResponse omClientResponse =
         omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -224,10 +224,10 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath()
     String keyName = genRandomKeyName();
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
         keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -266,10 +266,10 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
     String keyName = genRandomKeyName();
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
         OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L,
         HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
         omMetadataManager);
@@ -310,10 +310,10 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
     String keyName = genRandomKeyName();
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     // Add a key with first two levels.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
         keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -348,7 +348,7 @@ public void testCreateDirectoryOMMetric()
     String keyName = genRandomKeyName();
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
index 36a4c9125394..beea90f909fc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
@@ -90,7 +90,7 @@ public void setup() throws Exception {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
-    TestOMRequestUtils.configureFSOptimizedPaths(ozoneConfiguration, true);
+    OMRequestTestUtils.configureFSOptimizedPaths(ozoneConfiguration, true);
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
@@ -114,7 +114,7 @@ public void testPreExecute() throws Exception {
     String bucketName = "bucket1";
     String keyName = "a/b/c";
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -138,7 +138,7 @@ public void testValidateAndUpdateCache() throws Exception {
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
@@ -218,7 +218,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
     omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq,
         BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+    OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager);
     OMClientResponse omClientResponse =
         omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
@@ -241,7 +241,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath()
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
@@ -252,14 +252,14 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath()
     //1. Create root
     OmDirectoryInfo omDirInfo =
-        TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+        OMRequestTestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
            bucketID);
-    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+    OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
         omMetadataManager);
     //2. Create sub-directory under root
-    omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+    omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
         omDirInfo.getObjectID());
-    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+    OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
         omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -293,7 +293,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
@@ -309,9 +309,9 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
       long objID = 100 + indx;
       long txnID = 5000 + indx;
       // for index=0, parentID is bucketID
-      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+      OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(
           dirs.get(indx), objID, parentID);
-      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+      OMRequestTestUtils.addDirKeyToDirTable(false, omDirInfo,
           txnID, omMetadataManager);
       parentID = omDirInfo.getObjectID();
@@ -356,7 +356,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
@@ -369,9 +369,9 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
       long objID = 100 + indx;
       long txnID = 5000 + indx;
       // for index=0, parentID is bucketID
-      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+      OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(
          dirs.get(indx), objID, parentID);
-      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+      OMRequestTestUtils.addDirKeyToDirTable(false, omDirInfo,
          txnID, omMetadataManager);
       parentID = omDirInfo.getObjectID();
@@ -381,7 +381,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
     long txnID = 50000;
     // Add a file into the FileTable, this is to simulate "file exists" check.
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE, objID++);
     String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
@@ -437,7 +437,7 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager, getBucketLayout());
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
@@ -448,14 +448,14 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
     long txnID = 5000;
     // for index=0, parentID is bucketID
-    OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+    OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(
        dirs.get(0), objID++, parentID);
-    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+    OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo,
        txnID, omMetadataManager);
     parentID = omDirInfo.getObjectID();
     // Add a key in second level.
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
        bucketName, keyName, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE, objID);
@@ -506,7 +506,7 @@ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
     String keyName = createDirKey(dirs, 255);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
@@ -545,7 +545,7 @@ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
     String keyName = createDirKey(dirs, 256);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -581,7 +581,7 @@ public void testCreateDirectoryOMMetric() throws Exception {
     String keyName = createDirKey(dirs, 3);
     // Add volume and bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index e28dd6cb501c..153a4ea62351 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import java.util.UUID;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
@@ -28,7 +29,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -118,7 +118,7 @@ public void testValidateAndUpdateCache() throws Exception {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, true);
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
@@ -192,7 +192,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, true);
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+    OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager);
     OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
@@ -212,16 +212,16 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
     testNonRecursivePath("a/b", false, false, true);
     // Create some child keys for the path
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/b/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
@@ -243,12 +243,12 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception {
     // Should be able to create file even if parent directories does not
     // exist and key already exist, as this is with overwrite enabled.
     testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     testNonRecursivePath("c/d/e/f", true, true, false);
     // Create some child keys for the path
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     testNonRecursivePath("a/b/c", false, true, false);
@@ -263,7 +263,7 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
     testNonRecursivePath(key, false, true, false);
     // Add the key to key table
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        key, 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
@@ -280,19 +280,19 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
     String key = "c/d/e/f";
     // Need to add the path which starts with "c/d/e" to keyTable as this is
     // non-recursive parent should exist.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "c/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "c/d/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
     testNonRecursivePath(key, false, false, false);
     // Add the key to key table
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        key, 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
@@ -308,7 +308,7 @@ protected void testNonRecursivePath(String key,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         overWrite, recursive);
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
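
A recurring detail in the TestOMFileCreateRequest hunks above: a non-recursive create of "c/d/e/f" only succeeds when every parent prefix exists as its own key-table entry. A sketch of that seeding (volumeName, bucketName and omMetadataManager are assumed to come from the test fixture; the addKeyToTable signature is the one used in the hunks):

    // Seed "c/", "c/d/" and "c/d/e/" so the non-recursive create can pass.
    for (String parent : new String[] {"c/", "c/d/", "c/d/e/"}) {
      OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, parent,
          0L, HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.ONE, omMetadataManager);
    }
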
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
index a3b02fecd91d..eb7e995a2614 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -43,10 +43,10 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
     Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
     // Create parent dirs for the path
-    TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+    OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName,
        "a/b/c", omMetadataManager);
     String fileNameD = "d";
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
@@ -76,12 +76,12 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
     // Add the key to key table
     OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
     OmKeyInfo omKeyInfo =
-        TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
             HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.ONE,
             omDirInfo.getObjectID() + 10,
             omDirInfo.getObjectID(), 100, Time.now());
-    TestOMRequestUtils.addFileToKeyTable(false, false,
+    OMRequestTestUtils.addFileToKeyTable(false, false,
        "f", omKeyInfo, -1,
        omDirInfo.getObjectID() + 10, omMetadataManager);
@@ -97,10 +97,10 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
     String parentDir = "c/d/e";
     String fileName = "f";
     String key = parentDir + "/" + fileName;
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);
     // Create parent dirs for the path
-    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
        bucketName, parentDir, omMetadataManager);
     // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is
@@ -108,12 +108,12 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
     testNonRecursivePath(key, false, false, false);
     OmKeyInfo omKeyInfo =
-        TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
             HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.ONE,
             parentId + 1,
             parentId, 100, Time.now());
-    TestOMRequestUtils.addFileToKeyTable(false, false,
+    OMRequestTestUtils.addFileToKeyTable(false, false,
        fileName, omKeyInfo, -1, 50, omMetadataManager);
     // Even if key exists in KeyTable, should be able to create file as
@@ -126,7 +126,7 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
   protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
       boolean doAssert) throws Exception {
-    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+    long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName,
        omMetadataManager);
     String[] pathComponents = StringUtils.split(key, '/');
     long parentId = bucketId;
@@ -160,7 +160,7 @@ protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
   private OmDirectoryInfo getDirInfo(String key)
       throws Exception {
-    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+    long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName,
        omMetadataManager);
     String[] pathComponents = StringUtils.split(key, '/');
     long parentId = bucketId;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index 21209c4057f5..6f8bfd015b13 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -25,13 +25,13 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -56,7 +56,7 @@ public void testPreExecute() throws Exception {
   @Test
   public void testValidateAndUpdateCache() throws Exception {
     // Add volume, bucket, key entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager, getBucketLayout());
     addKeyToOpenKeyTable(volumeName, bucketName);
@@ -94,7 +94,7 @@ public void testValidateAndUpdateCache() throws Exception {
     Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest()
         .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime());
-    // creationTime was assigned at TestOMRequestUtils.addKeyToTable
+    // creationTime was assigned at OMRequestTestUtils.addKeyToTable
     // modificationTime was assigned at
     // doPreExecute(createAllocateBlockRequest())
     Assert.assertTrue(
@@ -153,7 +153,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
     // Added only volume to DB.
-    TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
+    OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
        omMetadataManager);
     OMClientResponse omAllocateBlockResponse =
@@ -175,7 +175,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
         getOmAllocateBlockRequest(modifiedOmRequest);
     // Add volume, bucket entries to DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager, omAllocateBlockRequest.getBucketLayout());
@@ -254,7 +254,7 @@ protected OMRequest createAllocateBlockRequest() {
   protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
       throws Exception {
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+    OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName,
        keyName, clientID, replicationType, replicationFactor,
       omMetadataManager);
     return "";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
index 6482a4fd6460..6dec84d79cee 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -58,19 +58,19 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
     keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
     // add parentDir to dirTable
-    long parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+    long parentID = OMRequestTestUtils.addParentsToDirTable(volumeName,
        bucketName, parentDir, omMetadataManager);
     long txnId = 50;
     long objectId = parentID + 1;
     OmKeyInfo omKeyInfoFSO =
-        TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
             HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
             objectId, parentID, txnId, Time.now());
     // add key to openFileTable
-    TestOMRequestUtils.addFileToKeyTable(true, false,
+    OMRequestTestUtils.addFileToKeyTable(true, false,
        fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
     return omMetadataManager.getOzonePathKey(parentID, fileName);
@@ -92,7 +92,7 @@ public BucketLayout getBucketLayout() {
   @Override
   protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
       boolean doAssert) throws Exception {
-    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+    long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName,
        omMetadataManager);
     String[] pathComponents = StringUtils.split(key, '/');
     long parentId = bucketId;
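
The FSO variant of addKeyToOpenKeyTable above differs from the flat case in one important way: the open file is registered under its parent directory's object ID plus the file name, not under the full key path. A condensed sketch of that flow (the literals and the clientID parameter are illustrative; every call and signature is taken from the hunks above):

    long parentID = OMRequestTestUtils.addParentsToDirTable(
        "vol1", "bucket1", "a/b/c", omMetadataManager);
    OmKeyInfo omKeyInfoFSO = OMRequestTestUtils.createOmKeyInfo(
        "vol1", "bucket1", "a/b/c/file1",
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
        parentID + 1, parentID, 50, Time.now());
    // true: write to the open file table, keyed by parent ID + file name.
    OMRequestTestUtils.addFileToKeyTable(true, false,
        "file1", omKeyInfoFSO, clientID, 50, omMetadataManager);
    String openKey = omMetadataManager.getOzonePathKey(parentID, "file1");
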
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest; @@ -46,7 +46,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest { @Test public void testKeyAddAclRequest() throws Exception { // Manually add volume, bucket and key to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -83,7 +83,7 @@ public void testKeyAddAclRequest() throws Exception { @Test public void testKeyRemoveAclRequest() throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -147,7 +147,7 @@ public void testKeyRemoveAclRequest() throws Exception { @Test public void testKeySetAclRequest() throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -248,7 +248,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { } protected String addKeyToTable() throws Exception { - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, 1L, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index 9f49d9d071e8..d528926f7adc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; @@ -40,15 +40,15 @@ protected String addKeyToTable() throws Exception { keyName = key; // updated key name // Create parent dirs for the path - long parentId = TestOMRequestUtils + long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = TestOMRequestUtils + OmKeyInfo omKeyInfo = OMRequestTestUtils .createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils + OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); return 
omKeyInfo.getPath(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index e36ff2fb2cef..e35645eb4da3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; import org.junit.Assert; @@ -36,7 +37,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -74,7 +74,7 @@ public void testValidateAndUpdateCacheWithUnknownBlockId() throws Exception { .stream().map(OmKeyLocationInfo::getFromProtobuf) .collect(Collectors.toList()); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList); @@ -136,7 +136,7 @@ public void testValidateAndUpdateCache() throws Exception { .map(OmKeyLocationInfo::getFromProtobuf) .collect(Collectors.toList()); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList); @@ -239,7 +239,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); String ozoneKey = getOzonePathKey(); @@ -274,7 +274,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = getOzonePathKey(); @@ -470,7 +470,7 @@ protected String getOzonePathKey() throws IOException { @NotNull protected String addKeyToOpenKeyTable(List locationList) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager, locationList, version); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java 
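For context on the commit hunks above: these tests seed the open-key table with the block locations carried by the request before validateAndUpdateCache runs. A sketch under the same fixture assumptions (`keyArgs` stands in for the request's key args, which the diff elides; `version` and the replication fields come from the shared test fixture):

    // Convert the protobuf key locations into OmKeyLocationInfo, as the
    // commit tests above do.
    List<OmKeyLocationInfo> locationList = keyArgs.getKeyLocationsList()
        .stream().map(OmKeyLocationInfo::getFromProtobuf)
        .collect(Collectors.toList());
    // First argument true: the key goes to the open-key table, awaiting
    // commit, together with its allocated block locations.
    OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
        clientID, replicationType, replicationFactor, omMetadataManager,
        locationList, version);
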
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index b57d402ae68a..0e27c1ce2b05 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -66,20 +66,20 @@ protected String addKeyToOpenKeyTable(List locationList) if (getParentDir() == null) { parentID = getBucketID(); } else { - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, getParentDir(), omMetadataManager); } long objectId = 100; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, Time.now(), version); omKeyInfoFSO.appendNewBlocks(locationList, false); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); return omMetadataManager.getOzonePathKey(parentID, fileName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index c5caf95cfed4..9ff7d7d4d822 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -28,12 +28,12 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -46,7 +46,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.junit.Assert.fail; @@ -292,7 +292,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String openKey = getOpenKey(id); - TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); // Before calling @@ -498,7 +498,7 @@ public void testKeyCreateWithFileSystemPathsEnabled() throws Exception { } protected void addToKeyTable(String keyName) throws Exception { - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index 354ece092b23..2ddc5c71f91b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -56,12 +56,12 @@ protected void addToKeyTable(String keyName) throws Exception { long parentId = checkIntermediatePaths(keyPath); String fileName = OzoneFSUtils.getFileName(keyName); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, fileName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index 34d9beacbbc2..03f246753d41 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -21,11 +21,11 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -48,7 +48,7 @@ public void testPreExecute() throws 
Exception { @Test public void testValidateAndUpdateCache() throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -89,7 +89,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { // Add only volume and bucket entry to DB. // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); OMClientResponse omClientResponse = @@ -125,7 +125,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { OMKeyDeleteRequest omKeyDeleteRequest = getOmKeyDeleteRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMClientResponse omClientResponse = omKeyDeleteRequest .validateAndUpdateCache(ozoneManager, 100L, @@ -172,7 +172,7 @@ private OMRequest createDeleteKeyRequest() { } protected String addKeyToTable() throws Exception { - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index b63e0646fa3d..5d006ca315bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; import org.apache.hadoop.util.Time; @@ -60,17 +60,17 @@ protected String addKeyToTable() throws Exception { keyName = key; // updated key name // Create parent dirs for the path - long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); omKeyInfo.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); return omKeyInfo.getPath(); } @@ -78,7 +78,7 @@ protected String addKeyToTable() throws Exception { @Test public void testOzonePrefixPathViewer() throws Exception { // Add volume, bucket and key entries to OM DB. 
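The FSO delete tests above seed a file under a directory chain rather than a flat key. The recurring pattern, as a sketch (parentDir, fileName, key and the other names are the fixture values used in these hunks; imports mirror those already shown in the diffs):

    // Create the parent directory chain and get the leaf directory's ID.
    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
        bucketName, parentDir, omMetadataManager);
    // Build key metadata whose object ID hangs off that parent.
    OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
        bucketName, key, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100,
        Time.now());
    omKeyInfo.setKeyName(fileName);
    // Register the file in the closed key table (client ID -1, txn 50).
    OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo,
        -1, 50, omMetadataManager);
    String dbKey = omKeyInfo.getPath(); // the path key later asserted on
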
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); String ozoneKey = addKeyToTable(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index c3e2d036dc42..d4fe1da7b051 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,10 +23,10 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -54,13 +54,13 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) bucket = bucketName; } // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucket, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket, omMetadataManager); List ozoneKeyNames = new ArrayList<>(numKeys); for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucket, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, key, clientID, replicationType, replicationFactor, trxnIndex++, omMetadataManager); ozoneKeyNames.add(omMetadataManager.getOzoneKey( @@ -69,7 +69,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) List deletedKeyNames = new ArrayList<>(numKeys); for (String ozoneKey : ozoneKeyNames) { - String deletedKeyName = TestOMRequestUtils.deleteKey( + String deletedKeyName = OMRequestTestUtils.deleteKey( ozoneKey, omMetadataManager, trxnIndex++); deletedKeyNames.add(deletedKeyName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index 69b2421f9cc0..d229d8363b1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -19,11 +19,12 @@ package org.apache.hadoop.ozone.om.request.key; import java.util.UUID; + +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -49,9 +50,9 @@ public void testValidateAndUpdateCache() throws Exception { OMRequest modifiedOmRequest = doPreExecute(createRenameKeyRequest(toKeyName)); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, 
bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -97,7 +98,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -135,7 +136,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { doPreExecute(createRenameKeyRequest(toKeyName)); // Add only volume entry to DB. - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = new OMKeyRenameRequest(modifiedOmRequest); @@ -158,7 +159,7 @@ public void testValidateAndUpdateCacheWithToKeyInvalid() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -183,7 +184,7 @@ public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index 312e521f27cf..8d079eb5693d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; @@ -123,7 +123,7 @@ private void createPreRequisites() throws Exception { deleteKeyList = new ArrayList<>(); // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); int count = 10; @@ -138,7 +138,7 @@ private void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, omMetadataManager); deleteKeyArgs.addKeys(key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index f69f6615a1f8..24fd138fdfed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -116,7 +116,7 @@ public void testKeysRenameRequestFail() throws Exception { private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); List renameKeyList = new ArrayList<>(); @@ -124,7 +124,7 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { for (int i = 0; i < count; i++) { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java index 31353e92c1cc..d3fcee7e7d48 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java @@ -28,12 +28,12 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.Assert; import org.junit.Test; import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import 
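The purge test above builds its fixtures in two steps: write committed keys, then move them to the deleted table. Sketched with the same names (bucket, numKeys, trxnIndex and ozoneKey are locals of createAndDeleteKeys in that hunk):

    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket,
        omMetadataManager);
    for (int i = 1; i <= numKeys; i++) {
      String key = keyName + "-" + i;
      // Write a committed key entry (first argument false: not an open key).
      OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, key,
          clientID, replicationType, replicationFactor, trxnIndex++,
          omMetadataManager);
    }
    // deleteKey returns the name under which the key now sits in the
    // deleted table; the purge request is then built from those names.
    String deletedKeyName = OMRequestTestUtils.deleteKey(ozoneKey,
        omMetadataManager, trxnIndex++);
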
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .Status; @@ -224,14 +224,14 @@ private void addToOpenKeyTableDB(long keySize, OpenKeyBucket... openKeys) for (OpenKey openKey: openKeyBucket.getKeysList()) { if (keySize > 0) { - OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volume, bucket, + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, openKey.getName(), replicationType, replicationFactor); - TestOMRequestUtils.addKeyLocationInfo(keyInfo, 0, keySize); + OMRequestTestUtils.addKeyLocationInfo(keyInfo, 0, keySize); - TestOMRequestUtils.addKeyToTable(true, false, + OMRequestTestUtils.addKeyToTable(true, false, keyInfo, openKey.getClientID(), 0L, omMetadataManager); } else { - TestOMRequestUtils.addKeyToTable(true, + OMRequestTestUtils.addKeyToTable(true, volume, bucket, openKey.getName(), openKey.getClientID(), replicationType, replicationFactor, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 5690ff20cb65..08063f3a71fb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -46,9 +46,9 @@ public void testAclRequest() throws Exception { when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); // Manually add volume, bucket and key to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, 1L, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index b7094985f17b..78a2346aeb96 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -21,10 +21,10 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -49,7 +49,7 @@ public void 
testValidateAndUpdateCache() throws Exception { String keyName = UUID.randomUUID().toString(); // Add volume and bucket to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, @@ -100,7 +100,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMRequest modifiedRequest = doPreExecuteInitiateMPU( volumeName, bucketName, keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 38496c4a6532..1f6b08c3cd59 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -55,7 +55,7 @@ public void testValidateAndUpdateCache() throws Exception { String keyName = prefix + fileName; // Add volume and bucket to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index a89a5bdd1847..581bd2ac569e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -46,7 +46,7 @@ .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; @@ -112,7 +112,7 @@ public void stop() { protected OMRequest doPreExecuteInitiateMPU( String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = @@ -150,7 +150,7 @@ protected OMRequest doPreExecuteCommitMPU( // Just set dummy size long dataSize = 100L; OMRequest omRequest = - TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName, + OMRequestTestUtils.createCommitPartMPURequest(volumeName, bucketName, keyName, clientID, dataSize, multipartUploadID, partNumber); S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = getS3MultipartUploadCommitReq(omRequest); @@ -179,7 +179,7 @@ protected OMRequest doPreExecuteAbortMPU( String multipartUploadID) throws IOException { OMRequest omRequest = - TestOMRequestUtils.createAbortMPURequest(volumeName, bucketName, + OMRequestTestUtils.createAbortMPURequest(volumeName, bucketName, keyName, multipartUploadID); @@ -201,7 +201,7 @@ protected OMRequest doPreExecuteCompleteMPU(String volumeName, List partList) throws IOException { OMRequest omRequest = - TestOMRequestUtils.createCompleteMPURequest(volumeName, bucketName, + OMRequestTestUtils.createCompleteMPURequest(volumeName, bucketName, keyName, multipartUploadID, partList); S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = @@ -229,7 +229,7 @@ protected OMRequest doPreExecuteCompleteMPU(String volumeName, protected OMRequest doPreExecuteInitiateMPUWithFSO( String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequestWithFSO diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java index 1afbb5fc0eef..85afa6265197 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java @@ -21,10 +21,10 @@ import java.io.IOException; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -52,7 +52,7 @@ public void testValidateAndUpdateCache() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -104,7 +104,7 @@ public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String multipartUploadID = "randomMPU"; @@ -162,7 +162,7 @@ public void testValidateAndUpdateCacheBucketNotFound() throws Exception { String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); String multipartUploadID = "randomMPU"; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java index dc768753f654..440830c1bcca 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import java.util.UUID; @@ -58,7 +58,7 @@ protected String getKeyName() { protected void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index f4d7f4ade890..a017764ef35e 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -54,7 +54,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -116,7 +116,7 @@ public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -155,7 +155,7 @@ public void testValidateAndUpdateCacheKeyNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); @@ -193,7 +193,7 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); long clientID = Time.now(); @@ -220,7 +220,7 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index f9ae9d2d9d20..7de016ad1dfe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -64,12 +64,12 @@ protected String getKeyName() { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { long txnLogId = 10000; - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID, txnLogId, Time.now()); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); } @@ -92,7 +92,7 @@ protected String getOpenKey(String volumeName, String bucketName, protected OMRequest doPreExecuteInitiateMPU(String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = @@ -116,7 +116,7 @@ protected OMRequest doPreExecuteInitiateMPU(String volumeName, protected void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 3993597ec13f..f91f27cb802b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -24,11 +24,11 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -122,7 +122,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { protected void addVolumeAndBucket(String volumeName, String bucketName) throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.DEFAULT); } @@ -132,7 +132,7 @@ public void testInvalidPartOrderError() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, @@ -217,7 +217,7 
@@ public void testValidateAndUpdateCacheBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); List partList = new ArrayList<>(); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -242,7 +242,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); List partList = new ArrayList<>(); @@ -265,7 +265,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() protected void addKeyToTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index c6533ef9d48e..f0a1dfb6826b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; @@ -54,7 +54,7 @@ protected String getKeyName() { @Override protected void addVolumeAndBucket(String volumeName, String bucketName) throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); } @@ -71,7 +71,7 @@ protected void addKeyToTable(String volumeName, String bucketName, long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); @@ -79,7 +79,7 @@ protected void addKeyToTable(String volumeName, String bucketName, // add key to openFileTable String fileName = OzoneFSUtils.getFileName(keyName); omKeyInfoFSO.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, omKeyInfoFSO.getObjectID(), omMetadataManager); } diff --git 
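Stepping back from the individual hunks: the multipart tests above all funnel through four request builders on the renamed class. A sketch of the quartet, with dataSize, multipartUploadID, partNumber and partList standing for the test-local values seen in TestS3MultipartRequest:

    // Build the four MPU protocol requests that the doPreExecute* helpers
    // above wrap before handing them to the request classes.
    OMRequest initiateReq = OMRequestTestUtils.createInitiateMPURequest(
        volumeName, bucketName, keyName);
    OMRequest commitPartReq = OMRequestTestUtils.createCommitPartMPURequest(
        volumeName, bucketName, keyName, clientID, dataSize,
        multipartUploadID, partNumber);
    OMRequest abortReq = OMRequestTestUtils.createAbortMPURequest(
        volumeName, bucketName, keyName, multipartUploadID);
    OMRequest completeReq = OMRequestTestUtils.createCompleteMPURequest(
        volumeName, bucketName, keyName, multipartUploadID, partList);
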
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java index aba72f28e099..c854773f7cd3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -29,7 +29,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -183,7 +183,7 @@ public void testValidateAndUpdateCacheWithVolumeAlreadyExists() String adminName = "user1"; String ownerName = "user1"; - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMRequest originalRequest = createVolumeRequest(volumeName, adminName, ownerName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java index 73a288827027..ac6191fdccbe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java @@ -20,11 +20,11 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -62,8 +62,8 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { omVolumeDeleteRequest.preExecute(ozoneManager); // Add volume and user to DB - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); String volumeKey = omMetadataManager.getVolumeKey(volumeName); String ownerKey = omMetadataManager.getUserKey(ownerName); @@ -127,11 +127,11 @@ public void testValidateAndUpdateCacheWithVolumeNotEmpty() throws Exception { OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName).build(); - TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo); + OMRequestTestUtils.addBucketToOM(omMetadataManager, omBucketInfo); // Add user and volume to DB. 
- TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OMClientResponse omClientResponse = omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java index e77130003e2f..de633ac4a085 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java @@ -23,6 +23,7 @@ import java.util.Set; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.junit.Assert; import org.junit.Test; @@ -30,7 +31,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; /** @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); String newOwner = "user1"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = new OMVolumeSetQuotaRequest(originalRequest); @@ -59,13 +59,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); String newOwner = "user2"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = new OMVolumeSetOwnerRequest(originalRequest); @@ -101,7 +101,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { // creationTime and modificationTime can be the same to the precision of a // millisecond - since there is no time-consuming operation between - // TestOMRequestUtils.addVolumeToDB (sets creationTime) and + // OMRequestTestUtils.addVolumeToDB (sets creationTime) and // preExecute (sets modificationTime). 
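A consolidated sketch of the volume-property fixture recurring in these hunks: register the owner and volume, then build set-owner and set-quota requests from the two overloads of the same builder (newOwner, quotaInBytes and quotaInNamespace are the test locals):

    // The owner must exist in the user table before volume requests validate.
    OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
    OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
    // String overload: SetVolumeProperty carrying a new owner.
    OMRequest setOwnerReq = OMRequestTestUtils.createSetVolumePropertyRequest(
        volumeName, newOwner);
    // (long, long) overload: SetVolumeProperty carrying quota values.
    OMRequest setQuotaReq = OMRequestTestUtils.createSetVolumePropertyRequest(
        volumeName, quotaInBytes, quotaInNamespace);
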
Assert.assertTrue(modificationTime >= creationTime); @@ -129,7 +129,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String ownerName = "user1"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, ownerName); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = @@ -155,7 +155,7 @@ public void testInvalidRequest() throws Exception { // create request with quota set. OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, 100L, 100L); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = @@ -179,13 +179,13 @@ public void testInvalidRequest() throws Exception { public void testOwnSameVolumeTwice() throws Exception { String volumeName = UUID.randomUUID().toString(); String owner = "user1"; - TestOMRequestUtils.addVolumeToDB(volumeName, owner, omMetadataManager); - TestOMRequestUtils.addUserToDB(volumeName, owner, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, owner, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, owner, omMetadataManager); String newOwner = "user2"; // Create request to set new owner OMRequest omRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetOwnerRequest setOwnerRequest = new OMVolumeSetOwnerRequest(omRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index 2458ef717eb8..89137a95c355 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -22,7 +22,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; @@ -44,7 +44,7 @@ public void testPreExecute() throws Exception { long quotaInBytes = 100L; long quotaInNamespace = 1000L; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -62,11 +62,11 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { long quotaInBytes = 100L; long quotaInNamespace = 1000L; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -111,7 +111,7 @@ public void testValidateAndUpdateCacheSuccess() throws 
Exception { // creationTime and modificationTime can be the same to the precision of a // millisecond - since there is no time-consuming operation between - // TestOMRequestUtils.addVolumeToDB (sets creationTime) and + // OMRequestTestUtils.addVolumeToDB (sets creationTime) and // preExecute (sets modificationTime). Assert.assertTrue(modificationTime >= creationTime); } @@ -124,7 +124,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() long quotaInNamespace= 100L; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -149,7 +149,7 @@ public void testInvalidRequest() throws Exception { // create request with owner set. OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, "user1"); // Creating OMVolumeSetQuotaRequest with SetProperty request set with owner. @@ -174,12 +174,12 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB( + OMRequestTestUtils.addVolumeToDB( volumeName, omMetadataManager, 10 * GB); - TestOMRequestUtils.addBucketToDB( + OMRequestTestUtils.addBucketToDB( volumeName, bucketName, omMetadataManager, 8 * GB); OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, 5 * GB, 100L); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java index 28a5ce17cdde..0822fd5293a4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); long originModTime = originalRequest.getAddAclRequest() .getModificationTime(); @@ -65,13 +65,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, 
ownerName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(originalRequest); @@ -109,7 +109,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java index b6ef38178e80..cf13ec429682 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -42,7 +42,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); long originModTime = originalRequest.getRemoveAclRequest() .getModificationTime(); @@ -65,13 +65,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); // add acl first OMRequest addAclRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(addAclRequest); omVolumeAddAclRequest.preExecute(ozoneManager); @@ -85,7 +85,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { // remove acl OMRequest removeAclRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = new OMVolumeRemoveAclRequest(removeAclRequest); omVolumeRemoveAclRequest.preExecute(ozoneManager); @@ -121,7 +121,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = 
UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = new OMVolumeRemoveAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java index 0bd052cc495e..4ffb6ede58cc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java @@ -21,7 +21,7 @@ import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, Lists.newArrayList(acl)); long originModTime = originalRequest.getSetAclRequest() .getModificationTime(); @@ -66,8 +66,8 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]"); OzoneAcl groupDefaultAcl = @@ -76,7 +76,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { List acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, acls); + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, acls); OMVolumeSetAclRequest omVolumeSetAclRequest = new OMVolumeSetAclRequest(originalRequest); @@ -117,7 +117,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, Lists.newArrayList(acl)); OMVolumeSetAclRequest omVolumeSetAclRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index 33d8c81a093c..6703f4c55ea5 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -75,7 +75,7 @@ public void testAddToDBBatch() throws Exception { String keyName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java index 1562fd112a36..35064ba17a26 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -64,7 +64,7 @@ public void testAddToDBBatch() throws Exception { long parentID = 100; OmDirectoryInfo omDirInfo = - TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID); + OMRequestTestUtils.createOmDirectoryInfo(keyName, 500, parentID); OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse( OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index eeffe19c5134..397e10ad6ecc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -40,7 +40,7 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index 71dd7dbdbb8f..8e4dbbf82d37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -25,7 +25,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -99,7 +99,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo createOmKeyInfo() throws Exception { - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index bbbacaea0739..83c92d508f27 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -48,7 +48,7 @@ protected OmKeyInfo createOmKeyInfo() throws Exception { long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 8709a07c6e2f..de8a4bbc13fe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; /** @@ -79,7 +79,7 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchNoOp() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) @@ -139,7 +139,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception { @NotNull protected void addKeyToOpenKeyTable() throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index 5b67eecfe95c..caed4a031ebc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -48,7 +48,7 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, @@ -63,13 +63,13 @@ protected void addKeyToOpenKeyTable() throws Exception { long objectId = parentID + 10; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, Time.now()); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); } diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index b508e152161e..a7ec3c1d473d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -46,7 +46,7 @@ protected String getOpenKeyName() { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 2a7e2ede3794..c8a1f8bb32d0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -24,12 +24,12 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import java.util.ArrayList; @@ -177,7 +177,7 @@ protected String addKeyToTable() throws Exception { String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java index f21e49fc192c..445556591407 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import 
org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -41,20 +41,20 @@ protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo, @Override protected String addKeyToTable() throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); // Create parent dirs for the path - long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omKeyInfo.getPath(); } @@ -62,7 +62,7 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(getOmBucketInfo()); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, getOmBucketInfo().getBucketName(), keyName, replicationType, replicationFactor, getOmBucketInfo().getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 7f0400365868..92b244865052 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -20,11 +20,11 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; /** @@ -34,7 +34,7 @@ public class TestOMKeyRenameResponse extends TestOMKeyResponse { @Test public void testAddToDBBatch() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -55,7 +55,7 @@ public void testAddToDBBatch() throws Exception { String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( @@ -77,7 +77,7 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = 
OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -98,7 +98,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( @@ -122,7 +122,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { @Test public void testAddToDBBatchWithSameKeyName() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -140,7 +140,7 @@ public void testAddToDBBatchWithSameKeyName() throws Exception { String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index bdafb6841a69..b41c8680b3a8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.After; import org.junit.Before; @@ -88,7 +88,7 @@ protected String getOpenKeyName() { @NotNull protected OmKeyInfo getOmKeyInfo() { - return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 9fe3c8202f50..0d6a6d900656 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -57,7 +57,7 @@ private void createPreRequisities() throws Exception { String ozoneKey = ""; for (int i = 0; i < 10; i++) { keyName = parent.concat(key + i); - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); omKeyInfoList diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java index 3aa92c0b3092..d59ce9abae8f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; @@ -61,7 +61,7 @@ public void testKeysRenameResponse() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); for (int i = 0; i < count; i++) { @@ -112,14 +112,14 @@ public void testKeysRenameResponseFail() throws Exception { private void createPreRequisities() throws Exception { // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); Map formAndToKeyInfo = new HashMap<>(); for (int i = 0; i < count; i++) { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index 56e372d42598..ebe2deeaf5fd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -164,11 +164,11 @@ private Map addOpenKeysToDB(String volume, int numKeys, String key = UUID.randomUUID().toString(); long clientID = random.nextLong(); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volume, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, key, replicationType, replicationFactor); if (keyLength > 0) { - TestOMRequestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); + OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); } String openKey = omMetadataManager.getOpenKey(volume, bucket, @@ -177,7 +177,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, // Add to the open key table DB, not cache. // In a real execution, the open key would have been removed from the // cache by the request, and it would only remain in the DB. 
- TestOMRequestUtils.addKeyToTable(true, false, omKeyInfo, + OMRequestTestUtils.addKeyToTable(true, false, omKeyInfo, clientID, 0L, omMetadataManager); Assert.assertTrue(omMetadataManager.getOpenKeyTable(getBucketLayout()) .isExist(openKey)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java index 6d640b360704..f2089457de6a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.util.Time; @@ -50,7 +50,7 @@ public void testAddDBToBatch() throws Exception { String keyName = getKeyName(); String multipartUploadID = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -88,7 +88,7 @@ public void testAddDBToBatchWithParts() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -160,7 +160,7 @@ public void testWithMultipartUploadError() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -224,7 +224,7 @@ private String getKeyName() { private void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index b52d3e2b4049..1ff17513cab1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.util.Time; @@ -54,7 +54,7 @@ public void testAddDBToBatch() throws Exception { String keyName = getKeyName(); String multipartUploadID = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); long txnId = 50; @@ -81,14 +81,14 @@ public void testAddDBToBatch() throws Exception { clientId); String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName); OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientId, omKeyInfoFSO.getObjectID(), omMetadataManager); @@ -130,7 +130,7 @@ public void testAddDBToBatchWithParts() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); runAddDBToBatchWithParts(volumeName, bucketName, keyName, 0); @@ -148,12 +148,12 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); // Put an entry to delete table with the same key prior to multipart commit - OmKeyInfo prevKey = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, @@ -210,7 +210,7 @@ private long runAddDBToBatchWithParts(String volumeName, omMultipartKeyInfo, deleteEntryCount); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentID + 9, @@ -310,7 +310,7 @@ private String getKeyName() { private void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index fc60de3fbc67..a5568d01722c 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; @@ -203,7 +203,7 @@ private void createBucket(String volumeName, String bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); buckObj = new OzoneObjInfo.Builder() .setVolumeName(vol) .setBucketName(buck) @@ -218,7 +218,7 @@ private void createVolume(String volumeName) throws IOException { .setAdminName(adminUgi.getUserName()) .setOwnerName(testUgi.getUserName()) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); volObj = new OzoneObjInfo.Builder() .setVolumeName(vol) .setResType(VOLUME) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index 8702302c049a..1f8f246a3c22 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; @@ -357,7 +357,7 @@ private static OzoneObjInfo createVolume(String volumeName) .setAdminName(adminUgi.getUserName()) .setOwnerName(testUgi.getUserName()) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); return new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setResType(VOLUME) @@ -371,7 +371,7 @@ private static OzoneObjInfo createBucket(String volumeName, String bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); return new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index c54391b4007c..145e5992f9d6 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; @@ -107,7 +107,7 @@ private static void prepareTestVols() throws IOException { .setAdminName("om") .setOwnerName(getTestVolOwnerName(i)) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); } } @@ -119,7 +119,7 @@ private static void prepareTestBuckets() throws IOException { .setVolumeName(getTestVolumeName(i)) .setBucketName(getTestBucketName(j)) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } } }
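
Note for reviewers: the sketch below is not part of the patch. It is a minimal, illustrative composite of the call sites touched above, showing how the renamed OMRequestTestUtils helpers typically compose in an OM request test. The hypothetical test class name is invented for illustration; the omMetadataManager and ozoneManager fields are assumed to be inherited from the TestOMVolumeRequest fixture, as in the tests modified by this patch.

// A minimal sketch, assuming the JUnit 4 fixture wiring that the tests in
// this patch inherit from TestOMVolumeRequest (which provides the
// omMetadataManager and ozoneManager fields used below).
import java.util.UUID;

import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.junit.Test;

public class ExampleVolumeSetOwnerTest extends TestOMVolumeRequest {

  @Test
  public void exampleSetOwnerFlow() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String ownerName = "user1";

    // Seed the OM metadata store, exactly as the renamed tests do.
    OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
    OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);

    // Build the protobuf request through the renamed factory method ...
    OMRequest originalRequest =
        OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, "user2");

    // ... and drive it through the request handler under test.
    OMVolumeSetOwnerRequest setOwnerRequest =
        new OMVolumeSetOwnerRequest(originalRequest);
    setOwnerRequest.preExecute(ozoneManager);
    // The real tests then call validateAndUpdateCache(...) and assert on the
    // resulting OMClientResponse; see TestOMVolumeSetOwnerRequest above.
  }
}

Since the helper class was renamed without changing behavior, every call site in this patch is a one-for-one substitution of OMRequestTestUtils for TestOMRequestUtils; the seeding and request-building pattern above stays the same across the volume, key, ACL, and multipart tests.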