diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
index 3dc5a82b335..ae8c114d813 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.storage;
 
 import com.google.common.primitives.Bytes;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -50,7 +51,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -186,9 +186,8 @@ public void testSeek() throws Exception {
     assertThrows(EOFException.class, () -> seekAndVerify(finalPos));
 
     // Seek to random positions between 0 and the block size.
-    Random random = new Random();
     for (int i = 0; i < 10; i++) {
-      pos = random.nextInt(blockSize);
+      pos = RandomUtils.nextInt(0, blockSize);
       seekAndVerify(pos);
     }
   }
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
index c708fc28ddb..c32cea09518 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.client.io;
 
 import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -41,7 +42,6 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.SplittableRandom;
 import java.util.concurrent.ExecutorService;
@@ -645,7 +645,7 @@ public void testSeekToPartialOffsetFails() {
   }
 
   private Integer getRandomStreamIndex(Set<Integer> set) {
-    return set.stream().skip(new Random().nextInt(set.size()))
+    return set.stream().skip(RandomUtils.nextInt(0, set.size()))
         .findFirst().orElse(null);
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
index 5b88f5cb300..9567fa2c281 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
@@ -22,7 +22,7 @@
 import org.junit.jupiter.api.Test;
 
 import java.nio.charset.StandardCharsets;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import java.util.zip.Checksum;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -59,11 +59,9 @@ void testCorrectness() {
 
     checkBytes("hello world!".getBytes(StandardCharsets.UTF_8));
 
-    final Random random = new Random();
-    final byte[] bytes = new byte[1 << 10];
+    final int len = 1 << 10;
     for (int i = 0; i < 1000; i++) {
-      random.nextBytes(bytes);
-      checkBytes(bytes, random.nextInt(bytes.length));
+      checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len));
     }
   }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
index 257c543d22c..3f00bc53d24 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.server.http;
 
+import org.apache.commons.lang3.RandomUtils;
 import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerIdleThreadCount;
 import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerMaxThreadCount;
 import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadCount;
@@ -36,8 +37,6 @@
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import java.util.Random;
-
 /**
  * Testing HttpServer2Metrics.
  */
@@ -57,11 +56,10 @@ public void setup() {
   @Test
   public void testMetrics() {
     // create mock metrics
-    Random random = new Random();
-    int threadCount = random.nextInt();
-    int maxThreadCount = random.nextInt();
-    int idleThreadCount = random.nextInt();
-    int threadQueueWaitingTaskCount = random.nextInt();
+    int threadCount = RandomUtils.nextInt();
+    int maxThreadCount = RandomUtils.nextInt();
+    int idleThreadCount = RandomUtils.nextInt();
+    int threadQueueWaitingTaskCount = RandomUtils.nextInt();
 
     String name = "s3g";
     when(threadPool.getThreads()).thenReturn(threadCount);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index be57aa8ea6a..9292ffa865c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.block;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -70,7 +71,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
@@ -218,9 +218,8 @@ private Map<Long, List<Long>> generateData(int dataSize) throws IOException {
   private Map<Long, List<Long>> generateData(int dataSize,
       HddsProtos.LifeCycleState state) throws IOException {
     Map<Long, List<Long>> blockMap = new HashMap<>();
-    Random random = new Random(1);
-    int continerIDBase = random.nextInt(100);
-    int localIDBase = random.nextInt(1000);
+    int continerIDBase = RandomUtils.nextInt(0, 100);
+    int localIDBase = RandomUtils.nextInt(0, 1000);
     for (int i = 0; i < dataSize; i++) {
       long containerID = continerIDBase + i;
       updateContainerMetadata(containerID, state);
@@ -692,13 +691,12 @@ public void testInadequateReplicaCommit() throws Exception {
   @Test
   public void testRandomOperateTransactions() throws Exception {
     mockContainerHealthResult(true);
-    Random random = new Random();
     int added = 0, committed = 0;
     List<DeletedBlocksTransaction> blocks = new ArrayList<>();
     List<Long> txIDs;
     // Randomly add/get/commit/increase transactions.
     for (int i = 0; i < 100; i++) {
-      int state = random.nextInt(4);
+      int state = RandomUtils.nextInt(0, 4);
       if (state == 0) {
         addTransactions(generateData(10), true);
         added += 10;
@@ -803,8 +801,7 @@ public void testDeletedBlockTransactions()
     // add two transactions for same container
     containerID = blocks.get(0).getContainerID();
     Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
-    Random random = new Random();
-    long localId = random.nextLong();
+    long localId = RandomUtils.nextLong();
     deletedBlocksMap.put(containerID, new LinkedList<>(
         Collections.singletonList(localId)));
     addTransactions(deletedBlocksMap, true);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 39e19135efa..3ed6ac89d6f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -19,9 +19,11 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Random;
 import java.util.stream.IntStream;
 
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +50,6 @@
 import org.junit.jupiter.params.provider.MethodSource;
 import org.junit.jupiter.params.provider.ValueSource;
 
-import org.apache.commons.lang3.StringUtils;
-
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
@@ -625,7 +625,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) {
     for (int i = 0; i < 10; i++) {
       // Set a random DN to in_service and ensure it is always picked
-      int index = new Random().nextInt(dnInfos.size());
+      int index = RandomUtils.nextInt(0, dnInfos.size());
       dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
       try {
         List<DatanodeDetails> datanodeDetails =
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
index 5cf4401bae2..6162f1ae5a4 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.client.checksum;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.DataChecksum;
@@ -27,7 +28,6 @@
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.List;
-import java.util.Random;
 
 import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC;
 import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC;
@@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer {
   @Test
   public void testComputeMd5Crc() throws IOException {
     final int lenOfBytes = 32;
-    byte[] randomChunkChecksum = new byte[lenOfBytes];
-    Random r = new Random();
-    r.nextBytes(randomChunkChecksum);
+    byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
+
     MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum);
     byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest();
     AbstractBlockChecksumComputer computer =
@@ -56,9 +55,7 @@ public void testComputeCompositeCrc() throws IOException {
     final int lenOfBytes = 32;
-    byte[] randomChunkChecksum = new byte[lenOfBytes];
-    Random r = new Random();
-    r.nextBytes(randomChunkChecksum);
+    byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
 
     CrcComposer crcComposer =
         CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 49c693268e7..618025dc06f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.contract;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -31,7 +32,6 @@
 import java.io.EOFException;
 import java.io.IOException;
-import java.util.Random;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable {
     byte[] buf = dataset(filesize, 0, 255);
     Path randomSeekFile = path("testrandomseeks.bin");
     createFile(getFileSystem(), randomSeekFile, true, buf);
-    Random r = new Random();
     // Record the sequence of seeks and reads which trigger a failure.
     int[] seeks = new int[10];
     int[] reads = new int[10];
     try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
       for (int i = 0; i < limit; i++) {
-        int seekOff = r.nextInt(buf.length);
-        int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
+        int seekOff = RandomUtils.nextInt(0, buf.length);
+        int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));
         seeks[i % seeks.length] = seekOff;
         reads[i % reads.length] = toRead;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index 61b0281c659..1675807d230 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -96,7 +96,6 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Optional;
-import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -2000,7 +1999,7 @@ private void checkInvalidPath(Path path) {
   @Test
   void testRenameFile() throws Exception {
-    final String dir = "/dir" + new Random().nextInt(1000);
+    final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
     Path dirPath = new Path(getBucketPath() + dir);
 
     Path file1Source = new Path(getBucketPath() + dir + "/file1_Copy");
@@ -2026,7 +2025,7 @@
    */
  @Test
   void testRenameFileToDir() throws Exception {
-    final String dir = "/dir" + new Random().nextInt(1000);
+    final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
     Path dirPath = new Path(getBucketPath() + dir);
     getFs().mkdirs(dirPath);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index 725b17ee9d6..439b563d633 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -34,7 +35,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Random;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
@@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
     // each datanode has leaderNumOfEachDn leaders after balance
     checkLeaderBalance(dnNum, leaderNumOfEachDn);
 
-    Random r = new Random(0);
     for (int i = 0; i < 10; i++) {
       // destroy some pipelines, wait new pipelines created,
       // then check leader balance
@@ -181,7 +180,7 @@
           .getPipelines(RatisReplicationConfig.getInstance(
               ReplicationFactor.THREE), Pipeline.PipelineState.OPEN);
-      int destroyNum = r.nextInt(pipelines.size());
+      int destroyNum = RandomUtils.nextInt(0, pipelines.size());
       for (int k = 0; k <= destroyNum; k++) {
         pipelineManager.closePipeline(pipelines.get(k), false);
       }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
index 0dae8a8b0dc..c2e671b896e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.DefaultConfigManager;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -41,7 +42,6 @@
 import javax.ws.rs.core.UriInfo;
 import java.io.IOException;
 import java.io.ByteArrayInputStream;
-import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.UUID;
 import java.util.List;
@@ -217,8 +217,7 @@ public void testMultipart() throws Exception {
 
   private static String generateRandomContent(int sizeInMB) {
     int bytesToGenerate = sizeInMB * 1024 * 1024;
-    byte[] randomBytes = new byte[bytesToGenerate];
-    new SecureRandom().nextBytes(randomBytes);
+    byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate);
     return Base64.getEncoder().encodeToString(randomBytes);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index d03c57bf4e4..b053a4394bf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -22,7 +22,7 @@
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -280,7 +280,7 @@ public void testWatchForCommitForRetryfailure() throws Exception {
       // as well as there is no logIndex generated in Ratis.
       // The basic idea here is just to test if it throws an exception.
       xceiverClient
-          .watchForCommit(index + new Random().nextInt(100) + 10);
+          .watchForCommit(index + RandomUtils.nextInt(0, 100) + 10);
       fail("expected exception not thrown");
     } catch (Exception e) {
       assertInstanceOf(ExecutionException.class, e);
@@ -374,7 +374,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
       // The basic idea here is just to test if it throws an exception.
       xceiverClient
           .watchForCommit(reply.getLogIndex() +
-              new Random().nextInt(100) + 10);
+              RandomUtils.nextInt(0, 100) + 10);
       fail("Expected exception not thrown");
     } catch (Exception e) {
       assertInstanceOf(GroupMismatchException.class,
           HddsClientUtils.checkForException(e));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
index e74041ceafb..36b970f4ee9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.shell;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -52,7 +53,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
 import java.util.regex.Matcher;
@@ -129,9 +129,8 @@ public void shutdown() {
   private Map<Long, List<Long>> generateData(int dataSize) throws Exception {
     Map<Long, List<Long>> blockMap = new HashMap<>();
-    Random random = new Random(1);
-    int continerIDBase = random.nextInt(100);
-    int localIDBase = random.nextInt(1000);
+    int continerIDBase = RandomUtils.nextInt(0, 100);
+    int localIDBase = RandomUtils.nextInt(0, 1000);
     for (int i = 0; i < dataSize; i++) {
       long containerID = continerIDBase + i;
       updateContainerMetadata(containerID);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index b34c8d31c6f..07196e29eaa 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -41,8 +41,8 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.net.URL;
-import java.util.Random;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -206,9 +206,8 @@ public void testNextClosestPowerIndexOfTwo() {
       assertNextClosestPowerIndexOfTwo(n - 1);
     }
 
-    final Random random = new Random();
     for (int i = 0; i < 10; i++) {
-      assertNextClosestPowerIndexOfTwo(random.nextLong());
+      assertNextClosestPowerIndexOfTwo(RandomUtils.nextLong());
    }
   }
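
Note on semantics: RandomUtils is close to, but not a drop-in replacement for, java.util.Random. Below is a minimal, illustrative sketch of the differences (not part of the patch; the class name is hypothetical):

    import java.util.Random;
    import org.apache.commons.lang3.RandomUtils;

    public class RandomUtilsSemantics {
      public static void main(String[] args) {
        // java.util.Random: seedable, and nextInt()/nextLong() may be negative.
        Random seeded = new Random(1);
        int a = seeded.nextInt(100);          // uniform in [0, 100), reproducible across runs
        long b = seeded.nextLong();           // full long range, including negatives

        // RandomUtils: the end bound is exclusive, values are never negative,
        // and the underlying source cannot be seeded through this API.
        int c = RandomUtils.nextInt(0, 100);  // uniform in [0, 100), not reproducible
        int d = RandomUtils.nextInt();        // in [0, Integer.MAX_VALUE)
        long e = RandomUtils.nextLong();      // in [0, Long.MAX_VALUE)
        byte[] f = RandomUtils.nextBytes(16); // 16 random bytes

        System.out.printf("%d %d %d %d %d %d%n", a, b, c, d, e, f.length);
      }
    }

In particular, TestDeletedBlockLog and TestDeletedBlocksTxnShell previously used new Random(1) and TestLeaderChoosePolicy used new Random(0), so those tests lose their fixed seeds with this change; likewise, TestHttpServer2Metrics and TestReconUtils no longer exercise negative values from nextInt()/nextLong().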