From 5bfcea82fc69a0e8b42235d20eb978c37c91246c Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Sat, 19 Apr 2025 13:51:29 +0800 Subject: [PATCH 1/2] HDDS-12813. Replace the deprecated call of RandomUtils --- .../hdds/scm/storage/TestBlockInputStream.java | 2 +- .../TestBlockOutputStreamCorrectness.java | 2 +- ...estECBlockReconstructedStripeInputStream.java | 2 +- .../ContainerCommandResponseBuilders.java | 2 +- .../ozone/common/TestChecksumByteBuffer.java | 3 ++- .../TestChecksumImplsComputeSameValues.java | 4 ++-- .../common/report/ContainerReportPublisher.java | 2 +- .../common/report/PipelineReportPublisher.java | 2 +- .../TestContainerDeletionChoosingPolicy.java | 2 +- .../common/impl/TestHddsDispatcher.java | 4 ++-- .../replication/TestGrpcContainerUploader.java | 2 +- .../hdds/fs/TestCachingSpaceUsageSource.java | 2 +- .../security/symmetric/TestManagedSecretKey.java | 6 +++--- .../token/TestOzoneBlockTokenIdentifier.java | 2 +- .../hdds/server/http/TestHttpServer2Metrics.java | 8 ++++---- .../apache/hadoop/hdds/scm/HddsTestUtils.java | 6 +++--- .../hdds/scm/block/TestDeletedBlockLog.java | 8 ++++---- .../TestSCMContainerPlacementRackAware.java | 2 +- .../TestReplicatedBlockChecksumComputer.java | 4 ++-- .../hadoop/ozone/MiniOzoneChaosCluster.java | 6 +++--- .../hadoop/ozone/failure/FailureManager.java | 4 ++-- .../loadgenerators/AgedDirLoadGenerator.java | 2 +- .../ozone/loadgenerators/AgedLoadGenerator.java | 4 ++-- .../hadoop/ozone/loadgenerators/DataBuffer.java | 2 +- .../loadgenerators/FilesystemLoadGenerator.java | 2 +- .../hadoop/ozone/loadgenerators/LoadBucket.java | 2 +- .../ozone/loadgenerators/LoadExecutors.java | 2 +- .../loadgenerators/NestedDirLoadGenerator.java | 2 +- .../loadgenerators/RandomDirLoadGenerator.java | 2 +- .../loadgenerators/RandomLoadGenerator.java | 2 +- .../loadgenerators/ReadOnlyLoadGenerator.java | 2 +- .../fs/contract/AbstractContractSeekTest.java | 4 ++-- .../ozone/AbstractRootedOzoneFileSystemTest.java | 4 ++-- 
.../hadoop/hdds/scm/TestContainerOperations.java | 2 +- .../TestGetCommittedBlockLengthAndPutKey.java | 4 ++-- .../hdds/scm/TestStorageContainerManager.java | 4 ++-- .../hadoop/hdds/scm/TestWatchForCommit.java | 6 ++++-- .../hadoop/hdds/scm/TestXceiverClientGrpc.java | 6 ++++-- .../TestContainerStateManagerIntegration.java | 2 +- .../metrics/TestSCMContainerManagerMetrics.java | 2 +- .../scm/pipeline/TestLeaderChoosePolicy.java | 2 +- .../scm/storage/TestContainerCommandsEC.java | 6 +++--- .../db/managed/TestRocksObjectLeakDetector.java | 2 +- .../org/apache/hadoop/ozone/TestBlockTokens.java | 2 +- .../hadoop/ozone/TestMultipartObjectGet.java | 2 +- .../ozone/client/rpc/OzoneRpcClientTests.java | 8 ++++---- .../ozone/client/rpc/TestBlockOutputStream.java | 12 ++++++------ .../rpc/TestBlockOutputStreamWithFailures.java | 16 ++++++++-------- .../TestOzoneClientMultipartUploadWithFSO.java | 2 +- .../TestOzoneRpcClientWithKeyLatestVersion.java | 6 +++--- .../hadoop/ozone/client/rpc/TestReadRetries.java | 2 +- .../server/TestSecureContainerServer.java | 2 +- .../TestDatanodeHddsVolumeFailureDetection.java | 2 +- .../ozone/om/TestObjectStoreWithLegacyFS.java | 2 +- .../ozone/shell/TestDeletedBlocksTxnShell.java | 4 ++-- .../hadoop/ozone/om/TestKeyManagerUnit.java | 4 ++-- .../TestMultipartUploadCleanupService.java | 6 +++--- .../om/service/TestOpenKeyCleanupService.java | 10 +++++----- .../ozone/security/TestOzoneTokenIdentifier.java | 2 +- .../security/acl/TestOzoneNativeAuthorizer.java | 4 ++-- .../hadoop/ozone/security/acl/TestParentAcl.java | 4 ++-- .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 2 +- .../hadoop/ozone/recon/TestReconUtils.java | 4 ++-- .../ozone/freon/DatanodeSimulationState.java | 2 +- .../hadoop/ozone/freon/DatanodeSimulator.java | 4 ++-- .../freon/OzoneClientKeyReadWriteListOps.java | 2 +- 66 files changed, 125 insertions(+), 120 deletions(-) diff --git 
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index ab612bcaea43..2e9a84cad429 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -193,7 +193,7 @@ public void testSeek() throws Exception { // Seek to random positions between 0 and the block size. for (int i = 0; i < 10; i++) { - pos = RandomUtils.nextInt(0, blockSize); + pos = RandomUtils.secure().randomInt(0, blockSize); seekAndVerify(pos); } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 7724c50283f3..a6f589e9cc7c 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -69,7 +69,7 @@ class TestBlockOutputStreamCorrectness { private static final int DATA_SIZE = 256 * (int) OzoneConsts.MB; - private static final byte[] DATA = RandomUtils.nextBytes(DATA_SIZE); + private static final byte[] DATA = RandomUtils.secure().randomBytes(DATA_SIZE); @ParameterizedTest @ValueSource(ints = { 1, 1024, 1024 * 1024 }) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index effef613013b..2fed95bfa6ba 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ 
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -647,7 +647,7 @@ public void testSeekToPartialOffsetFails() { } private Integer getRandomStreamIndex(Set set) { - return set.stream().skip(RandomUtils.nextInt(0, set.size())) + return set.stream().skip(RandomUtils.secure().randomInt(0, set.size())) .findFirst().orElse(null); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index 6285e2caf7ee..9ce971c08139 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -362,7 +362,7 @@ public static ContainerCommandResponseProto getEchoResponse( ContainerProtos.EchoResponseProto.Builder echo = ContainerProtos.EchoResponseProto .newBuilder() - .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.nextBytes(responsePayload))); + .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.secure().randomBytes(responsePayload))); return getSuccessResponseBuilder(msg) .setEcho(echo) diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 48ed716d2ec7..6151d71da56c 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -79,7 +79,8 @@ void testCorrectness() { final int len = 1 << 10; for (int i = 0; i < 1000; i++) { - checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len)); + checkBytes(RandomUtils.secure().randomBytes(len), + 
RandomUtils.secure().randomInt(0, len)); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java index 96f1c52c227b..fed48f63ff6a 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java @@ -42,7 +42,7 @@ public class TestChecksumImplsComputeSameValues { @Test public void testCRC32ImplsMatch() { data.clear(); - data.put(RandomUtils.nextBytes(data.remaining())); + data.put(RandomUtils.secure().randomBytes(data.remaining())); for (int bpc : bytesPerChecksum) { List impls = new ArrayList<>(); impls.add(new PureJavaCrc32ByteBuffer()); @@ -58,7 +58,7 @@ public void testCRC32ImplsMatch() { @Test public void testCRC32CImplsMatch() { data.clear(); - data.put(RandomUtils.nextBytes(data.remaining())); + data.put(RandomUtils.secure().randomBytes(data.remaining())); for (int bpc : bytesPerChecksum) { List impls = new ArrayList<>(); impls.add(new PureJavaCrc32CByteBuffer()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java index 26c0c6946630..7931ed5ec790 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java @@ -71,7 +71,7 @@ protected long getReportFrequency() { @SuppressWarnings("java:S2245") // no need for secure random private long getRandomReportDelay() { - return RandomUtils.nextLong(0, containerReportInterval); + return 
RandomUtils.secure().randomLong(0, containerReportInterval); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java index 5e6049843b41..3d709d6153a6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java @@ -61,7 +61,7 @@ protected long getReportFrequency() { @SuppressWarnings("java:S2245") // no need for secure random private long getRandomReportDelay() { - return RandomUtils.nextLong(0, pipelineReportInterval); + return RandomUtils.secure().randomLong(0, pipelineReportInterval); } @Override diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index bb6aadae84dc..bd2e1237dda7 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -151,7 +151,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) List numberOfBlocks = new ArrayList(); // create [numContainers + 1] containers for (int i = 0; i <= numContainers; i++) { - long containerId = RandomUtils.nextLong(); + long containerId = RandomUtils.secure().randomLong(); KeyValueContainerData data = new KeyValueContainerData(containerId, layout, diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 5a2e362f1454..6afcadb809b9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -609,7 +609,7 @@ static ChecksumData checksum(ByteString data) { private ContainerCommandRequestProto getWriteChunkRequest0( String datanodeId, Long containerId, Long localId, int chunkNum) { final int lenOfBytes = 32; - ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + ByteString chunkData = ByteString.copyFrom(RandomUtils.secure().randomBytes(32)); ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo .newBuilder() @@ -638,7 +638,7 @@ private ContainerCommandRequestProto getWriteChunkRequest0( } static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) { - ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + ByteString chunkData = ByteString.copyFrom(RandomUtils.secure().randomBytes(32)); return newPutSmallFile(new BlockID(containerId, localId), chunkData); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java index cd3587d3512f..b8df5c18e8c9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java @@ -89,7 +89,7 @@ public void onNext(SendContainerRequest value) { // WHEN 
OutputStream out = startUpload(subject, callback); - out.write(RandomUtils.nextBytes(4)); + out.write(RandomUtils.secure().randomBytes(4)); out.close(); // THEN diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java index d63f23578ccb..db0a2c89c9ea 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java @@ -194,7 +194,7 @@ private static long missingInitialValue() { } private static long validInitialValue() { - return RandomUtils.nextLong(1, 100); + return RandomUtils.secure().randomLong(1, 100); } private static Builder paramsBuilder(AtomicLong savedValue) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestManagedSecretKey.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestManagedSecretKey.java index cd5bf80b2b16..386c8afb4903 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestManagedSecretKey.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestManagedSecretKey.java @@ -39,7 +39,7 @@ public class TestManagedSecretKey { @Test public void testSignAndVerifySuccess() throws Exception { // Data can be signed and verified by same key. 
- byte[] data = RandomUtils.nextBytes(100); + byte[] data = RandomUtils.secure().randomBytes(100); ManagedSecretKey secretKey = generateHmac(now(), ofDays(1)); byte[] signature = secretKey.sign(data); assertTrue(secretKey.isValidSignature(data, signature)); @@ -62,10 +62,10 @@ public void testSignAndVerifySuccess() throws Exception { @Test public void testVerifyFailure() throws Exception { - byte[] data = RandomUtils.nextBytes(100); + byte[] data = RandomUtils.secure().randomBytes(100); ManagedSecretKey secretKey = generateHmac(now(), ofDays(1)); // random signature is not valid. - assertFalse(secretKey.isValidSignature(data, RandomUtils.nextBytes(100))); + assertFalse(secretKey.isValidSignature(data, RandomUtils.secure().randomBytes(100))); // Data sign by one key can't be verified by another key. byte[] signature = secretKey.sign(data); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java index 4199af10689f..a04df4f618aa 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java @@ -65,7 +65,7 @@ public void testSignToken() { // Verify an invalid signed OzoneMaster Token with Ozone Master. 
assertFalse(secretKey.isValidSignature(tokenId.getBytes(), - RandomUtils.nextBytes(128))); + RandomUtils.secure().randomBytes(128))); } @Test diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java index 14e0a57bb500..07e842068d15 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java @@ -56,10 +56,10 @@ public void setup() { @Test public void testMetrics() { // crate mock metrics - int threadCount = RandomUtils.nextInt(); - int maxThreadCount = RandomUtils.nextInt(); - int idleThreadCount = RandomUtils.nextInt(); - int threadQueueWaitingTaskCount = RandomUtils.nextInt(); + int threadCount = RandomUtils.secure().randomInt(); + int maxThreadCount = RandomUtils.secure().randomInt(); + int idleThreadCount = RandomUtils.secure().randomInt(); + int threadQueueWaitingTaskCount = RandomUtils.secure().randomInt(); String name = "s3g"; when(threadPool.getThreads()).thenReturn(threadCount); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index a3e948a5e9a2..4029b49172d8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -623,7 +623,7 @@ public static StorageContainerManager getScm(OzoneConfiguration conf, private static ContainerInfo.Builder getDefaultContainerInfoBuilder( final HddsProtos.LifeCycleState state) { return new ContainerInfo.Builder() - .setContainerID(RandomUtils.nextLong()) + .setContainerID(RandomUtils.secure().randomLong()) .setReplicationConfig( RatisReplicationConfig 
.getInstance(ReplicationFactor.THREE)) @@ -816,7 +816,7 @@ public static List getContainerInfo(int numContainers) { for (int i = 0; i < numContainers; i++) { ContainerInfo.Builder builder = new ContainerInfo.Builder(); containerInfoList.add(builder - .setContainerID(RandomUtils.nextLong()) + .setContainerID(RandomUtils.secure().randomLong()) .setReplicationConfig(ratisReplicationConfig) .build()); } @@ -837,7 +837,7 @@ public static List getECContainerInfo(int numContainers, int data for (int i = 0; i < numContainers; i++) { ContainerInfo.Builder builder = new ContainerInfo.Builder(); containerInfoList.add(builder - .setContainerID(RandomUtils.nextLong()) + .setContainerID(RandomUtils.secure().randomLong()) .setOwner("test-owner") .setPipelineID(PipelineID.randomId()) .setReplicationConfig(eCReplicationConfig) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 23583d03b534..f37fa1ac10ff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -205,8 +205,8 @@ private Map> generateData(int dataSize) throws IOException { private Map> generateData(int dataSize, HddsProtos.LifeCycleState state) throws IOException { Map> blockMap = new HashMap<>(); - int continerIDBase = RandomUtils.nextInt(0, 100); - int localIDBase = RandomUtils.nextInt(0, 1000); + int continerIDBase = RandomUtils.secure().randomInt(0, 100); + int localIDBase = RandomUtils.secure().randomInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID, state); @@ -752,7 +752,7 @@ public void testRandomOperateTransactions() throws Exception { List txIDs; // Randomly add/get/commit/increase transactions. 
for (int i = 0; i < 100; i++) { - int state = RandomUtils.nextInt(0, 4); + int state = RandomUtils.secure().randomInt(0, 4); if (state == 0) { addTransactions(generateData(10), true); added += 10; @@ -851,7 +851,7 @@ public void testDeletedBlockTransactions() // add two transactions for same container containerID = blocks.get(0).getContainerID(); Map> deletedBlocksMap = new HashMap<>(); - long localId = RandomUtils.nextLong(); + long localId = RandomUtils.secure().randomLong(); deletedBlocksMap.put(containerID, new LinkedList<>( Collections.singletonList(localId))); addTransactions(deletedBlocksMap, true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 29f83ff062d4..989259959291 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -621,7 +621,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) { for (int i = 0; i < 10; i++) { // Set a random DN to in_service and ensure it is always picked - int index = RandomUtils.nextInt(0, dnInfos.size()); + int index = RandomUtils.secure().randomInt(0, dnInfos.size()); dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); try { List datanodeDetails = diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java index 2cfb59dfba64..bc232e8e05d1 100644 --- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java @@ -39,7 +39,7 @@ public class TestReplicatedBlockChecksumComputer { @Test public void testComputeMd5Crc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); + byte[] randomChunkChecksum = RandomUtils.secure().randomBytes(lenOfBytes); MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum); byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest(); @@ -54,7 +54,7 @@ public void testComputeMd5Crc() throws IOException { @Test public void testComputeCompositeCrc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); + byte[] randomChunkChecksum = RandomUtils.secure().randomBytes(lenOfBytes); CrcComposer crcComposer = CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 17f135ebbf1a..555055a630d0 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -317,12 +317,12 @@ public boolean shouldStopOm() { if (failedOmSet.size() >= numOzoneManagers / 2) { return false; } - return RandomUtils.nextBoolean(); + return RandomUtils.secure().randomBoolean(); } // Datanode specific private int getNumberOfDnToFail() { - return RandomUtils.nextBoolean() ? 1 : 2; + return RandomUtils.secure().randomBoolean() ? 
1 : 2; } public Set dnToFail() { @@ -395,7 +395,7 @@ public boolean shouldStopScm() { if (failedScmSet.size() >= numStorageContainerManagers / 2) { return false; } - return RandomUtils.nextBoolean(); + return RandomUtils.secure().randomBoolean(); } } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java index 6d80fd362e06..2a27708ef9da 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java @@ -89,10 +89,10 @@ public void stop() throws Exception { } public static boolean isFastRestart() { - return RandomUtils.nextBoolean(); + return RandomUtils.secure().randomBoolean(); } public static int getBoundedRandomIndex(int size) { - return RandomUtils.nextInt(0, size); + return RandomUtils.secure().randomInt(0, size); } } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java index 19ca066db9e6..2187309839e7 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java @@ -33,7 +33,7 @@ public AgedDirLoadGenerator(DataBuffer dataBuffer, LoadBucket fsBucket) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(0, maxDirIndex); + int index = RandomUtils.secure().randomInt(0, maxDirIndex); String keyName = getKeyName(index); 
fsBucket.readDirectory(keyName); } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java index ff101786c935..4551eedea41e 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java @@ -45,7 +45,7 @@ public AgedLoadGenerator(DataBuffer data, LoadBucket agedLoadBucket) { @Override public void generateLoad() throws Exception { - if (RandomUtils.nextInt(0, 100) <= 10) { + if (RandomUtils.secure().randomInt(0, 100) <= 10) { synchronized (agedFileAllocationIndex) { int index = agedFileAllocationIndex.getAndIncrement(); ByteBuffer buffer = dataBuffer.getBuffer(index); @@ -66,7 +66,7 @@ public void generateLoad() throws Exception { private Optional randomKeyToRead() { int currentIndex = agedFileWrittenIndex.get(); return currentIndex != 0 - ? Optional.of(RandomUtils.nextInt(0, currentIndex)) + ? 
Optional.of(RandomUtils.secure().randomInt(0, currentIndex)) : Optional.empty(); } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/DataBuffer.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/DataBuffer.java index 17784816921c..9c5019e69863 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/DataBuffer.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/DataBuffer.java @@ -39,7 +39,7 @@ public DataBuffer(int numBuffers) { for (int i = 0; i < numBuffers; i++) { int size = (int) StorageUnit.KB.toBytes(1 << i); ByteBuffer buffer = ByteBuffer.allocate(size); - buffer.put(RandomUtils.nextBytes(size)); + buffer.put(RandomUtils.secure().randomBytes(size)); this.buffers.add(buffer); } // TODO: add buffers of sizes of prime numbers. diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java index 5a3c6c300e2d..61ec463a5c0b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java @@ -38,7 +38,7 @@ public FilesystemLoadGenerator(DataBuffer dataBuffer, LoadBucket fsBucket) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(); + int index = RandomUtils.secure().randomInt(); ByteBuffer buffer = dataBuffer.getBuffer(index); String keyName = getKeyName(index); fsBucket.writeKey(true, buffer, keyName); diff --git 
a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java index 6722586dcef7..4f0c1b921197 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java @@ -63,7 +63,7 @@ public LoadBucket(OzoneBucket bucket, OzoneConfiguration conf, } private boolean isFsOp() { - return RandomUtils.nextBoolean(); + return RandomUtils.secure().randomBoolean(); } // Write ops. diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadExecutors.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadExecutors.java index a0b3e8fb8b33..c7c8ccf9ec9c 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadExecutors.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadExecutors.java @@ -57,7 +57,7 @@ private void load(long runTimeMillis) { while (Time.monotonicNow() - startTime < runTimeMillis) { LoadGenerator gen = - generators.get(RandomUtils.nextInt(0, numGenerators)); + generators.get(RandomUtils.secure().randomInt(0, numGenerators)); try { gen.generateLoad(); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java index 4ed69f13ab51..f1a82719b66e 100644 --- 
a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java @@ -42,7 +42,7 @@ private String createNewPath(int i, String s) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(0, maxDirDepth); + int index = RandomUtils.secure().randomInt(0, maxDirDepth); String str = this.pathMap.compute(index, this::createNewPath); fsBucket.createDirectory(str); fsBucket.readDirectory(str); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java index c81df6e22e69..88ef4a60b31d 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java @@ -31,7 +31,7 @@ public RandomDirLoadGenerator(DataBuffer dataBuffer, LoadBucket fsBucket) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(); + int index = RandomUtils.secure().randomInt(); String keyName = getKeyName(index); fsBucket.createDirectory(keyName); fsBucket.readDirectory(keyName); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java index 91d5d23ee3ea..f9cda3b5f6da 100644 --- 
a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java @@ -36,7 +36,7 @@ public RandomLoadGenerator(DataBuffer dataBuffer, LoadBucket bucket) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(); + int index = RandomUtils.secure().randomInt(); ByteBuffer buffer = dataBuffer.getBuffer(index); String keyName = getKeyName(index); ozoneBucket.writeKey(buffer, keyName); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java index a991245a6220..9d19f9f6c767 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java @@ -35,7 +35,7 @@ public ReadOnlyLoadGenerator(DataBuffer dataBuffer, LoadBucket replBucket) { @Override public void generateLoad() throws Exception { - int index = RandomUtils.nextInt(0, NUM_KEYS); + int index = RandomUtils.secure().randomInt(0, NUM_KEYS); ByteBuffer buffer = dataBuffer.getBuffer(index); String keyName = getKeyName(index); replBucket.readKey(buffer, keyName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index d19475fa6a26..ca1450382d45 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -349,8 +349,8 @@ public void testRandomSeeks() throws Throwable { int[] reads = new int[10]; try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) { for (int i = 0; i < limit; i++) { - int seekOff = RandomUtils.nextInt(0, buf.length); - int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000)); + int seekOff = RandomUtils.secure().randomInt(0, buf.length); + int toRead = RandomUtils.secure().randomInt(0, Math.min(buf.length - seekOff, 32000)); seeks[i % seeks.length] = seekOff; reads[i % reads.length] = toRead; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index f8b9c26d9117..70df69038d99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -1984,7 +1984,7 @@ private void checkInvalidPath(Path path) { @Test void testRenameFile() throws Exception { - final String dir = "/dir" + RandomUtils.nextInt(0, 1000); + final String dir = "/dir" + RandomUtils.secure().randomInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); Path file1Source = new Path(getBucketPath() + dir + "/file1_Copy"); @@ -2010,7 +2010,7 @@ void testRenameFile() throws Exception { */ @Test void testRenameFileToDir() throws Exception { - final String dir = "/dir" + RandomUtils.nextInt(0, 1000); + final String dir = "/dir" + RandomUtils.secure().randomInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java index b98aee72866b..fcea426ad458 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java @@ -96,7 +96,7 @@ void testContainerStateMachineIdempotency() throws Exception { // call create Container again BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8); + RandomStringUtils.random(RandomUtils.secure().randomInt(0, 1024)).getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java index d1b52da2d500..6168f4c3ee36 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -90,7 +90,7 @@ public void tesGetCommittedBlockLength() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(1, 1024)).getBytes(UTF_8); + RandomStringUtils.random(RandomUtils.secure().randomInt(1, 1024)).getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, @@ -154,7 +154,7 @@ public void tesPutKeyResposne() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - 
RandomStringUtils.random(RandomUtils.nextInt(1, 1024)).getBytes(UTF_8); + RandomStringUtils.random(RandomUtils.secure().randomInt(1, 1024)).getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index da824fd801bb..2af6895bee25 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -288,8 +288,8 @@ private void testBlockDeletionTransactions(MiniOzoneCluster cluster) throws Exce // Add 2 TXs per container. Map> deletedBlocks = new HashMap<>(); List blocks = new ArrayList<>(); - blocks.add(RandomUtils.nextLong()); - blocks.add(RandomUtils.nextLong()); + blocks.add(RandomUtils.secure().randomLong()); + blocks.add(RandomUtils.secure().randomLong()); deletedBlocks.put(containerID, blocks); addTransactions(cluster.getStorageContainerManager(), delLog, deletedBlocks); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java index 11b67acf31ea..b8d8e3ee5dd6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java @@ -282,7 +282,8 @@ public void testWatchForCommitForRetryfailure(RaftProtos.ReplicationLevel watchT // as well as there is no logIndex generate in Ratis. // The basic idea here is just to test if its throws an exception. 
ExecutionException e = assertThrows(ExecutionException.class, - () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10).get()); + () -> xceiverClient.watchForCommit(index + RandomUtils.secure(). + randomInt(0, 100) + 10).get()); // since the timeout value is quite long, the watch request will either // fail with NotReplicated exceptio, RetryFailureException or // RuntimeException @@ -382,7 +383,8 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { // as well as there is no logIndex generate in Ratis. // The basic idea here is just to test if its throws an exception. final Exception e = assertThrows(Exception.class, - () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.nextInt(0, 100) + 10).get()); + () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.secure(). + randomInt(0, 100) + 10).get()); assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); } finally { clientManager.releaseClient(xceiverClient, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index 092d428a7742..d6b69e3a0375 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -189,9 +189,11 @@ public void testPrimaryReadFromNormalDatanode() node -> assertEquals(NodeOperationalState.IN_SERVICE, node.getPersistedOpState())); randomPipeline.getNodes().get( - RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + RandomUtils.secure().randomInt(0, nodeCount)). 
+ setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); randomPipeline.getNodes().get( - RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + RandomUtils.secure().randomInt(0, nodeCount)). + setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); try (XceiverClientGrpc client = new XceiverClientGrpc(randomPipeline, conf) { @Override public XceiverClientReply sendCommandAsync( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index f050edf935e8..629ae0527015 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -326,7 +326,7 @@ public void testReplicaMap() throws Exception { .setUuid(UUID.randomUUID()).build(); // Test 1: no replica's exist - ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong()); + ContainerID containerID = ContainerID.valueOf(RandomUtils.secure().randomLong()); Set replicaSet = containerStateManager.getContainerReplicas(containerID); assertNull(replicaSet); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java index 1acb081c10ad..300ea81e520a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java @@ -111,7 +111,7 @@ public void 
testContainerOpsMetrics() throws Exception { assertThrows(ContainerNotFoundException.class, () -> containerManager.deleteContainer( - ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)))); + ContainerID.valueOf(RandomUtils.secure().randomLong(10000, 20000)))); // deleteContainer should fail, so it should have the old metric value. metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); assertEquals(getLongCounter("NumSuccessfulDeleteContainers", diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 58863bb83b6c..3c19a039e141 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -182,7 +182,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception { .getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); - int destroyNum = RandomUtils.nextInt(0, pipelines.size()); + int destroyNum = RandomUtils.secure().randomInt(0, pipelines.size()); for (int k = 0; k <= destroyNum; k++) { pipelineManager.closePipeline(pipelines.get(k), false); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index d79c31266281..7606c38c5631 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -255,7 +255,7 @@ public void testOrphanBlock() throws Exception { String keyName = 
UUID.randomUUID().toString(); try (OutputStream out = classBucket .createKey(keyName, keyLen, repConfig, new HashMap<>())) { - out.write(RandomUtils.nextBytes(keyLen)); + out.write(RandomUtils.secure().randomBytes(keyLen)); } long orphanContainerID = classBucket.getKey(keyName) .getOzoneKeyLocations().get(0).getContainerID(); @@ -1023,8 +1023,8 @@ public static void prepareData(int[][] ranges) throws Exception { new ECReplicationConfig(EC_DATA, EC_PARITY, EC_CODEC, EC_CHUNK_SIZE); values = new byte[ranges.length][]; for (int i = 0; i < ranges.length; i++) { - int keySize = RandomUtils.nextInt(ranges[i][0], ranges[i][1]); - values[i] = RandomUtils.nextBytes(keySize); + int keySize = RandomUtils.secure().randomInt(ranges[i][0], ranges[i][1]); + values[i] = RandomUtils.secure().randomBytes(keySize); final String keyName = UUID.randomUUID().toString(); try (OutputStream out = classBucket .createKey(keyName, values[i].length, repConfig, new HashMap<>())) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java index b21dc8af69b0..167c8383f812 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java @@ -70,7 +70,7 @@ public void testLeakDetector() throws Exception { testLeakDetector(() -> new ManagedLRUCache(1L)); testLeakDetector(ManagedOptions::new); testLeakDetector(ManagedReadOptions::new); - testLeakDetector(() -> new ManagedSlice(RandomUtils.nextBytes(10))); + testLeakDetector(() -> new ManagedSlice(RandomUtils.secure().randomBytes(10))); testLeakDetector(ManagedStatistics::new); testLeakDetector(ManagedWriteBatch::new); testLeakDetector(ManagedWriteOptions::new); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 752d2894da19..62225911d638 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -250,7 +250,7 @@ public void blockTokenFailsOnWrongPassword() throws Exception { for (OmKeyLocationInfoGroup v : keyInfo.getKeyLocationVersions()) { for (OmKeyLocationInfo l : v.getLocationList()) { Token token = l.getToken(); - byte[] randomPassword = RandomUtils.nextBytes(100); + byte[] randomPassword = RandomUtils.secure().randomBytes(100); Token override = new Token<>( token.getIdentifier(), randomPassword, token.getKind(), token.getService()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 9b1fcf001586..c9af9a4ffd3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -218,7 +218,7 @@ public void testMultipart() throws Exception { private static String generateRandomContent(int sizeInMB) { int bytesToGenerate = sizeInMB * 1024 * 1024; - byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate); + byte[] randomBytes = RandomUtils.secure().randomBytes(bytesToGenerate); return Base64.getEncoder().encodeToString(randomBytes); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index 292723bfc6f2..6cc155b45a42 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -1946,7 +1946,7 @@ public void testUsedBytesWithUploadPart() throws IOException { int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); String sampleData = Arrays.toString(generateData(blockSize + 100, - (byte) RandomUtils.nextLong())); + (byte) RandomUtils.secure().randomLong())); int valueLength = sampleData.getBytes(UTF_8).length; store.createVolume(volumeName); @@ -1983,7 +1983,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - String value = RandomStringUtils.random(RandomUtils.nextInt(1, 1024)); + String value = RandomStringUtils.random(RandomUtils.secure().randomInt(1, 1024)); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); @@ -2097,7 +2097,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, for (int i = 0; i < 5; i++) { String keyName = UUID.randomUUID().toString(); String data = Arrays.toString(generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong())); + (byte) RandomUtils.secure().randomLong())); TestDataUtil.createKey(bucket, keyName, ReplicationConfig.fromTypeAndFactor(RATIS, THREE), data.getBytes(UTF_8)); @@ -3645,7 +3645,7 @@ void testCommitPartAfterCompleteUpload() throws Exception { // upload part 1. 
byte[] data = generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong()); + (byte) RandomUtils.secure().randomLong()); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index a67dca9e8eaf..12eede0eb03c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -200,7 +200,7 @@ void testWriteLessThanChunkSize(boolean flushDelay, boolean enablePiggybacking) OzoneOutputStream key = createKey(client, keyName); int dataLength = 50; final int totalWriteLength = dataLength * 2; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = assertInstanceOf(KeyOutputStream.class, key.getOutputStream()); @@ -298,7 +298,7 @@ void testWriteExactlyFlushSize(boolean flushDelay, boolean enablePiggybacking) t OzoneOutputStream key = createKey(client, keyName); // write data equal to 2 chunks int dataLength = FLUSH_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); assertEquals(writeChunkCount + 2, @@ -416,7 +416,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay, boolean enablePiggybacking) OzoneOutputStream key = createKey(client, keyName); // write data more than 1 chunk int dataLength = CHUNK_SIZE + 50; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); assertEquals(totalOpCount + 1, 
metrics.getTotalOpCount()); KeyOutputStream keyOutputStream = @@ -505,7 +505,7 @@ void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = FLUSH_SIZE + 50; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); assertEquals(totalOpCount + 3, metrics.getTotalOpCount()); @@ -594,7 +594,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = @@ -689,7 +689,7 @@ void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybackin OzoneOutputStream key = createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE + 50; // write data more than 1 chunk - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = assertInstanceOf(KeyOutputStream.class, key.getOutputStream()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 5450b830efd3..a4cb32edd1a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -111,7 +111,7 @@ private void testWatchForCommitWithCloseContainerException(OzoneClient client) String keyName = getKeyName(); OzoneOutputStream key 
= createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = @@ -196,7 +196,7 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggyba String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); // since its hitting the full bufferCondition, it will call watchForCommit // and completes at least putBlock for first flushSize worth of data @@ -280,7 +280,7 @@ void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throw String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); // since its hitting the full bufferCondition, it will call watchForCommit // and completes atleast putBlock for first flushSize worth of data @@ -376,7 +376,7 @@ private void testWriteMoreThanMaxFlushSize(OzoneClient client) String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = @@ -433,7 +433,7 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); int dataLength = 167; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream 
keyOutputStream = @@ -498,7 +498,7 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) OzoneOutputStream key = createKey(client, keyName, 0, ReplicationFactor.ONE); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); KeyOutputStream keyOutputStream = @@ -587,7 +587,7 @@ void testDatanodeFailureWithSingleNode(boolean flushDelay, boolean enablePiggyba OzoneOutputStream key = createKey(client, keyName, 0, ReplicationFactor.ONE); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); // since its hitting the full bufferCondition, it will call watchForCommit // and completes at least putBlock for first flushSize worth of data @@ -678,7 +678,7 @@ void testDatanodeFailureWithPreAllocation(boolean flushDelay, boolean enablePigg createKey(client, keyName, 3 * BLOCK_SIZE, ReplicationFactor.ONE); int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE; - byte[] data1 = RandomUtils.nextBytes(dataLength); + byte[] data1 = RandomUtils.secure().randomBytes(dataLength); key.write(data1); // since its hitting the full bufferCondition, it will call watchForCommit // and completes at least putBlock for first flushSize worth of data diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 8e6a74f838b2..f3cbbcaa7748 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -444,7 +444,7 @@ public void 
testCommitPartAfterCompleteUpload() throws Exception { // upload part 1. byte[] data = generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong()); + (byte) RandomUtils.secure().randomLong()); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index fa6bf2274c0a..b437aa720269 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -76,8 +76,8 @@ void testWithGetLatestVersion(boolean getLatestVersionOnly) throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); String keyName = UUID.randomUUID().toString(); - byte[] content = RandomUtils.nextBytes(128); - int versions = RandomUtils.nextInt(2, 5); + byte[] content = RandomUtils.secure().randomBytes(128); + int versions = RandomUtils.secure().randomInt(2, 5); createAndOverwriteKey(bucket, keyName, versions, content); @@ -96,7 +96,7 @@ private void createAndOverwriteKey(OzoneBucket bucket, String key, int versions, byte[] content) throws IOException { ReplicationConfig replication = RatisReplicationConfig.getInstance(THREE); for (int i = 1; i < versions; i++) { - writeKey(bucket, key, RandomUtils.nextBytes(content.length), replication); + writeKey(bucket, key, RandomUtils.secure().randomBytes(content.length), replication); } // overwrite it writeKey(bucket, key, content, replication); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 06cd29529eac..543acb089b3a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -79,7 +79,7 @@ void testPutKeyAndGetKeyThreeNodes() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); String keyName = "a/b/c/" + UUID.randomUUID(); - byte[] content = RandomUtils.nextBytes(128); + byte[] content = RandomUtils.secure().randomBytes(128); TestDataUtil.createKey(bucket, keyName, RatisReplicationConfig.getInstance(THREE), content); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index be048e65b361..ef88ee1c4bc6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -263,7 +263,7 @@ private static void runTestClientServer( Token token = blockTokenSecretManager.generateToken(blockID, - EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong()); + EnumSet.allOf(AccessModeProto.class), RandomUtils.secure().randomLong()); String encodedToken = token.encodeToUrlString(); ContainerCommandRequestProto.Builder writeChunk = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index 7d3eaffaeced..5e2246582575 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -277,7 +277,7 @@ private static MiniOzoneCluster newCluster(boolean schemaV3) private static long createKey(OzoneBucket bucket, String key) throws IOException { - byte[] bytes = RandomUtils.nextBytes(KEY_SIZE); + byte[] bytes = RandomUtils.secure().randomBytes(KEY_SIZE); RatisReplicationConfig replication = RatisReplicationConfig.getInstance(ReplicationFactor.ONE); TestDataUtil.createKey(bucket, key, replication, bytes); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index 490fb2f42c4c..cf28bbab41dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -215,7 +215,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( String uploadID = omMultipartInfo.getUploadID(); // upload part 1. 
- byte[] data = generateData(128, (byte) RandomUtils.nextLong()); + byte[] data = generateData(128, (byte) RandomUtils.secure().randomLong()); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 54e9fdeff0a5..de1a0d360d29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -120,8 +120,8 @@ public void shutdown() { //> private Map> generateData(int dataSize) throws Exception { Map> blockMap = new HashMap<>(); - int continerIDBase = RandomUtils.nextInt(0, 100); - int localIDBase = RandomUtils.nextInt(0, 1000); + int continerIDBase = RandomUtils.secure().randomInt(0, 100); + int localIDBase = RandomUtils.secure().randomInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index b4c886147690..6b0b710a839e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -472,7 +472,7 @@ private OmMultipartInfo addinitMultipartUploadToCache( metadataManager.getMultipartInfoTable().addCacheEntry( new CacheKey<>(metadataManager.getMultipartKey(volume, bucket, key, uploadID)), - CacheValue.get(RandomUtils.nextInt(), multipartKeyInfo)); + 
CacheValue.get(RandomUtils.secure().randomInt(), multipartKeyInfo)); return new OmMultipartInfo(volume, bucket, key, uploadID); } @@ -480,7 +480,7 @@ private void abortMultipart( String volume, String bucket, String key, String uploadID) { metadataManager.getMultipartInfoTable().addCacheEntry( new CacheKey<>(metadataManager.getMultipartKey(volume, bucket, key, - uploadID)), CacheValue.get(RandomUtils.nextInt())); + uploadID)), CacheValue.get(RandomUtils.secure().randomInt())); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 6004fea29e84..aaf3c267d902 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -161,16 +161,16 @@ private void createIncompleteMPUKeys(int mpuKeyCount, String volume = UUID.randomUUID().toString(); String bucket = UUID.randomUUID().toString(); for (int x = 0; x < mpuKeyCount; x++) { - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { bucket = UUID.randomUUID().toString(); - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { volume = UUID.randomUUID().toString(); } } String key = UUID.randomUUID().toString(); createVolumeAndBucket(volume, bucket, bucketLayout); - final int numParts = RandomUtils.nextInt(0, 5); + final int numParts = RandomUtils.secure().randomInt(0, 5); // Create the MPU key createIncompleteMPUKey(volume, bucket, key, numParts); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 
e2da9e82162e..97fa7db3c02f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -469,16 +469,16 @@ private void createOpenKeys(int keyCount, boolean hsync, String volume = UUID.randomUUID().toString(); String bucket = UUID.randomUUID().toString(); for (int x = 0; x < keyCount; x++) { - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { bucket = UUID.randomUUID().toString(); - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { volume = UUID.randomUUID().toString(); } } String key = withDir ? "dir1/dir2/" + UUID.randomUUID() : UUID.randomUUID().toString(); createVolumeAndBucket(volume, bucket, bucketLayout); - final int numBlocks = RandomUtils.nextInt(1, 3); + final int numBlocks = RandomUtils.secure().randomInt(1, 3); // Create the key createOpenKey(volume, bucket, key, numBlocks, hsync, recovery); } @@ -538,9 +538,9 @@ private void createIncompleteMPUKeys(int mpuKeyCount, String volume = UUID.randomUUID().toString(); String bucket = UUID.randomUUID().toString(); for (int x = 0; x < mpuKeyCount; x++) { - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { bucket = UUID.randomUUID().toString(); - if (RandomUtils.nextBoolean()) { + if (RandomUtils.secure().randomBoolean()) { volume = UUID.randomUUID().toString(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index 85b1d24bd1d3..dade773b8009 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -105,7 +105,7 
@@ public void testSignToken(@TempDir Path baseDir) throws GeneralSecurityException new Text("rm"), new Text("client")); tokenId.setOmCertSerialId("123"); LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); + verifyTokenAsymmetric(tokenId, RandomUtils.secure().randomBytes(128), cert)); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index 5f9a1ad44d1d..48b978064eaa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -122,7 +122,7 @@ public static Collection data() { public void createAll( String keyName, String prefixName, ACLType userRight, ACLType groupRight, boolean expectedResult) throws IOException { - int randomInt = RandomUtils.nextInt(); + int randomInt = RandomUtils.secure().randomInt(); this.vol = "vol" + randomInt; this.buck = "bucket" + randomInt; this.key = keyName + randomInt; @@ -427,7 +427,7 @@ private void resetAclsAndValidateAccess( + " name:" + (accessType == USER ? user : group)); // Randomize next type. - int type = RandomUtils.nextInt(0, 3); + int type = RandomUtils.secure().randomInt(0, 3); ACLIdentityType identityType = ACLIdentityType.values()[type]; // Add remaining acls one by one and then check access. 
OzoneAcl addAcl = OzoneAcl.of(identityType, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index df5368456180..92bcc7b7a2b3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -119,7 +119,7 @@ static void setup() throws Exception { public void testKeyAcl() throws IOException { OzoneObj keyObj; - int randomInt = RandomUtils.nextInt(); + int randomInt = RandomUtils.secure().randomInt(); String vol = "vol" + randomInt; String buck = "bucket" + randomInt; String key = "key" + randomInt; @@ -165,7 +165,7 @@ public void testKeyAcl() public void testBucketAcl() throws IOException { OzoneObj bucketObj; - int randomInt = RandomUtils.nextInt(); + int randomInt = RandomUtils.secure().randomInt(); String vol = "vol" + randomInt; String buck = "bucket" + randomInt; diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index beca218a522e..86df63949b56 100644 --- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -78,7 +78,7 @@ private static void testReadToByteBuffer( IntFunction bufferConstructor, int streamLength, int bufferCapacity, int bufferPosition) throws IOException { - final byte[] source = RandomUtils.nextBytes(streamLength); + final byte[] source = RandomUtils.secure().randomBytes(streamLength); final InputStream input = new ByteArrayInputStream(source); final OzoneFSInputStream subject = createTestSubject(input); diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index 07fea9581900..5cfd5f26fd47 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -171,7 +171,7 @@ public void testNextClosestPowerIndexOfTwo() { } for (int i = 0; i < 10; i++) { - assertNextClosestPowerIndexOfTwo(RandomUtils.nextLong()); + assertNextClosestPowerIndexOfTwo(RandomUtils.secure().randomLong()); } } @@ -202,7 +202,7 @@ private static int oldNextClosestPowerIndexOfTwo(long dataSize) { private static ContainerInfo.Builder getDefaultContainerInfoBuilder( final HddsProtos.LifeCycleState state) { return new ContainerInfo.Builder() - .setContainerID(RandomUtils.nextLong()) + .setContainerID(RandomUtils.secure().randomLong()) .setReplicationConfig( RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulationState.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulationState.java index 9dde33c5e6ab..1e3bae684d19 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulationState.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulationState.java @@ -152,7 +152,7 @@ private void addContainerReport(InetSocketAddress endpoint, // to avoid peaks. 
if (state.nextFullContainerReport == Instant.MIN) { state.nextFullContainerReport = Instant.now().plusMillis( - RandomUtils.nextLong(1, fullContainerReportDurationMs)); + RandomUtils.secure().randomLong(1, fullContainerReportDurationMs)); } else { state.nextFullContainerReport = Instant.now() .plusMillis(fullContainerReportDurationMs); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index d1749f9a1592..b31fde4cbdf0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -345,7 +345,7 @@ private boolean startDatanode(DatanodeSimulationState dn) long scmHeartbeatInterval = HddsServerUtil.getScmHeartbeatInterval(conf); scmClients.forEach((endpoint, client) -> { // Use random initial delay as a jitter to avoid peaks. - long initialDelay = RandomUtils.nextLong(0, scmHeartbeatInterval); + long initialDelay = RandomUtils.secure().randomLong(0, scmHeartbeatInterval); Runnable runnable = () -> heartbeat(endpoint, client, dn); heartbeatScheduler.scheduleAtFixedRate(runnable, initialDelay, scmHeartbeatInterval, TimeUnit.MILLISECONDS); @@ -353,7 +353,7 @@ private boolean startDatanode(DatanodeSimulationState dn) long reconHeartbeatInterval = HddsServerUtil.getReconHeartbeatInterval(conf); - long initialDelay = RandomUtils.nextLong(0, reconHeartbeatInterval); + long initialDelay = RandomUtils.secure().randomLong(0, reconHeartbeatInterval); Runnable runnable = () -> heartbeat(reconAddress, reconClient, dn); heartbeatScheduler.scheduleAtFixedRate(runnable, initialDelay, reconHeartbeatInterval, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java index 602cf1ec07de..2051aeec25f0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java @@ -169,7 +169,7 @@ public Void call() throws Exception { timer = getMetrics().timer("key-read-write-list"); if (objectSizeInBytes >= 0) { - keyContent = RandomUtils.nextBytes(objectSizeInBytes); + keyContent = RandomUtils.secure().randomBytes(objectSizeInBytes); } if (kg == null) { kg = new KeyGeneratorUtil(); From 0caec1af1ee4a5eabb8405e5f92abaf7f1741622 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sat, 19 Apr 2025 11:49:40 +0200 Subject: [PATCH 2/2] Apply suggestions from code review --- .../container/common/report/ContainerReportPublisher.java | 1 - .../container/common/report/PipelineReportPublisher.java | 1 - .../org/apache/hadoop/hdds/scm/TestWatchForCommit.java | 8 ++++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java index 7931ed5ec790..6926543b7c20 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java @@ -69,7 +69,6 @@ protected long getReportFrequency() { return containerReportInterval + getRandomReportDelay(); } - @SuppressWarnings("java:S2245") // no need for secure random private long getRandomReportDelay() { return RandomUtils.secure().randomLong(0, containerReportInterval); } diff 
--git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java index 3d709d6153a6..3064b74210f1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java @@ -59,7 +59,6 @@ protected long getReportFrequency() { return pipelineReportInterval + getRandomReportDelay(); } - @SuppressWarnings("java:S2245") // no need for secure random private long getRandomReportDelay() { return RandomUtils.secure().randomLong(0, pipelineReportInterval); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java index b8d8e3ee5dd6..f52083fa21f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java @@ -282,8 +282,8 @@ public void testWatchForCommitForRetryfailure(RaftProtos.ReplicationLevel watchT // as well as there is no logIndex generate in Ratis. // The basic idea here is just to test if its throws an exception. ExecutionException e = assertThrows(ExecutionException.class, - () -> xceiverClient.watchForCommit(index + RandomUtils.secure(). 
- randomInt(0, 100) + 10).get()); + () -> xceiverClient.watchForCommit(index + RandomUtils.secure().randomInt(0, 100) + 10) + .get()); // since the timeout value is quite long, the watch request will either // fail with NotReplicated exceptio, RetryFailureException or // RuntimeException @@ -383,8 +383,8 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { // as well as there is no logIndex generate in Ratis. // The basic idea here is just to test if its throws an exception. final Exception e = assertThrows(Exception.class, - () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.secure(). - randomInt(0, 100) + 10).get()); + () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.secure().randomInt(0, 100) + 10) + .get()); assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); } finally { clientManager.releaseClient(xceiverClient, false);