diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index 39ef852319b..781102b06fa 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -25,7 +25,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.util.Shell;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -47,9 +47,7 @@ void setUp() {
   static void createFile(File newFile, int size) throws IOException {
     // write random data so that filesystems with compression enabled (e.g. ZFS)
     // can't compress the file
-    Random random = new Random();
-    byte[] data = new byte[size];
-    random.nextBytes(data);
+    byte[] data = RandomUtils.secure().randomBytes(size);
 
     assumeTrue(newFile.createNewFile());
     RandomAccessFile file = new RandomAccessFile(newFile, "rws");
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
index 17a2a882b4d..bc5448e85a5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
@@ -32,12 +32,12 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
@@ -188,15 +188,13 @@ static void runTestBuffers(int dataSize, int max, int seed, String name)
     assertThat(max).isGreaterThanOrEqualTo(PUT_BLOCK_PROTO_SIZE);
 
     // random data
-    final byte[] data = new byte[dataSize];
-    final Random random = new Random(seed);
-    random.nextBytes(data);
+    final byte[] data = RandomUtils.secure().randomBytes(dataSize);
 
     // write output
     final Buffers buffers = new Buffers(max);
     final Output out = new Output(buffers);
     for (int offset = 0; offset < dataSize;) {
-      final int randomLength = random.nextInt(4 * max);
+      final int randomLength = RandomUtils.secure().randomInt(1, 4 * max);
       final int length = Math.min(randomLength, dataSize - offset);
       LOG.info("{}: offset = {}, length = {}", name, offset, length);
       final ByteBuffer b = ByteBuffer.wrap(data, offset, length);
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java
index 85d97f535c3..15c74287acd 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java
@@ -21,17 +21,15 @@
 
 import java.nio.ByteBuffer;
 import java.util.Arrays;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
 /**
  * Test base of common utilities for tests not only raw coders but also block
  * coders.
  */
 @SuppressWarnings({"checkstyle:VisibilityModifier", "checkstyle:HiddenField"})
 public abstract class TestCoderBase {
-  protected static final Random RAND = new Random();
   private static int fixedDataGenerator = 0;
   protected boolean allowDump = true;
   protected int numDataUnits;
@@ -406,14 +404,13 @@ protected ECChunk generateDataChunk() {
    */
   protected void fillDummyData(ByteBuffer buffer, int len) {
-    byte[] dummy = new byte[len];
-    RAND.nextBytes(dummy);
+    byte[] dummy = RandomUtils.secure().randomBytes(len);
     buffer.put(dummy);
   }
 
   protected byte[] generateData(int len) {
     byte[] buffer = new byte[len];
     for (int i = 0; i < buffer.length; i++) {
-      buffer[i] = (byte) RAND.nextInt(256);
+      buffer[i] = (byte) RandomUtils.secure().randomInt(0, 256);
     }
     return buffer;
   }
@@ -512,7 +509,7 @@ protected void dumpChunks(String header, ECChunk[] chunks) {
    * Make some chunk messy or not correct any more.
    */
   protected void corruptSomeChunk(ECChunk[] chunks) {
-    int idx = new Random().nextInt(chunks.length);
+    int idx = RandomUtils.secure().randomInt(0, chunks.length);
     ByteBuffer buffer = chunks[idx].getBuffer();
     if (buffer.hasRemaining()) {
       buffer.position(buffer.position() + 1);
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
index a2bee3b0ca0..ef9b59325dd 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
@@ -25,12 +25,12 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.util.StopWatch;
 
@@ -287,10 +287,8 @@ private static void printThreadStatistics(
   }
 
   private static ByteBuffer genTestData(boolean useDirectBuffer, int sizeKB) {
-    Random random = new Random();
     int bufferSize = sizeKB * 1024;
-    byte[] tmp = new byte[bufferSize];
-    random.nextBytes(tmp);
+    byte[] tmp = RandomUtils.secure().randomBytes(bufferSize);
     ByteBuffer data = useDirectBuffer ?
         ByteBuffer.allocateDirect(bufferSize) : ByteBuffer.allocate(bufferSize);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java
index 9ec5a45671e..10bd0e3b803 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java
@@ -18,21 +18,16 @@
 package org.apache.hadoop.ozone.util;
 
 import com.google.protobuf.Proto2Utils;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
 import org.apache.ratis.util.Preconditions;
-
 /**
  * Utility class for payload operations.
  */
 public final class PayloadUtils {
   private static final int MAX_SIZE = 2097151 * 1024;
-  private static final byte[] SEED = new byte[1024];
-
-  static {
-    new Random().nextBytes(SEED);
-  }
+  private static final byte[] SEED = RandomUtils.secure().randomBytes(1024);
 
   private PayloadUtils() {
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index 73188e8b970..2bc5947a700 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@ -34,12 +34,12 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.NotImplementedException;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
@@ -219,8 +219,7 @@ public void testECKeyCreatetWithDatanodeIdChange()
     List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
     while (locationInfoList.isEmpty()) {
       locationInfoList = groupOutputStream.getLocationInfoList();
-      Random random = new Random();
-      random.nextBytes(b);
+      b = RandomUtils.secure().randomBytes(b.length);
       assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream());
       key.write(b);
       key.flush();
@@ -247,8 +246,7 @@ public void testECKeyCreatetWithDatanodeIdChange()
     locationInfoList = groupOutputStream.getLocationInfoList();
     while (locationInfoList.size() == 1) {
       locationInfoList = groupOutputStream.getLocationInfoList();
-      Random random = new Random();
-      random.nextBytes(b);
+      b = RandomUtils.secure().randomBytes(b.length);
       assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream());
       key.write(b);
       key.flush();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index ff627664d69..a70d4eb6753 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -60,6 +60,7 @@
 import java.util.TreeMap;
 import java.util.UUID;
 import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
@@ -651,9 +652,8 @@ private void testMultipartUploadWithEncryption(OzoneBucket bucket,
 
     // Read different data lengths and starting from different offsets and
     // verify the data matches.
-    Random random = new Random();
-    int randomSize = random.nextInt(keySize / 2);
-    int randomOffset = random.nextInt(keySize - randomSize);
+    int randomSize = RandomUtils.secure().randomInt(0, keySize / 2);
+    int randomOffset = RandomUtils.secure().randomInt(0, keySize - randomSize);
     int[] readDataSizes = {keySize, keySize / 3 + 1, BLOCK_SIZE,
         BLOCK_SIZE * 2 + 1, CHUNK_SIZE, CHUNK_SIZE / 4 - 1,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
index 59cdbefa80a..6daa0235a6c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
@@ -30,7 +30,7 @@
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -96,9 +96,8 @@ private void randomSeek(TestBucket bucket, int dataLength,
   private void randomPositionSeek(TestBucket bucket, int dataLength,
       KeyInputStream keyInputStream, byte[] inputData, int readSize)
       throws Exception {
-    Random rand = new Random();
     for (int i = 0; i < 100; i++) {
-      int position = rand.nextInt(dataLength - readSize);
+      int position = RandomUtils.secure().randomInt(0, dataLength - readSize);
       validate(bucket, keyInputStream, inputData, position, readSize);
     }
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 7c70281315f..a75e122531c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -46,12 +46,12 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -100,7 +100,6 @@ class TestContainerReplication {
   private static final String VOLUME = "vol1";
   private static final String BUCKET = "bucket1";
   private static final String KEY = "key1";
-
   private static final List<Class<? extends PlacementPolicy>> POLICIES = asList(
       SCMContainerPlacementCapacity.class,
       SCMContainerPlacementRackAware.class,
@@ -207,8 +206,6 @@ private byte[] createTestData(OzoneClient client, int size) throws IOException {
     try (OutputStream out = bucket.createKey(KEY, 0,
         new ECReplicationConfig("RS-3-2-1k"), new HashMap<>())) {
-      byte[] b = new byte[size];
-      Random random = new Random();
-      random.nextBytes(b);
+      byte[] b = RandomUtils.secure().randomBytes(size);
       out.write(b);
       return b;
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
index f088e206381..4d536640af6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
@@ -86,11 +86,11 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.stream.Collectors;
 import javax.xml.bind.DatatypeConverter;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -1064,9 +1064,7 @@ private static byte[] calculateDigest(InputStream inputStream, int skip, int len)
   private static void createFile(File newFile, int size) throws IOException {
     // write random data so that filesystems with compression enabled (e.g. ZFS)
    // can't compress the file
-    Random random = new Random();
-    byte[] data = new byte[size];
-    random.nextBytes(data);
+    byte[] data = RandomUtils.secure().randomBytes(size);
 
     RandomAccessFile file = new RandomAccessFile(newFile, "rws");
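
Note for reviewers: the replacements above all rely on the bound conventions
of the commons-lang3 random API. The sketch below is illustrative only; it
assumes commons-lang3 3.16 or later, where RandomUtils.secure() was
introduced, and the class name RandomBoundsCheck is made up for this note.
It shows how the new calls line up with the java.util.Random calls they
replace. Also note that secure() draws from SecureRandom, so the seeded,
reproducible sequence runTestBuffers previously got from new Random(seed)
is no longer available, and its seed parameter is now effectively unused.

import org.apache.commons.lang3.RandomUtils;

// Illustrative only -- not part of the patch.
public class RandomBoundsCheck {
  public static void main(String[] args) {
    // randomBytes(n) allocates and fills an n-byte array, replacing the
    // two-step new byte[n] + Random#nextBytes(arr) pattern.
    byte[] data = RandomUtils.secure().randomBytes(16);

    // randomInt(startInclusive, endExclusive) spans [start, end), so this
    // call matches the old Random#nextInt(256).
    int value = RandomUtils.secure().randomInt(0, 256);

    // A lower bound of 1 would exclude 0 from the range, which is why
    // array-index selection (e.g. corruptSomeChunk) must start at 0.
    System.out.println(data.length + " " + value);
  }
}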