@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.storage;

import com.google.common.primitives.Bytes;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -50,7 +51,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -186,9 +186,8 @@ public void testSeek() throws Exception {
assertThrows(EOFException.class, () -> seekAndVerify(finalPos));

// Seek to random positions between 0 and the block size.
Random random = new Random();
for (int i = 0; i < 10; i++) {
pos = random.nextInt(blockSize);
pos = RandomUtils.nextInt(0, blockSize);
seekAndVerify(pos);
}
}
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client.io;

import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -41,7 +42,6 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.SplittableRandom;
import java.util.concurrent.ExecutorService;
@@ -645,7 +645,7 @@ public void testSeekToPartialOffsetFails() {
}

private Integer getRandomStreamIndex(Set<Integer> set) {
return set.stream().skip(new Random().nextInt(set.size()))
return set.stream().skip(RandomUtils.nextInt(0, set.size()))
.findFirst().orElse(null);
}

@@ -22,7 +22,7 @@
import org.junit.jupiter.api.Test;

import java.nio.charset.StandardCharsets;
import java.util.Random;
import org.apache.commons.lang3.RandomUtils;
import java.util.zip.Checksum;

import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -59,11 +59,9 @@ void testCorrectness() {

checkBytes("hello world!".getBytes(StandardCharsets.UTF_8));

final Random random = new Random();
final byte[] bytes = new byte[1 << 10];
final int len = 1 << 10;
for (int i = 0; i < 1000; i++) {
random.nextBytes(bytes);
checkBytes(bytes, random.nextInt(bytes.length));
checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len));
}
}

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.server.http;

import org.apache.commons.lang3.RandomUtils;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerIdleThreadCount;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerMaxThreadCount;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadCount;
@@ -36,8 +37,6 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.util.Random;

/**
* Testing HttpServer2Metrics.
*/
@@ -57,11 +56,10 @@ public void setup() {
@Test
public void testMetrics() {
// create mock metrics
Random random = new Random();
int threadCount = random.nextInt();
int maxThreadCount = random.nextInt();
int idleThreadCount = random.nextInt();
int threadQueueWaitingTaskCount = random.nextInt();
int threadCount = RandomUtils.nextInt();
int maxThreadCount = RandomUtils.nextInt();
int idleThreadCount = RandomUtils.nextInt();
int threadQueueWaitingTaskCount = RandomUtils.nextInt();
String name = "s3g";

when(threadPool.getThreads()).thenReturn(threadCount);
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.block;

import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -70,7 +71,6 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -218,9 +218,8 @@ private Map<Long, List<Long>> generateData(int dataSize) throws IOException {
private Map<Long, List<Long>> generateData(int dataSize,
HddsProtos.LifeCycleState state) throws IOException {
Map<Long, List<Long>> blockMap = new HashMap<>();
Random random = new Random(1);
int continerIDBase = random.nextInt(100);
int localIDBase = random.nextInt(1000);
int continerIDBase = RandomUtils.nextInt(0, 100);
int localIDBase = RandomUtils.nextInt(0, 1000);
for (int i = 0; i < dataSize; i++) {
long containerID = continerIDBase + i;
updateContainerMetadata(containerID, state);
@@ -692,13 +691,12 @@ public void testInadequateReplicaCommit() throws Exception {
@Test
public void testRandomOperateTransactions() throws Exception {
mockContainerHealthResult(true);
Random random = new Random();
int added = 0, committed = 0;
List<DeletedBlocksTransaction> blocks = new ArrayList<>();
List<Long> txIDs;
// Randomly add/get/commit/increase transactions.
for (int i = 0; i < 100; i++) {
int state = random.nextInt(4);
int state = RandomUtils.nextInt(0, 4);
if (state == 0) {
addTransactions(generateData(10), true);
added += 10;
@@ -803,8 +801,7 @@ public void testDeletedBlockTransactions()
// add two transactions for same container
containerID = blocks.get(0).getContainerID();
Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
Random random = new Random();
long localId = random.nextLong();
long localId = RandomUtils.nextLong();
deletedBlocksMap.put(containerID, new LinkedList<>(
Collections.singletonList(localId)));
addTransactions(deletedBlocksMap, true);
@@ -19,9 +19,11 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.IntStream;

import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +50,6 @@
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;

import org.apache.commons.lang3.StringUtils;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
@@ -625,7 +625,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) {

for (int i = 0; i < 10; i++) {
// Set a random DN to in_service and ensure it is always picked
int index = new Random().nextInt(dnInfos.size());
int index = RandomUtils.nextInt(0, dnInfos.size());
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
try {
List<DatanodeDetails> datanodeDetails =
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.client.checksum;

import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
@@ -27,7 +28,6 @@
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Random;

import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC;
@@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer {
@Test
public void testComputeMd5Crc() throws IOException {
final int lenOfBytes = 32;
byte[] randomChunkChecksum = new byte[lenOfBytes];
Random r = new Random();
r.nextBytes(randomChunkChecksum);
byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);

MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum);
byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest();
AbstractBlockChecksumComputer computer =
@@ -56,9 +55,7 @@ public void testComputeCompositeCrc() throws IOException {
@Test
public void testComputeCompositeCrc() throws IOException {
final int lenOfBytes = 32;
byte[] randomChunkChecksum = new byte[lenOfBytes];
Random r = new Random();
r.nextBytes(randomChunkChecksum);
byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);

CrcComposer crcComposer =
CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4);
@@ -18,6 +18,7 @@

package org.apache.hadoop.fs.contract;

import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -31,7 +32,6 @@

import java.io.EOFException;
import java.io.IOException;
import java.util.Random;

import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable {
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, true, buf);
Random r = new Random();

// Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10];
int[] reads = new int[10];
try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
for (int i = 0; i < limit; i++) {
int seekOff = r.nextInt(buf.length);
int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
int seekOff = RandomUtils.nextInt(0, buf.length);
int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));

seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
@@ -96,7 +96,6 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
@@ -2000,7 +1999,7 @@ private void checkInvalidPath(Path path) {

@Test
void testRenameFile() throws Exception {
final String dir = "/dir" + new Random().nextInt(1000);
final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
Path file1Source = new Path(getBucketPath() + dir
+ "/file1_Copy");
@@ -2026,7 +2025,7 @@ void testRenameFileToDir() throws Exception {
*/
@Test
void testRenameFileToDir() throws Exception {
final String dir = "/dir" + new Random().nextInt(1000);
final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
getFs().mkdirs(dirPath);

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;

import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -34,7 +35,6 @@
import java.util.concurrent.TimeUnit;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.UUID;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
@@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
// each datanode has leaderNumOfEachDn leaders after balance
checkLeaderBalance(dnNum, leaderNumOfEachDn);

Random r = new Random(0);
for (int i = 0; i < 10; i++) {
// destroy some pipelines, wait new pipelines created,
// then check leader balance
@@ -181,7 +180,7 @@
.getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE), Pipeline.PipelineState.OPEN);

int destroyNum = r.nextInt(pipelines.size());
int destroyNum = RandomUtils.nextInt(0, pipelines.size());
for (int k = 0; k <= destroyNum; k++) {
pipelineManager.closePipeline(pipelines.get(k), false);
}
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone;

import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -41,7 +42,6 @@
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.ByteArrayInputStream;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.UUID;
import java.util.List;
@@ -217,8 +217,7 @@ public void testMultipart() throws Exception {

private static String generateRandomContent(int sizeInMB) {
int bytesToGenerate = sizeInMB * 1024 * 1024;
byte[] randomBytes = new byte[bytesToGenerate];
new SecureRandom().nextBytes(randomBytes);
byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate);
return Base64.getEncoder().encodeToString(randomBytes);
}
}
@@ -22,7 +22,7 @@
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.lang3.RandomUtils;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -280,7 +280,7 @@ public void testWatchForCommitForRetryfailure() throws Exception {
// as well as there is no logIndex generated in Ratis.
// The basic idea here is just to test if it throws an exception.
xceiverClient
.watchForCommit(index + new Random().nextInt(100) + 10);
.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10);
fail("expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(ExecutionException.class, e);
@@ -374,7 +374,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
// The basic idea here is just to test if it throws an exception.
xceiverClient
.watchForCommit(reply.getLogIndex() +
new Random().nextInt(100) + 10);
RandomUtils.nextInt(0, 100) + 10);
fail("Expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e));
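
For reference, below is a minimal sketch (not part of the patch) of the org.apache.commons.lang3.RandomUtils calls that the replaced java.util.Random and SecureRandom usages map onto; the class name and bounds are illustrative assumptions, not taken from the diff.

import org.apache.commons.lang3.RandomUtils;

// Illustrative only: shows the RandomUtils methods the patch switches to.
public final class RandomUtilsSketch {
  public static void main(String[] args) {
    // nextInt(startInclusive, endExclusive) replaces new Random().nextInt(bound):
    // returns a value in [0, 100), i.e. the upper bound is exclusive.
    int bounded = RandomUtils.nextInt(0, 100);

    // The no-arg variants return non-negative values, unlike
    // new Random().nextInt()/nextLong(), which may also return negatives.
    int anyInt = RandomUtils.nextInt();
    long anyLong = RandomUtils.nextLong();

    // nextBytes(count) allocates and fills a fresh array, replacing the
    // "new byte[count]; random.nextBytes(array)" pattern.
    byte[] payload = RandomUtils.nextBytes(16);

    System.out.println(bounded + " " + anyInt + " " + anyLong + " " + payload.length);
  }
}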