diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 77dd13d9ad72..cd1b00daa459 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -3031,12 +3031,26 @@ static TestOptions calculateRowsAndSize(final TestOptions opts) { && (opts.getCmdName().equals(RANDOM_READ) || opts.getCmdName().equals(RANDOM_SEEK_SCAN))) && opts.size != DEFAULT_OPTS.size && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) { + // If rowsPerGB * opts.size exceeds the max of int, throw RuntimeException + if(Integer.MAX_VALUE / rowsPerGB < opts.size) { + throw new RuntimeException("totalRows is larger than maximum of int"); + } + opts.totalRows = (int) opts.size * rowsPerGB; } else if (opts.size != DEFAULT_OPTS.size) { // total size in GB specified + if(Integer.MAX_VALUE / rowsPerGB < opts.size) { + throw new RuntimeException("totalRows is larger than maximum of int"); + } + opts.totalRows = (int) opts.size * rowsPerGB; opts.perClientRunRows = opts.totalRows / opts.numClientThreads; } else { + // If opts.numClientThreads * opts.perClientRunRows exceeds the max of int, throw RuntimeException + if(Integer.MAX_VALUE / opts.numClientThreads < opts.perClientRunRows) { + throw new RuntimeException("totalRows is larger than maximum of int"); + } + opts.totalRows = opts.perClientRunRows * opts.numClientThreads; opts.size = opts.totalRows / rowsPerGB; }