result =
+ checksumRange.getData().thenCombineAsync(dataRange.getData(),
+ (sumBuffer, dataBuffer) ->
+ checkBytes(sumBuffer, checksumRange.getOffset(),
+ dataBuffer, dataRange.getOffset(), bytesPerSum, file));
+ // Now, slice the read data range to the user's ranges
+ for(FileRange original: ((CombinedFileRange) dataRange).getUnderlying()) {
+ original.setData(result.thenApply(
+ (b) -> VectoredReadUtils.sliceTo(b, dataRange.getOffset(), original)));
+ }
+ }
+ }
+ }
+
+ @Override
+ public boolean hasCapability(String capability) {
+ return datas.hasCapability(capability);
+ }
}
-
+
private static class FSDataBoundedInputStream extends FSDataInputStream {
private FileSystem fs;
private Path file;
@@ -317,12 +452,12 @@ private static class FSDataBoundedInputStream extends FSDataInputStream {
this.fs = fs;
this.file = file;
}
-
+
@Override
public boolean markSupported() {
return false;
}
-
+
/* Return the file length */
private long getFileLength() throws IOException {
if( fileLen==-1L ) {
@@ -330,7 +465,7 @@ private long getFileLength() throws IOException {
}
return fileLen;
}
-
+
/**
* Skips over and discards n bytes of data from the
* input stream.
@@ -354,11 +489,11 @@ public synchronized long skip(long n) throws IOException {
}
return super.skip(n);
}
-
+
/**
* Seek to the given position in the stream.
* The next read() will be from that position.
- *
+ *
* This method does not allow seek past the end of the file.
* This produces IOException.
*
@@ -424,22 +559,22 @@ public void concat(final Path f, final Path[] psrcs) throws IOException {
*/
public static long getChecksumLength(long size, int bytesPerSum) {
//the checksum length is equal to size passed divided by bytesPerSum +
- //bytes written in the beginning of the checksum file.
- return ((size + bytesPerSum - 1) / bytesPerSum) * 4 +
- CHECKSUM_VERSION.length + 4;
+ //bytes written in the beginning of the checksum file.
+ return ((size + bytesPerSum - 1) / bytesPerSum) * FSInputChecker.CHECKSUM_SIZE +
+ ChecksumFSInputChecker.HEADER_LENGTH;
}
/** This class provides an output stream for a checksummed file.
* It generates checksums for data. */
private static class ChecksumFSOutputSummer extends FSOutputSummer
implements IOStatisticsSource, StreamCapabilities {
- private FSDataOutputStream datas;
+ private FSDataOutputStream datas;
private FSDataOutputStream sums;
private static final float CHKSUM_AS_FRACTION = 0.01f;
private boolean isClosed = false;
-
- public ChecksumFSOutputSummer(ChecksumFileSystem fs,
- Path file,
+
+ ChecksumFSOutputSummer(ChecksumFileSystem fs,
+ Path file,
boolean overwrite,
int bufferSize,
short replication,
@@ -460,7 +595,7 @@ public ChecksumFSOutputSummer(ChecksumFileSystem fs,
sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
sums.writeInt(bytesPerSum);
}
-
+
@Override
public void close() throws IOException {
try {
@@ -471,7 +606,7 @@ public void close() throws IOException {
isClosed = true;
}
}
-
+
@Override
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
int ckoff, int cklen)
@@ -727,7 +862,7 @@ public boolean rename(Path src, Path dst) throws IOException {
value = fs.rename(srcCheckFile, dstCheckFile);
} else if (fs.exists(dstCheckFile)) {
// no src checksum, so remove dst checksum
- value = fs.delete(dstCheckFile, true);
+ value = fs.delete(dstCheckFile, true);
}
return value;
@@ -759,7 +894,7 @@ public boolean delete(Path f, boolean recursive) throws IOException{
return fs.delete(f, true);
}
}
-
+
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {
@@ -770,7 +905,7 @@ public boolean accept(Path file) {
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
- *
+ *
* @param f
* given path
* @return the statuses of the files/directories in the given path
@@ -791,7 +926,7 @@ public RemoteIterator<FileStatus> listStatusIterator(final Path p)
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
- *
+ *
* @param f
* given path
* @return the statuses of the files/directories in the given patch
@@ -802,7 +937,7 @@ public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f, DEFAULT_FILTER);
}
-
+
@Override
public boolean mkdirs(Path f) throws IOException {
return fs.mkdirs(f);
@@ -856,7 +991,7 @@ public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
} else {
FileStatus[] srcs = listStatus(src);
for (FileStatus srcFile : srcs) {
- copyToLocalFile(srcFile.getPath(),
+ copyToLocalFile(srcFile.getPath(),
new Path(dst, srcFile.getPath().getName()), copyCrc);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 5225236509294..7a458e8f3fccd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -1054,5 +1054,13 @@ public class CommonConfigurationKeysPublic {
public static final String HADOOP_HTTP_IDLE_TIMEOUT_MS_KEY =
"hadoop.http.idle_timeout.ms";
public static final int HADOOP_HTTP_IDLE_TIMEOUT_MS_DEFAULT = 60000;
+
+ /**
+ * To configure scheduling of server metrics update thread. This config is used to indicate
+ * initial delay and delay between each execution of the metric update runnable thread.
+ */
+ public static final String IPC_SERVER_METRICS_UPDATE_RUNNER_INTERVAL =
+ "ipc.server.metrics.update.runner.interval";
+ public static final int IPC_SERVER_METRICS_UPDATE_RUNNER_INTERVAL_DEFAULT = 5000;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index b143a4cb63d19..52644402ca459 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -1,4 +1,4 @@
-/**
+/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -26,6 +26,8 @@
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.EnumSet;
+import java.util.List;
+import java.util.function.IntFunction;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -51,7 +53,7 @@ public class FSDataInputStream extends DataInputStream
*/
   private final IdentityHashStore<ByteBuffer, ByteBufferPool>
       extendedReadBuffers
-      = new IdentityHashStore<ByteBuffer, ByteBufferPool>(0);
+      = new IdentityHashStore<>(0);
public FSDataInputStream(InputStream in) {
super(in);
@@ -279,4 +281,20 @@ public void readFully(long position, ByteBuffer buf) throws IOException {
public IOStatistics getIOStatistics() {
return IOStatisticsSupport.retrieveIOStatistics(in);
}
+
+ @Override
+ public int minSeekForVectorReads() {
+ return ((PositionedReadable) in).minSeekForVectorReads();
+ }
+
+ @Override
+ public int maxReadSizeForVectorReads() {
+ return ((PositionedReadable) in).maxReadSizeForVectorReads();
+ }
+
+ @Override
+  public void readVectored(List<? extends FileRange> ranges,
+                           IntFunction<ByteBuffer> allocate) throws IOException {
+ ((PositionedReadable) in).readVectored(ranges, allocate);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 298570bb55fe8..22ac2ecbd7949 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -2372,8 +2372,7 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
   Set<AbstractFileSystem> resolveAbstractFileSystems(final Path f)
       throws IOException {
     final Path absF = fixRelativePart(f);
-    final HashSet<AbstractFileSystem> result
-        = new HashSet<AbstractFileSystem>();
+    final HashSet<AbstractFileSystem> result = new HashSet<>();
     new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileRange.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileRange.java
new file mode 100644
index 0000000000000..e55696e96507e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileRange.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.fs.impl.FileRangeImpl;
+
+/**
+ * A byte range of a file.
+ * This is used for the asynchronous gather read API of
+ * {@link PositionedReadable#readVectored}.
+ */
+public interface FileRange {
+
+ /**
+ * Get the starting offset of the range.
+ * @return the byte offset of the start
+ */
+ long getOffset();
+
+ /**
+ * Get the length of the range.
+ * @return the number of bytes in the range.
+ */
+ int getLength();
+
+ /**
+ * Get the future data for this range.
+ * @return the future for the {@link ByteBuffer} that contains the data
+ */
+  CompletableFuture<ByteBuffer> getData();
+
+ /**
+ * Set a future for this range's data.
+ * This method is called by {@link PositionedReadable#readVectored} to store the
+ * data for the user to pick up later via {@link #getData}.
+ * @param data the future of the ByteBuffer that will have the data
+ */
+  void setData(CompletableFuture<ByteBuffer> data);
+
+ /**
+ * Factory method to create a FileRange object.
+ * @param offset starting offset of the range.
+ * @param length length of the range.
+ * @return a new instance of FileRangeImpl.
+ */
+ static FileRange createFileRange(long offset, int length) {
+ return new FileRangeImpl(offset, length);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
index 6744d17a72666..de76090512705 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,7 +17,11 @@
*/
package org.apache.hadoop.fs;
-import java.io.*;
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.function.IntFunction;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -85,4 +89,37 @@ void readFully(long position, byte[] buffer, int offset, int length)
* the read operation completed
*/
void readFully(long position, byte[] buffer) throws IOException;
+
+ /**
+ * What is the smallest reasonable seek?
+ * @return the minimum number of bytes
+ */
+ default int minSeekForVectorReads() {
+ return 4 * 1024;
+ }
+
+ /**
+ * What is the largest size that we should group ranges together as?
+ * @return the number of bytes to read at once
+ */
+ default int maxReadSizeForVectorReads() {
+ return 1024 * 1024;
+ }
+
+ /**
+ * Read fully a list of file ranges asynchronously from this file.
+ * The default iterates through the ranges to read each synchronously, but
+ * the intent is that FSDataInputStream subclasses can make more efficient
+ * readers.
+ * As a result of the call, each range will have FileRange.setData(CompletableFuture)
+ * called with a future that when complete will have a ByteBuffer with the
+ * data from the file's range.
+ * @param ranges the byte ranges to read
+ * @param allocate the function to allocate ByteBuffer
+ * @throws IOException any IOE.
+ */
+  default void readVectored(List<? extends FileRange> ranges,
+                            IntFunction<ByteBuffer> allocate) throws IOException {
+ VectoredReadUtils.readVectored(this, ranges, allocate);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 468b37a885d23..f525c3cba78fe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -33,8 +33,11 @@
import java.io.FileDescriptor;
import java.net.URI;
import java.nio.ByteBuffer;
+import java.nio.channels.AsynchronousFileChannel;
+import java.nio.channels.CompletionHandler;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
+import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.BasicFileAttributeView;
import java.nio.file.attribute.FileTime;
@@ -44,6 +47,9 @@
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.IntFunction;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -61,6 +67,7 @@
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+import static org.apache.hadoop.fs.VectoredReadUtils.sortRanges;
import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_BYTES;
import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_EXCEPTIONS;
import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS;
@@ -130,7 +137,9 @@ public void initialize(URI uri, Configuration conf) throws IOException {
class LocalFSFileInputStream extends FSInputStream implements
HasFileDescriptor, IOStatisticsSource, StreamCapabilities {
private FileInputStream fis;
+ private final File name;
private long position;
+ private AsynchronousFileChannel asyncChannel = null;
/**
* Minimal set of counters.
@@ -148,7 +157,8 @@ class LocalFSFileInputStream extends FSInputStream implements
private final AtomicLong bytesRead;
public LocalFSFileInputStream(Path f) throws IOException {
- fis = new FileInputStream(pathToFile(f));
+ name = pathToFile(f);
+ fis = new FileInputStream(name);
bytesRead = ioStatistics.getCounterReference(
STREAM_READ_BYTES);
}
@@ -179,10 +189,16 @@ public boolean seekToNewSource(long targetPos) throws IOException {
@Override
public int available() throws IOException { return fis.available(); }
@Override
- public void close() throws IOException { fis.close(); }
- @Override
public boolean markSupported() { return false; }
-
+
+ @Override
+ public void close() throws IOException {
+ fis.close();
+ if (asyncChannel != null) {
+ asyncChannel.close();
+ }
+ }
+
@Override
public int read() throws IOException {
try {
@@ -262,6 +278,7 @@ public boolean hasCapability(String capability) {
// new capabilities.
switch (capability.toLowerCase(Locale.ENGLISH)) {
case StreamCapabilities.IOSTATISTICS:
+ case StreamCapabilities.VECTOREDIO:
return true;
default:
return false;
@@ -272,8 +289,89 @@ public boolean hasCapability(String capability) {
public IOStatistics getIOStatistics() {
return ioStatistics;
}
+
+ AsynchronousFileChannel getAsyncChannel() throws IOException {
+ if (asyncChannel == null) {
+ synchronized (this) {
+ asyncChannel = AsynchronousFileChannel.open(name.toPath(),
+ StandardOpenOption.READ);
+ }
+ }
+ return asyncChannel;
+ }
+
+ @Override
+    public void readVectored(List<? extends FileRange> ranges,
+                             IntFunction<ByteBuffer> allocate) throws IOException {
+
+      List<? extends FileRange> sortedRanges = Arrays.asList(sortRanges(ranges));
+ // Set up all of the futures, so that we can use them if things fail
+ for(FileRange range: sortedRanges) {
+ VectoredReadUtils.validateRangeRequest(range);
+ range.setData(new CompletableFuture<>());
+ }
+ try {
+ AsynchronousFileChannel channel = getAsyncChannel();
+ ByteBuffer[] buffers = new ByteBuffer[sortedRanges.size()];
+ AsyncHandler asyncHandler = new AsyncHandler(channel, sortedRanges, buffers);
+ for(int i = 0; i < sortedRanges.size(); ++i) {
+ FileRange range = sortedRanges.get(i);
+ buffers[i] = allocate.apply(range.getLength());
+ channel.read(buffers[i], range.getOffset(), i, asyncHandler);
+ }
+ } catch (IOException ioe) {
+ LOG.debug("Exception occurred during vectored read ", ioe);
+ for(FileRange range: sortedRanges) {
+ range.getData().completeExceptionally(ioe);
+ }
+ }
+ }
}
-
+
+ /**
+ * A CompletionHandler that implements readFully and translates back
+ * into the form of CompletionHandler that our users expect.
+ */
+  static class AsyncHandler implements CompletionHandler<Integer, Integer> {
+    private final AsynchronousFileChannel channel;
+    private final List<? extends FileRange> ranges;
+    private final ByteBuffer[] buffers;
+
+    AsyncHandler(AsynchronousFileChannel channel,
+                 List<? extends FileRange> ranges,
+                 ByteBuffer[] buffers) {
+ this.channel = channel;
+ this.ranges = ranges;
+ this.buffers = buffers;
+ }
+
+ @Override
+ public void completed(Integer result, Integer r) {
+ FileRange range = ranges.get(r);
+ ByteBuffer buffer = buffers[r];
+ if (result == -1) {
+ failed(new EOFException("Read past End of File"), r);
+ } else {
+ if (buffer.remaining() > 0) {
+ // issue a read for the rest of the buffer
+ // QQ: What if this fails? It has the same handler.
+ channel.read(buffer, range.getOffset() + buffer.position(), r, this);
+ } else {
+ // QQ: Why is this required? I think because we don't want the
+ // user to read data beyond limit.
+ buffer.flip();
+ range.getData().complete(buffer);
+ }
+ }
+ }
+
+ @Override
+ public void failed(Throwable exc, Integer r) {
+ LOG.debug("Failed while reading range {} ", r, exc);
+ ranges.get(r).getData().completeExceptionally(exc);
+ }
+ }
+
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
getFileStatus(f);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
index 861178019505e..d68ef505dc3fe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
@@ -80,6 +80,12 @@ public interface StreamCapabilities {
*/
String IOSTATISTICS = "iostatistics";
+ /**
+ * Support for vectored IO api.
+ * See {@code PositionedReadable#readVectored(List, IntFunction)}.
+ */
+ String VECTOREDIO = "readvectored";
+
/**
* Stream abort() capability implemented by {@link Abortable#abort()}.
* This matches the Path Capability
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java
new file mode 100644
index 0000000000000..64107f1a18f89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.IntFunction;
+
+import org.apache.hadoop.fs.impl.CombinedFileRange;
+import org.apache.hadoop.util.Preconditions;
+
+/**
+ * Utility class which implements helper methods used
+ * in vectored IO implementation.
+ */
+public final class VectoredReadUtils {
+
+ /**
+ * Validate a single range.
+ * @param range file range.
+ * @throws EOFException any EOF Exception.
+ */
+ public static void validateRangeRequest(FileRange range)
+ throws EOFException {
+
+ Preconditions.checkArgument(range.getLength() >= 0, "length is negative");
+ if (range.getOffset() < 0) {
+ throw new EOFException("position is negative");
+ }
+ }
+
+ /**
+ * Validate a list of vectored read ranges.
+ * @param ranges list of ranges.
+ * @throws EOFException any EOF exception.
+ */
+  public static void validateVectoredReadRanges(List<? extends FileRange> ranges)
+ throws EOFException {
+ for (FileRange range : ranges) {
+ validateRangeRequest(range);
+ }
+ }
+
+
+
+ /**
+ * This is the default implementation which iterates through the ranges
+ * to read each synchronously, but the intent is that subclasses
+ * can make more efficient readers.
+ * The data or exceptions are pushed into {@link FileRange#getData()}.
+ * @param stream the stream to read the data from
+ * @param ranges the byte ranges to read
+ * @param allocate the byte buffer allocation
+ */
+  public static void readVectored(PositionedReadable stream,
+                                  List<? extends FileRange> ranges,
+                                  IntFunction<ByteBuffer> allocate) {
+ for (FileRange range: ranges) {
+ range.setData(readRangeFrom(stream, range, allocate));
+ }
+ }
+
+ /**
+ * Synchronously reads a range from the stream dealing with the combinations
+ * of ByteBuffers buffers and PositionedReadable streams.
+ * @param stream the stream to read from
+ * @param range the range to read
+ * @param allocate the function to allocate ByteBuffers
+ * @return the CompletableFuture that contains the read data
+ */
+  public static CompletableFuture<ByteBuffer> readRangeFrom(PositionedReadable stream,
+                                                            FileRange range,
+                                                            IntFunction<ByteBuffer> allocate) {
+    CompletableFuture<ByteBuffer> result = new CompletableFuture<>();
+ try {
+ ByteBuffer buffer = allocate.apply(range.getLength());
+ if (stream instanceof ByteBufferPositionedReadable) {
+ ((ByteBufferPositionedReadable) stream).readFully(range.getOffset(),
+ buffer);
+ buffer.flip();
+ } else {
+ readNonByteBufferPositionedReadable(stream, range, buffer);
+ }
+ result.complete(buffer);
+ } catch (IOException ioe) {
+ result.completeExceptionally(ioe);
+ }
+ return result;
+ }
+
+ private static void readNonByteBufferPositionedReadable(PositionedReadable stream,
+ FileRange range,
+ ByteBuffer buffer) throws IOException {
+ if (buffer.isDirect()) {
+ buffer.put(readInDirectBuffer(stream, range));
+ buffer.flip();
+ } else {
+ stream.readFully(range.getOffset(), buffer.array(),
+ buffer.arrayOffset(), range.getLength());
+ }
+ }
+
+ private static byte[] readInDirectBuffer(PositionedReadable stream,
+ FileRange range) throws IOException {
+ // if we need to read data from a direct buffer and the stream doesn't
+ // support it, we allocate a byte array to use.
+ byte[] tmp = new byte[range.getLength()];
+ stream.readFully(range.getOffset(), tmp, 0, tmp.length);
+ return tmp;
+ }
+
+ /**
+ * Is the given input list.
+ *
+ * - already sorted by offset
+ * - each range is more than minimumSeek apart
+ * - the start and end of each range is a multiple of chunkSize
+ *
+ *
+ * @param input the list of input ranges.
+ * @param chunkSize the size of the chunks that the offset and end must align to.
+ * @param minimumSeek the minimum distance between ranges.
+ * @return true if we can use the input list as is.
+ */
+  public static boolean isOrderedDisjoint(List<? extends FileRange> input,
+ int chunkSize,
+ int minimumSeek) {
+ long previous = -minimumSeek;
+ for (FileRange range: input) {
+ long offset = range.getOffset();
+ long end = range.getOffset() + range.getLength();
+ if (offset % chunkSize != 0 ||
+ end % chunkSize != 0 ||
+ (offset - previous < minimumSeek)) {
+ return false;
+ }
+ previous = end;
+ }
+ return true;
+ }
+
+ /**
+ * Calculates floor value of offset based on chunk size.
+ * @param offset file offset.
+ * @param chunkSize file chunk size.
+ * @return floor value.
+ */
+ public static long roundDown(long offset, int chunkSize) {
+ if (chunkSize > 1) {
+ return offset - (offset % chunkSize);
+ } else {
+ return offset;
+ }
+ }
+
+ /**
+ * Calculates the ceil value of offset based on chunk size.
+ * @param offset file offset.
+ * @param chunkSize file chunk size.
+ * @return ceil value.
+ */
+ public static long roundUp(long offset, int chunkSize) {
+ if (chunkSize > 1) {
+ long next = offset + chunkSize - 1;
+ return next - (next % chunkSize);
+ } else {
+ return offset;
+ }
+ }
+
+ /**
+ * Check if the input ranges are overlapping in nature.
+ * We call two ranges to be overlapping when start offset
+ * of second is less than the end offset of first.
+ * End offset is calculated as start offset + length.
+ * @param input list if input ranges.
+ * @return true/false based on logic explained above.
+ */
+  public static List<? extends FileRange> validateNonOverlappingAndReturnSortedRanges(
+          List<? extends FileRange> input) {
+
+ if (input.size() <= 1) {
+ return input;
+ }
+    FileRange[] sortedRanges = sortRanges(input);
+    FileRange prev = sortedRanges[0];
+    for (int i=1; i<sortedRanges.length; ++i) {
+      FileRange current = sortedRanges[i];
+      Preconditions.checkArgument(current.getOffset() >= prev.getOffset() + prev.getLength(),
+              "Overlapping ranges are not supported");
+      prev = current;
+    }
+    return Arrays.asList(sortedRanges);
+  }
+
+  /**
+   * Sort the input ranges by offset.
+   * @param input input ranges.
+   * @return sorted ranges.
+   */
+  public static FileRange[] sortRanges(List<? extends FileRange> input) {
+    FileRange[] sortedRanges = input.toArray(new FileRange[0]);
+    Arrays.sort(sortedRanges, Comparator.comparingLong(FileRange::getOffset));
+    return sortedRanges;
+  }
+
+ /**
+ * Merge sorted ranges to optimize the access from the underlying file
+ * system.
+ * The motivations are that:
+ *
+ * - Upper layers want to pass down logical file ranges.
+ * - Fewer reads have better performance.
+ * - Applications want callbacks as ranges are read.
+ * - Some file systems want to round ranges to be at checksum boundaries.
+ *
+ *
+ * @param sortedRanges already sorted list of ranges based on offset.
+ * @param chunkSize round the start and end points to multiples of chunkSize
+ * @param minimumSeek the smallest gap that we should seek over in bytes
+ * @param maxSize the largest combined file range in bytes
+ * @return the list of sorted CombinedFileRanges that cover the input
+ */
+  public static List<CombinedFileRange> mergeSortedRanges(List<? extends FileRange> sortedRanges,
+ int chunkSize,
+ int minimumSeek,
+ int maxSize) {
+
+ CombinedFileRange current = null;
+    List<CombinedFileRange> result = new ArrayList<>(sortedRanges.size());
+
+ // now merge together the ones that merge
+ for (FileRange range: sortedRanges) {
+ long start = roundDown(range.getOffset(), chunkSize);
+ long end = roundUp(range.getOffset() + range.getLength(), chunkSize);
+ if (current == null || !current.merge(start, end, range, minimumSeek, maxSize)) {
+ current = new CombinedFileRange(start, end, range);
+ result.add(current);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Slice the data that was read to the user's request.
+ * This function assumes that the user's request is completely subsumed by the
+ * read data. This always creates a new buffer pointing to the same underlying
+ * data but with its own mark and position fields such that reading one buffer
+ * can't effect other's mark and position.
+ * @param readData the buffer with the readData
+ * @param readOffset the offset in the file for the readData
+ * @param request the user's request
+ * @return the readData buffer that is sliced to the user's request
+ */
+ public static ByteBuffer sliceTo(ByteBuffer readData, long readOffset,
+ FileRange request) {
+ int offsetChange = (int) (request.getOffset() - readOffset);
+ int requestLength = request.getLength();
+ readData = readData.slice();
+ readData.position(offsetChange);
+ readData.limit(offsetChange + requestLength);
+ return readData;
+ }
+
+ /**
+ * private constructor.
+ */
+ private VectoredReadUtils() {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java
index d9629e388b384..0929c2be03acf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java
@@ -90,6 +90,11 @@ private AuditConstants() {
*/
public static final String PARAM_PROCESS = "ps";
+ /**
+ * Task Attempt ID query header: {@value}.
+ */
+ public static final String PARAM_TASK_ATTEMPT_ID = "ta";
+
/**
* Thread 0: the thread which created a span {@value}.
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java
index e188e168e5313..2dcd4f8b3f570 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java
@@ -124,11 +124,15 @@ private CommonAuditContext() {
/**
* Put a context entry.
* @param key key
- * @param value new value
+ * @param value new value., If null, triggers removal.
* @return old value or null
*/
   public Supplier<String> put(String key, String value) {
- return evaluatedEntries.put(key, () -> value);
+ if (value != null) {
+ return evaluatedEntries.put(key, () -> value);
+ } else {
+ return evaluatedEntries.remove(key);
+ }
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/CombinedFileRange.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/CombinedFileRange.java
new file mode 100644
index 0000000000000..516bbb2c70c76
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/CombinedFileRange.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import org.apache.hadoop.fs.FileRange;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A file range that represents a set of underlying file ranges.
+ * This is used when we combine the user's FileRange objects
+ * together into a single read for efficiency.
+ */
+public class CombinedFileRange extends FileRangeImpl {
+ private ArrayList<FileRange> underlying = new ArrayList<>();
+
+ public CombinedFileRange(long offset, long end, FileRange original) {
+ super(offset, (int) (end - offset));
+ this.underlying.add(original);
+ }
+
+ /**
+ * Get the list of ranges that were merged together to form this one.
+ * @return the list of input ranges
+ */
+ public List<FileRange> getUnderlying() {
+ return underlying;
+ }
+
+ /**
+ * Merge this input range into the current one, if it is compatible.
+ * It is assumed that otherOffset is greater than or equal to the current offset,
+ * which typically happens by sorting the input ranges on offset.
+ * @param otherOffset the offset to consider merging
+ * @param otherEnd the end to consider merging
+ * @param other the underlying FileRange to add if we merge
+ * @param minSeek the minimum distance that we'll seek without merging the
+ * ranges together
+ * @param maxSize the maximum size that we'll merge into a single range
+ * @return true if we have merged the range into this one
+ */
+ public boolean merge(long otherOffset, long otherEnd, FileRange other,
+ int minSeek, int maxSize) {
+ long end = this.getOffset() + this.getLength();
+ long newEnd = Math.max(end, otherEnd);
+ if (otherOffset - end >= minSeek || newEnd - this.getOffset() > maxSize) {
+ return false;
+ }
+ this.setLength((int) (newEnd - this.getOffset()));
+ underlying.add(other);
+ return true;
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileRangeImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileRangeImpl.java
new file mode 100644
index 0000000000000..041e5f0a8d2d7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileRangeImpl.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.impl;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileRange;
+
+/**
+ * A range of bytes from a file with an optional buffer to read those bytes
+ * for zero copy. This shouldn't be created directly via constructor; rather,
+ * the factory defined in {@code FileRange#createFileRange} should be used.
+ */
+@InterfaceAudience.Private
+public class FileRangeImpl implements FileRange {
+ private long offset;
+ private int length;
+ private CompletableFuture<ByteBuffer> reader;
+
+ public FileRangeImpl(long offset, int length) {
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public String toString() {
+ return "range[" + offset + "," + (offset + length) + ")";
+ }
+
+ @Override
+ public long getOffset() {
+ return offset;
+ }
+
+ @Override
+ public int getLength() {
+ return length;
+ }
+
+ public void setOffset(long offset) {
+ this.offset = offset;
+ }
+
+ public void setLength(int length) {
+ this.length = length;
+ }
+
+ @Override
+ public void setData(CompletableFuture<ByteBuffer> pReader) {
+ this.reader = pReader;
+ }
+
+ @Override
+ public CompletableFuture<ByteBuffer> getData() {
+ return reader;
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
index de86bab6d3324..eace6417dcd68 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
@@ -76,7 +76,7 @@ synchronized void returnToPool(ChannelSftp channel) {
ConnectionInfo info = con2infoMap.get(channel);
HashSet<ChannelSftp> cons = idleConnections.get(info);
if (cons == null) {
- cons = new HashSet();
+ cons = new HashSet<>();
idleConnections.put(info, cons);
}
cons.add(channel);
@@ -94,7 +94,7 @@ synchronized void shutdown() {
Set<ChannelSftp> cons = con2infoMap.keySet();
if (cons != null && cons.size() > 0) {
// make a copy since we need to modify the underlying Map
- Set<ChannelSftp> copy = new HashSet(cons);
+ Set<ChannelSftp> copy = new HashSet<>(cons);
// Initiate disconnect from all outstanding connections
for (ChannelSftp con : copy) {
try {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index 4dd20d108428e..1228f76d846ab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -165,7 +165,7 @@ public String getOptValue(String option) {
* @return Set{@literal <}String{@literal >} of the enabled options
*/
public Set getOpts() {
- Set<String> optSet = new HashSet();
+ Set<String> optSet = new HashSet<>();
for (Map.Entry entry : options.entrySet()) {
if (entry.getValue()) {
optSet.add(entry.getKey());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
index 199038a751226..07baea89dd604 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
@@ -96,7 +96,7 @@ private static void addExpression(Class> clazz) {
private Expression rootExpression;
/** Set of path items returning a {@link Result#STOP} result. */
- private HashSet<Path> stopPaths = new HashSet();
+ private HashSet<Path> stopPaths = new HashSet<>();
/** Register the expressions with the expression factory. */
private static void registerExpressions(ExpressionFactory factory) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java
index c458269c3510d..c04c1bb47fcea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java
@@ -53,6 +53,9 @@ public final class StoreStatisticNames {
/** {@value}. */
public static final String OP_CREATE = "op_create";
+ /** {@value}. */
+ public static final String OP_CREATE_FILE = "op_createfile";
+
/** {@value}. */
public static final String OP_CREATE_NON_RECURSIVE =
"op_create_non_recursive";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index da3955b125e84..e31a701a6eaa7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1037,7 +1037,7 @@ public FileSystem[] getChildFileSystems() {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
fsState.getMountPoints();
Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
- Set<FileSystem> children = new HashSet();
+ Set<FileSystem> children = new HashSet<>();
for (InodeTree.MountPoint mountPoint : mountPoints) {
FileSystem targetFs = fsMap.get(mountPoint.src);
children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java
index aa5f8731c54a7..b30e7cfb9c5f0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java
@@ -45,4 +45,9 @@ public interface ByteBufferPool {
* @param buffer a direct bytebuffer
*/
void putBuffer(ByteBuffer buffer);
+
+ /**
+ * Clear the buffer pool thus releasing all the buffers.
+ */
+ default void release() { }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 6a162c3ff2087..c4c2940622729 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -36,8 +36,8 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
-public final class ElasticByteBufferPool implements ByteBufferPool {
- private static final class Key implements Comparable<Key> {
+public class ElasticByteBufferPool implements ByteBufferPool {
+ protected static final class Key implements Comparable<Key> {
private final int capacity;
private final long insertionTime;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WeakReferencedElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WeakReferencedElasticByteBufferPool.java
new file mode 100644
index 0000000000000..c71c44e798a65
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WeakReferencedElasticByteBufferPool.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.lang.ref.WeakReference;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.VisibleForTesting;
+
+/**
+ * Buffer pool implementation which uses weak references to store
+ * buffers in the pool, such that they are garbage collected when
+ * there are no references to the buffer during a gc run. This is
+ * important as direct buffers don't get garbage collected automatically
+ * during a gc run as they are not stored on heap memory.
+ * Also the buffers are stored in a tree map which helps in returning
+ * smallest buffer whose size is just greater than requested length.
+ * This is a thread safe implementation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class WeakReferencedElasticByteBufferPool extends ElasticByteBufferPool {
+
+ /**
+ * Map to store direct byte buffers of different sizes in the pool.
+ * Used tree map such that we can return next greater than capacity
+ * buffer if buffer with exact capacity is unavailable.
+ * This must be accessed in synchronized blocks.
+ */
+ private final TreeMap<Key, WeakReference<ByteBuffer>> directBuffers =
+ new TreeMap<>();
+
+ /**
+ * Map to store heap based byte buffers of different sizes in the pool.
+ * Used tree map such that we can return next greater than capacity
+ * buffer if buffer with exact capacity is unavailable.
+ * This must be accessed in synchronized blocks.
+ */
+ private final TreeMap<Key, WeakReference<ByteBuffer>> heapBuffers =
+ new TreeMap<>();
+
+ /**
+ * Method to get desired buffer tree.
+ * @param isDirect whether the buffer is heap based or direct.
+ * @return corresponding buffer tree.
+ */
+ private TreeMap<Key, WeakReference<ByteBuffer>> getBufferTree(boolean isDirect) {
+ return isDirect
+ ? directBuffers
+ : heapBuffers;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param direct whether we want a direct byte buffer or a heap one.
+ * @param length length of requested buffer.
+ * @return returns equal or next greater than capacity buffer from
+ * pool if already available and not garbage collected else creates
+ * a new buffer and return it.
+ */
+ @Override
+ public synchronized ByteBuffer getBuffer(boolean direct, int length) {
+ TreeMap<Key, WeakReference<ByteBuffer>> buffersTree = getBufferTree(direct);
+
+ // Scan the entire tree and remove all weak null references.
+ buffersTree.entrySet().removeIf(next -> next.getValue().get() == null);
+
+ Map.Entry<Key, WeakReference<ByteBuffer>> entry =
+ buffersTree.ceilingEntry(new Key(length, 0));
+ // If there is no buffer present in the pool with desired size.
+ if (entry == null) {
+ return direct ? ByteBuffer.allocateDirect(length) :
+ ByteBuffer.allocate(length);
+ }
+ // buffer is available in the pool and not garbage collected.
+ WeakReference<ByteBuffer> bufferInPool = entry.getValue();
+ buffersTree.remove(entry.getKey());
+ ByteBuffer buffer = bufferInPool.get();
+ if (buffer != null) {
+ return buffer;
+ }
+ // buffer was in pool but already got garbage collected.
+ return direct
+ ? ByteBuffer.allocateDirect(length)
+ : ByteBuffer.allocate(length);
+ }
+
+ /**
+ * Return buffer to the pool.
+ * @param buffer buffer to be returned.
+ */
+ @Override
+ public synchronized void putBuffer(ByteBuffer buffer) {
+ buffer.clear();
+ TreeMap<Key, WeakReference<ByteBuffer>> buffersTree = getBufferTree(buffer.isDirect());
+ // Buffers are indexed by (capacity, time).
+ // If our key is not unique on the first try, we try again, since the
+ // time will be different. Since we use nanoseconds, it's pretty
+ // unlikely that we'll loop even once, unless the system clock has a
+ // poor granularity or multi-socket systems have clocks slightly out
+ // of sync.
+ while (true) {
+ Key keyToInsert = new Key(buffer.capacity(), System.nanoTime());
+ if (!buffersTree.containsKey(keyToInsert)) {
+ buffersTree.put(keyToInsert, new WeakReference<>(buffer));
+ return;
+ }
+ }
+ }
+
+ /**
+ * Clear the buffer pool thus releasing all the buffers.
+ * The caller must remove all references of
+ * existing buffers before calling this method to avoid
+ * memory leaks.
+ */
+ @Override
+ public synchronized void release() {
+ heapBuffers.clear();
+ directBuffers.clear();
+ }
+
+ /**
+ * Get current buffers count in the pool.
+ * @param isDirect whether we want to count the heap or direct buffers.
+ * @return count of buffers.
+ */
+ @VisibleForTesting
+ public synchronized int getCurrentBuffersCount(boolean isDirect) {
+ return isDirect
+ ? directBuffers.size()
+ : heapBuffers.size();
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
index 69e8c99a1f4da..1f095c6c6736e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
@@ -109,7 +109,7 @@ private static boolean payback(Map, Set> pool, T codec) {
synchronized (pool) {
codecSet = pool.get(codecClass);
if (codecSet == null) {
- codecSet = new HashSet();
+ codecSet = new HashSet<>();
pool.put(codecClass, codecSet);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index d2ffb22eaafb3..b407ddb11046c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -26,7 +26,6 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/ErasureCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/ErasureCodec.java
index c75eaead83d01..22ab632a49512 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/ErasureCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/ErasureCodec.java
@@ -19,7 +19,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.io.erasurecode.ErasureCodecOptions;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
index 3ebbcd912dc71..60210ccd920c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
@@ -28,7 +28,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.InterruptedIOException;
import java.lang.reflect.Method;
import java.util.Iterator;
import java.util.Queue;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 0b66347f1f90f..d7693f868eb30 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.ietf.jgss.GSSException;
import org.apache.hadoop.classification.VisibleForTesting;
import org.slf4j.Logger;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerialization.java
index cfbc60d10452b..544958e682a50 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerialization.java
@@ -64,7 +64,7 @@ public synchronized boolean accept(Class> c) {
private void getPackages() {
String[] pkgList = getConf().getStrings(AVRO_REFLECT_PACKAGES);
- packages = new HashSet();
+ packages = new HashSet<>();
if (pkgList != null) {
for (String pkg : pkgList) {
packages.add(pkg.trim());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java
index 49029f97b3d29..f5f212b29276d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java
@@ -69,7 +69,7 @@ private void fetchServerMethods(Method method) throws IOException {
}
int[] serverMethodsCodes = serverInfo.getMethods();
if (serverMethodsCodes != null) {
- serverMethods = new HashSet(serverMethodsCodes.length);
+ serverMethods = new HashSet<>(serverMethodsCodes.length);
for (int m : serverMethodsCodes) {
this.serverMethods.add(Integer.valueOf(m));
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 90f730d38836d..e79612f7a5a0f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -65,9 +65,12 @@
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
import java.util.stream.Collectors;
import javax.security.sasl.Sasl;
@@ -127,6 +130,8 @@
import org.apache.hadoop.tracing.TraceUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.classification.VisibleForTesting;
+
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.thirdparty.protobuf.ByteString;
import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream;
import org.apache.hadoop.thirdparty.protobuf.Message;
@@ -500,6 +505,11 @@ protected ResponseBuffer initialValue() {
private Responder responder = null;
private Handler[] handlers = null;
private final AtomicInteger numInProcessHandler = new AtomicInteger();
+ private final LongAdder totalRequests = new LongAdder();
+ private long lastSeenTotalRequests = 0;
+ private long totalRequestsPerSecond = 0;
+ private final long metricsUpdaterInterval;
+ private final ScheduledExecutorService scheduledExecutorService;
private boolean logSlowRPC = false;
@@ -515,6 +525,14 @@ public int getNumInProcessHandler() {
return numInProcessHandler.get();
}
+ public long getTotalRequests() {
+ return totalRequests.sum();
+ }
+
+ public long getTotalRequestsPerSecond() {
+ return totalRequestsPerSecond;
+ }
+
/**
* Sets slow RPC flag.
* @param logSlowRPCFlag input logSlowRPCFlag.
@@ -578,6 +596,7 @@ void logSlowRpcCalls(String methodName, Call call,
}
void updateMetrics(Call call, long startTime, boolean connDropped) {
+ totalRequests.increment();
// delta = handler + processing + response
long deltaNanos = Time.monotonicNowNanos() - startTime;
long timestampNanos = call.timestampNanos;
@@ -3304,6 +3323,14 @@ protected Server(String bindAddress, int port,
this.exceptionsHandler.addTerseLoggingExceptions(StandbyException.class);
this.exceptionsHandler.addTerseLoggingExceptions(
HealthCheckFailedException.class);
+ this.metricsUpdaterInterval =
+ conf.getLong(CommonConfigurationKeysPublic.IPC_SERVER_METRICS_UPDATE_RUNNER_INTERVAL,
+ CommonConfigurationKeysPublic.IPC_SERVER_METRICS_UPDATE_RUNNER_INTERVAL_DEFAULT);
+ this.scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
+ new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Hadoop-Metrics-Updater-%d")
+ .build());
+ this.scheduledExecutorService.scheduleWithFixedDelay(new MetricsUpdateRunner(),
+ metricsUpdaterInterval, metricsUpdaterInterval, TimeUnit.MILLISECONDS);
}
public synchronized void addAuxiliaryListener(int auxiliaryPort)
@@ -3598,10 +3625,25 @@ public synchronized void stop() {
}
responder.interrupt();
notifyAll();
+ shutdownMetricsUpdaterExecutor();
this.rpcMetrics.shutdown();
this.rpcDetailedMetrics.shutdown();
}
+ private void shutdownMetricsUpdaterExecutor() {
+ this.scheduledExecutorService.shutdown();
+ try {
+ boolean isExecutorShutdown =
+ this.scheduledExecutorService.awaitTermination(3, TimeUnit.SECONDS);
+ if (!isExecutorShutdown) {
+ LOG.info("Hadoop Metrics Updater executor could not be shutdown.");
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ LOG.info("Hadoop Metrics Updater executor shutdown interrupted.", e);
+ }
+ }
+
/**
* Wait for the server to be stopped.
* Does not wait for all subthreads to finish.
@@ -4061,4 +4103,32 @@ protected int getMaxIdleTime() {
public String getServerName() {
return serverName;
}
+
+ /**
+ * Server metrics updater thread, used to update some metrics on a regular basis.
+ * For instance, requests per second.
+ */
+ private class MetricsUpdateRunner implements Runnable {
+
+ private long lastExecuted = 0;
+
+ @Override
+ public synchronized void run() {
+ long currentTime = Time.monotonicNow();
+ if (lastExecuted == 0) {
+ lastExecuted = currentTime - metricsUpdaterInterval;
+ }
+ long currentTotalRequests = totalRequests.sum();
+ long totalRequestsDiff = currentTotalRequests - lastSeenTotalRequests;
+ lastSeenTotalRequests = currentTotalRequests;
+ if ((currentTime - lastExecuted) > 0) {
+ double totalRequestsPerSecInDouble =
+ (double) totalRequestsDiff / TimeUnit.MILLISECONDS.toSeconds(
+ currentTime - lastExecuted);
+ totalRequestsPerSecond = ((long) totalRequestsPerSecInDouble);
+ }
+ lastExecuted = currentTime;
+ }
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index a67530b3c97b2..bf21e3865fa8a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -151,6 +151,16 @@ public String numOpenConnectionsPerUser() {
return server.getNumDroppedConnections();
}
+ @Metric("Number of total requests")
+ public long getTotalRequests() {
+ return server.getTotalRequests();
+ }
+
+ @Metric("Number of total requests per second")
+ public long getTotalRequestsPerSecond() {
+ return server.getTotalRequestsPerSecond();
+ }
+
public TimeUnit getMetricsTimeUnit() {
return metricsTimeUnit;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
index 19696bd839400..90b5da01c062f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
@@ -19,11 +19,10 @@
package org.apache.hadoop.metrics2.lib;
import java.lang.reflect.Method;
+import java.util.HashSet;
import java.util.Set;
import static org.apache.hadoop.util.Preconditions.*;
-import org.apache.hadoop.util.Sets;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -44,7 +43,7 @@
public class MutableRates extends MutableMetric {
static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
private final MetricsRegistry registry;
- private final Set<Class<?>> protocolCache = Sets.newHashSet();
+ private final Set<Class<?>> protocolCache = new HashSet<>();
MutableRates(MetricsRegistry registry) {
this.registry = checkNotNull(registry, "metrics registry");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
index dc37f96f4f449..4c5f0a844aaab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.hadoop.util.Sets;
import java.lang.ref.WeakReference;
import java.lang.reflect.Method;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
@@ -52,7 +52,7 @@ public class MutableRatesWithAggregation extends MutableMetric {
LoggerFactory.getLogger(MutableRatesWithAggregation.class);
private final Map<String, MutableRate> globalMetrics =
new ConcurrentHashMap<>();
- private final Set<Class<?>> protocolCache = Sets.newHashSet();
+ private final Set<Class<?>> protocolCache = new HashSet<>();
private final ConcurrentLinkedDeque>>
weakReferenceQueue = new ConcurrentLinkedDeque<>();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java
index f050219398721..5a13b00098a44 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java
@@ -115,7 +115,7 @@ public String dumpTopology() {
builder.append("Mapping: ").append(toString()).append("\n");
if (rack != null) {
builder.append("Map:\n");
- Set<String> switches = new HashSet();
+ Set<String> switches = new HashSet<>();
for (Map.Entry entry : rack.entrySet()) {
builder.append(" ")
.append(entry.getKey())
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 6644b3911b844..ebb354e7db3cb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -1086,7 +1086,7 @@ private void interAddNodeWithEmptyRack(Node node) {
String rackname = node.getNetworkLocation();
Set nodes = rackMap.get(rackname);
if (nodes == null) {
- nodes = new HashSet();
+ nodes = new HashSet<>();
}
if (!decommissionNodes.contains(node.getName())) {
nodes.add(node.getName());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
index 6f799c1542095..deca6f1152ba4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
@@ -109,7 +109,7 @@ public void cacheGroupsAdd(List groups) throws IOException {
@Override
public synchronized Set getGroupsSet(String user) throws IOException {
- Set groupSet = new HashSet();
+ Set groupSet = new HashSet<>();
Set groups = null;
for (GroupMappingServiceProvider provider : providersList) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingServiceProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingServiceProvider.java
index 86edab7de7097..08cacdc248fa4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingServiceProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingServiceProvider.java
@@ -18,11 +18,9 @@
package org.apache.hadoop.security;
import java.io.IOException;
-import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* An interface for the implementation of {@literal <}userId,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java
index aa06c59a64814..5e466033fb713 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java
@@ -65,7 +65,7 @@ public static List getNetgroupNames() {
}
private static Set getGroups() {
- Set allGroups = new HashSet ();
+ Set allGroups = new HashSet<>();
for (Set userGroups : userToNetgroupsMap.values()) {
allGroups.addAll(userGroups);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
index 39dc29a79e1f6..6fabbfb47b9f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
@@ -105,8 +105,8 @@ public AccessControlList(String users, String groups) {
* @param userGroupStrings build ACL from array of Strings
*/
private void buildACL(String[] userGroupStrings) {
- users = new HashSet();
- groups = new HashSet();
+ users = new HashSet<>();
+ groups = new HashSet<>();
for (String aclPart : userGroupStrings) {
if (aclPart != null && isWildCardACLValue(aclPart)) {
allAllowed = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyServers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyServers.java
index 410e25f583966..6f5283074dca6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyServers.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyServers.java
@@ -33,7 +33,7 @@ public static void refresh() {
}
public static void refresh(Configuration conf){
- Collection tempServers = new HashSet();
+ Collection tempServers = new HashSet<>();
// trusted proxy servers such as http proxies
for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
InetSocketAddress addr = new InetSocketAddress(host, 0);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
index 059cdc4b653de..ef342f257a937 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
@@ -22,7 +22,6 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.Filter;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
index b81ed8e90155e..7363ca0ba6450 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
@@ -94,7 +94,7 @@ public void init(FilterConfig filterConfig) throws ServletException {
void parseBrowserUserAgents(String userAgents) {
String[] agentsArray = userAgents.split(",");
- browserUserAgents = new HashSet();
+ browserUserAgents = new HashSet<>();
for (String patternString : agentsArray) {
browserUserAgents.add(Pattern.compile(patternString));
}
@@ -102,7 +102,7 @@ void parseBrowserUserAgents(String userAgents) {
void parseMethodsToIgnore(String mti) {
String[] methods = mti.split(",");
- methodsToIgnore = new HashSet();
+ methodsToIgnore = new HashSet<>();
for (int i = 0; i < methods.length; i++) {
methodsToIgnore.add(methods[i]);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index c6049a91b5a51..3dc5017ba6377 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -27,7 +27,6 @@
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
-import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index c85595e922279..d0c0fac6e88df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -518,9 +518,9 @@ protected DelegationTokenInformation checkToken(TokenIdent identifier)
}
long now = Time.now();
if (info.getRenewDate() < now) {
- err =
- "Token has" + identifier.getRealUser() + "expired, current time: " + Time.formatTime(now)
- + " expected renewal time: " + Time.formatTime(info.getRenewDate());
+ err = "Token " + identifier.getRealUser() + " has expired, current time: "
+ + Time.formatTime(now) + " expected renewal time: " + Time
+ .formatTime(info.getRenewDate());
LOG.info("{}, Token={}", err, formatTokenId(identifier));
throw new InvalidToken(err);
}
@@ -716,7 +716,7 @@ public String getTrackingId() {
/** Remove expired delegation tokens from cache */
private void removeExpiredToken() throws IOException {
long now = Time.now();
- Set expiredTokens = new HashSet();
+ Set expiredTokens = new HashSet<>();
synchronized (this) {
Iterator> i =
currentTokens.entrySet().iterator();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index b55214451ec25..f4ede6f35edb0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -89,7 +89,7 @@ public abstract class DelegationTokenAuthenticationHandler
public static final String TOKEN_KIND = PREFIX + "token-kind";
- private static final Set DELEGATION_TOKEN_OPS = new HashSet();
+ private static final Set DELEGATION_TOKEN_OPS = new HashSet<>();
public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
"hadoop.security.delegation-token.ugi";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
index 614c0d3b36bcf..1f18b1c2f6ae0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
@@ -20,8 +20,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
-import javax.servlet.http.HttpServletRequest;
-
/**
* Util class that returns the remote {@link UserGroupInformation} in scope
* for the HTTP request.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
index 47aa9cc71a12e..31dfe594207be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
@@ -58,7 +58,7 @@ public FileBasedIPList(String fileName) {
lines = null;
}
if (lines != null) {
- addressList = new MachineList(new HashSet(Arrays.asList(lines)));
+ addressList = new MachineList(new HashSet<>(Arrays.asList(lines)));
} else {
addressList = null;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java
index e51f7b14a60e0..268b4e166e8a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java
@@ -20,7 +20,6 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 5141740a3d23e..d94668356e261 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -135,7 +135,7 @@ public static void readFileToMapWithFileInputStream(String type,
if (xmlInput) {
readXmlFileToMapWithFileInputStream(type, filename, inputStream, map);
} else {
- HashSet nodes = new HashSet();
+ HashSet nodes = new HashSet<>();
readFileToSetWithFileInputStream(type, filename, inputStream, nodes);
for (String node : nodes) {
map.put(node, null);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 0bba79fd77f14..52c6c4505226a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -297,11 +297,12 @@ public void save(FileSystem fs, Path path, T instance,
}
/**
- * Write the JSON as bytes, then close the file.
+ * Write the JSON as bytes, then close the stream.
+ * @param instance instance to write
* @param dataOutputStream an output stream that will always be closed
* @throws IOException on any failure
*/
- private void writeJsonAsBytes(T instance,
+ public void writeJsonAsBytes(T instance,
OutputStream dataOutputStream) throws IOException {
try {
dataOutputStream.write(toBytes(instance));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index 2438b714ffcd3..155c4f9c5f498 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -21,7 +21,6 @@
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
-import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index fbdd33331b62b..e85f850514b16 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -249,7 +249,7 @@ TimeUnit getTimeUnit() {
}
private final Set hooks =
- Collections.synchronizedSet(new HashSet());
+ Collections.synchronizedSet(new HashSet<>());
private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CloseableTaskPoolSubmitter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CloseableTaskPoolSubmitter.java
index 26b687a3c5610..695da7e932279 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CloseableTaskPoolSubmitter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CloseableTaskPoolSubmitter.java
@@ -34,7 +34,7 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
-public final class CloseableTaskPoolSubmitter implements TaskPool.Submitter,
+public class CloseableTaskPoolSubmitter implements TaskPool.Submitter,
Closeable {
/** Executors. */
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 47786e473a52f..04cbd9fedf83c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -104,6 +104,8 @@ The default timeunit used for RPC metrics is milliseconds (as per the below desc
| `rpcLockWaitTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
| `rpcLockWaitTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
| `rpcLockWaitTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `TotalRequests` | Total num of requests served by the RPC server. |
+| `TotalRequestsPerSeconds` | Total num of requests per second served by the RPC server. |
RetryCache/NameNodeRetryCache
-----------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
index 090696483be34..197b999c81f66 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
@@ -443,6 +443,45 @@ The semantics of this are exactly equivalent to
That is, the buffer is filled entirely with the contents of the input source
from position `position`
+### `default void readVectored(List<? extends FileRange> ranges, IntFunction<ByteBuffer> allocate)`
+
+Read fully data for a list of ranges asynchronously. The default implementation
+iterates through the ranges, tries to coalesce the ranges based on values of
+`minSeekForVectorReads` and `maxReadSizeForVectorReads` and then reads each merged
+range synchronously, but the intent is that subclasses can provide more efficient
+implementations. Reading into both direct and heap byte buffers is supported.
+Also, clients are encouraged to use `WeakReferencedElasticByteBufferPool` for
+allocating buffers such that even direct buffers are garbage collected when
+they are no longer referenced.
+
+Note: Don't use direct buffers for reading from ChecksumFileSystem as that may
+lead to memory fragmentation explained in HADOOP-18296.
+
+
+#### Preconditions
+
+For each requested range:
+
+ range.getOffset >= 0 else raise IllegalArgumentException
+ range.getLength >= 0 else raise EOFException
+
+#### Postconditions
+
+For each requested range:
+
+ range.getData() returns CompletableFuture which will have data
+ from range.getOffset to range.getLength.
+
+### `minSeekForVectorReads()`
+
+The smallest reasonable seek. Two ranges won't be merged together if the difference between
+end of first and start of next range is more than this value.
+
+### `maxReadSizeForVectorReads()`
+
+Maximum number of bytes which can be read in one go after merging the ranges.
+Two ranges won't be merged if the combined data to be read is more than this value.
+Essentially setting this to 0 will disable the merging of ranges.
## Consistency
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
index db630e05c22d4..16a14150ef949 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
@@ -230,7 +230,7 @@ Note: some operations on the input stream, such as `seek()` may not attempt any
at all. Such operations MAY NOT raise exceotions when interacting with
nonexistent/unreadable files.
-## Standard `openFile()` options since Hadoop 3.3.3
+## Standard `openFile()` options since hadoop branch-3.3
These are options which `FileSystem` and `FileContext` implementation
MUST recognise and MAY support by changing the behavior of
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
index 64dda2df8c63c..59a93c5887a1f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
@@ -26,7 +26,7 @@ create a new file or open an existing file on `FileSystem` for write.
## Invariants
The `FSDataOutputStreamBuilder` interface does not validate parameters
-and modify the state of `FileSystem` until [`build()`](#Builder.build) is
+and modify the state of `FileSystem` until `build()` is
invoked.
## Implementation-agnostic parameters.
@@ -110,7 +110,7 @@ of `FileSystem`.
#### Implementation Notes
The concrete `FileSystem` and/or `FSDataOutputStreamBuilder` implementation
-MUST verify that implementation-agnostic parameters (i.e., "syncable") or
+MUST verify that implementation-agnostic parameters (i.e., "syncable") or
implementation-specific parameters (i.e., "foofs:cache")
are supported. `FileSystem` will satisfy optional parameters (via `opt(key, ...)`)
on best effort. If the mandatory parameters (via `must(key, ...)`) can not be satisfied
@@ -182,3 +182,58 @@ see `FileSystem#create(path, ...)` and `FileSystem#append()`.
result = FSDataOutputStream
The result is `FSDataOutputStream` to be used to write data to filesystem.
+
+
+## S3A-specific options
+
+Here are the custom options which the S3A Connector supports.
+
+| Name | Type | Meaning |
+|-----------------------------|-----------|----------------------------------------|
+| `fs.s3a.create.performance` | `boolean` | create a file with maximum performance |
+| `fs.s3a.create.header` | `string` | prefix for user supplied headers |
+
+### `fs.s3a.create.performance`
+
+Prioritize file creation performance over safety checks for filesystem consistency.
+
+This:
+1. Skips the `LIST` call which makes sure a file is being created over a directory.
+ Risk: a file is created over a directory.
+1. Ignores the overwrite flag.
+1. Never issues a `DELETE` call to delete parent directory markers.
+
+It is possible to probe an S3A Filesystem instance for this capability through
+the `hasPathCapability(path, "fs.s3a.create.performance")` check.
+
+Creating files with this option over existing directories is likely
+to make S3A filesystem clients behave inconsistently.
+
+Operations optimized for directories (e.g. listing calls) are likely
+to see the directory tree not the file; operations optimized for
+files (`getFileStatus()`, `isFile()`) more likely to see the file.
+The exact form of the inconsistencies, and which operations/parameters
+trigger this are undefined and may change between even minor releases.
+
+Using this option is the equivalent of pressing and holding down the
+"Electronic Stability Control"
+button on a rear-wheel drive car for five seconds: the safety checks are off.
+Things will be faster if the driver knew what they were doing.
+If they didn't, the fact they had held the button down will
+be used as evidence at the inquest as proof that they made a
+conscious decision to choose speed over safety and
+that the outcome was their own fault.
+
+Accordingly: *Use if and only if you are confident that the conditions are met.*
+
+### `fs.s3a.create.header` User-supplied header support
+
+Options with the prefix `fs.s3a.create.header.` will be added to the
+S3 object metadata as "user defined metadata".
+This metadata is visible to all applications. It can also be retrieved through the
+FileSystem/FileContext `listXAttrs()` and `getXAttrs()` API calls with the prefix `header.`
+
+When an object is renamed, the metadata is propagated to the copy created.
+
+It is possible to probe an S3A Filesystem instance for this capability through
+the `hasPathCapability(path, "fs.s3a.create.header")` check.
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java
index 5ef129cdc87ed..2ccfcfebb27e3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java
@@ -23,7 +23,6 @@
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
-import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.ArrayList;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 9fcf4a5eb55a2..c31229ba9fcf1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -21,21 +21,17 @@
import java.util.HashSet;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ftp.FtpConfigKeys;
import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.ha.SshFenceByTcpPort;
import org.apache.hadoop.ha.ZKFailoverController;
-import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.security.CompositeGroupsMapping;
import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
import org.apache.hadoop.security.LdapGroupsMapping;
import org.apache.hadoop.security.RuleBasedLdapGroupsMapping;
-import org.apache.hadoop.security.http.CrossOriginFilter;
import org.apache.hadoop.security.ssl.SSLFactory;
/**
@@ -80,9 +76,9 @@ public void initializeMemberVariables() {
};
// Initialize used variables
- xmlPropsToSkipCompare = new HashSet();
- xmlPrefixToSkipCompare = new HashSet();
- configurationPropsToSkipCompare = new HashSet();
+ xmlPropsToSkipCompare = new HashSet<>();
+ xmlPrefixToSkipCompare = new HashSet<>();
+ configurationPropsToSkipCompare = new HashSet<>();
// Set error modes
errorIfMissingConfigProps = true;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
index 2c0d6025f2688..83837862ac47e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
@@ -30,7 +30,6 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
index 8453889b53a5a..072baf188de72 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -25,7 +25,6 @@
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
index 5da973c6a761d..4805fca1d49f4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.crypto.key;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeoutException;
@@ -32,7 +34,6 @@
import org.junit.Assert;
import org.junit.Test;
-import org.apache.hadoop.util.Sets;
public class TestValueQueue {
Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
@@ -103,10 +104,10 @@ public void testWarmUp() throws Exception {
Assert.assertEquals(5, fillInfos[0].num);
Assert.assertEquals(5, fillInfos[1].num);
Assert.assertEquals(5, fillInfos[2].num);
- Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
- Sets.newHashSet(fillInfos[0].key,
+ Assert.assertEquals(new HashSet<>(Arrays.asList("k1", "k2", "k3")),
+ new HashSet<>(Arrays.asList(fillInfos[0].key,
fillInfos[1].key,
- fillInfos[2].key));
+ fillInfos[2].key)));
vq.shutdown();
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 886297b745c0f..3bc96c3e2fce0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -39,6 +39,8 @@
import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
@@ -65,7 +67,6 @@
import org.junit.rules.Timeout;
import org.mockito.Mockito;
-import org.apache.hadoop.util.Sets;
public class TestLoadBalancingKMSClientProvider {
@@ -86,8 +87,8 @@ public void testCreation() throws Exception {
KMSClientProvider[] providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(1, providers.length);
- assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/"),
- Sets.newHashSet(providers[0].getKMSUrl()));
+ assertEquals(new HashSet<>(Collections.singleton("http://host1:9600/kms/foo/v1/")),
+ new HashSet<>(Collections.singleton(providers[0].getKMSUrl())));
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3:9600/kms/foo"), conf);
@@ -95,12 +96,12 @@ public void testCreation() throws Exception {
providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
- assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/",
+ assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/",
"http://host2:9600/kms/foo/v1/",
- "http://host3:9600/kms/foo/v1/"),
- Sets.newHashSet(providers[0].getKMSUrl(),
+ "http://host3:9600/kms/foo/v1/")),
+ new HashSet<>(Arrays.asList(providers[0].getKMSUrl(),
providers[1].getKMSUrl(),
- providers[2].getKMSUrl()));
+ providers[2].getKMSUrl())));
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3:9600/kms/foo"), conf);
@@ -108,12 +109,12 @@ public void testCreation() throws Exception {
providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
- assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/",
+ assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/",
"http://host2:9600/kms/foo/v1/",
- "http://host3:9600/kms/foo/v1/"),
- Sets.newHashSet(providers[0].getKMSUrl(),
+ "http://host3:9600/kms/foo/v1/")),
+ new HashSet<>(Arrays.asList(providers[0].getKMSUrl(),
providers[1].getKMSUrl(),
- providers[2].getKMSUrl()));
+ providers[2].getKMSUrl())));
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
index 2ea45231a13a1..6448a9a2fba73 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
@@ -22,7 +22,7 @@
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
import org.junit.Assume;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestCommandFormat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestCommandFormat.java
index 4b855c4940440..084c6a0aef83d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestCommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestCommandFormat.java
@@ -43,9 +43,9 @@ public class TestCommandFormat {
@Before
public void setUp() {
- args = new ArrayList();
- expectedOpts = new HashSet();
- expectedArgs = new ArrayList();
+ args = new ArrayList<>();
+ expectedOpts = new HashSet<>();
+ expectedArgs = new ArrayList<>();
}
@Test
@@ -205,6 +205,6 @@ private static List listOf(String ... objects) {
}
private static Set setOf(String ... objects) {
- return new HashSet(listOf(objects));
+ return new HashSet<>(listOf(objects));
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
index 6415df6310fc2..471d2458f4f46 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
@@ -246,7 +246,7 @@ public void testListLocatedStatus() throws Exception {
// test.har has the following contents:
// dir1/1.txt
// dir1/2.txt
- Set expectedFileNames = new HashSet();
+ Set<String> expectedFileNames = new HashSet<>();
expectedFileNames.add("1.txt");
expectedFileNames.add("2.txt");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
index 44308ea6fc5ea..dce3b956d47ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
@@ -152,7 +152,7 @@ public void testDirectory() throws IOException {
writeFile(fs, FILE1, FILE_LEN);
writeFile(fs, FILE3, FILE_LEN);
- Set filesToFind = new HashSet();
+ Set<Path> filesToFind = new HashSet<>();
filesToFind.add(fs.makeQualified(FILE1));
filesToFind.add(fs.makeQualified(FILE2));
filesToFind.add(fs.makeQualified(FILE3));
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 72287782baac6..5b8c10b3fa6f9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -747,7 +747,7 @@ public void testTrashEmptier() throws Exception {
Path myPath = new Path(TEST_DIR, "test/mkdirs");
mkdir(fs, myPath);
int fileIndex = 0;
- Set checkpoints = new HashSet();
+ Set<String> checkpoints = new HashSet<>();
while (true) {
// Create a file with a new name
Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java
new file mode 100644
index 0000000000000..5d08b02e113d5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.IntBuffer;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.IntFunction;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.mockito.ArgumentMatchers;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.fs.impl.CombinedFileRange;
+import org.apache.hadoop.test.HadoopTestBase;
+
+import static org.apache.hadoop.fs.VectoredReadUtils.sortRanges;
+import static org.apache.hadoop.test.MoreAsserts.assertFutureCompletedSuccessfully;
+import static org.apache.hadoop.test.MoreAsserts.assertFutureFailedExceptionally;
+
+/**
+ * Test behavior of {@link VectoredReadUtils}.
+ */
+public class TestVectoredReadUtils extends HadoopTestBase {
+
+ @Test
+ public void testSliceTo() {
+ final int size = 64 * 1024;
+ ByteBuffer buffer = ByteBuffer.allocate(size);
+ // fill the buffer with data
+ IntBuffer intBuffer = buffer.asIntBuffer();
+ for(int i=0; i < size / Integer.BYTES; ++i) {
+ intBuffer.put(i);
+ }
+ // ensure we don't make unnecessary slices
+ ByteBuffer slice = VectoredReadUtils.sliceTo(buffer, 100,
+ FileRange.createFileRange(100, size));
+ Assertions.assertThat(buffer)
+ .describedAs("Slicing on the same offset shouldn't " +
+ "create a new buffer")
+ .isEqualTo(slice);
+
+ // try slicing a range
+ final int offset = 100;
+ final int sliceStart = 1024;
+ final int sliceLength = 16 * 1024;
+ slice = VectoredReadUtils.sliceTo(buffer, offset,
+ FileRange.createFileRange(offset + sliceStart, sliceLength));
+ // make sure they aren't the same, but use the same backing data
+ Assertions.assertThat(buffer)
+ .describedAs("Slicing on new offset should " +
+ "create a new buffer")
+ .isNotEqualTo(slice);
+ Assertions.assertThat(buffer.array())
+ .describedAs("Slicing should use the same underlying " +
+ "data")
+ .isEqualTo(slice.array());
+ // test the contents of the slice
+ intBuffer = slice.asIntBuffer();
+ for(int i=0; i < sliceLength / Integer.BYTES; ++i) {
+ assertEquals("i = " + i, i + sliceStart / Integer.BYTES, intBuffer.get());
+ }
+ }
+
+ @Test
+ public void testRounding() {
+ for(int i=5; i < 10; ++i) {
+ assertEquals("i = "+ i, 5, VectoredReadUtils.roundDown(i, 5));
+ assertEquals("i = "+ i, 10, VectoredReadUtils.roundUp(i+1, 5));
+ }
+ assertEquals("Error while roundDown", 13, VectoredReadUtils.roundDown(13, 1));
+ assertEquals("Error while roundUp", 13, VectoredReadUtils.roundUp(13, 1));
+ }
+
+ @Test
+ public void testMerge() {
+ FileRange base = FileRange.createFileRange(2000, 1000);
+ CombinedFileRange mergeBase = new CombinedFileRange(2000, 3000, base);
+
+ // test when the gap between is too big
+ assertFalse("Large gap ranges shouldn't get merged", mergeBase.merge(5000, 6000,
+ FileRange.createFileRange(5000, 1000), 2000, 4000));
+ assertEquals("Number of ranges in merged range shouldn't increase",
+ 1, mergeBase.getUnderlying().size());
+ assertEquals("post merge offset", 2000, mergeBase.getOffset());
+ assertEquals("post merge length", 1000, mergeBase.getLength());
+
+ // test when the total size gets exceeded
+ assertFalse("Large size ranges shouldn't get merged", mergeBase.merge(5000, 6000,
+ FileRange.createFileRange(5000, 1000), 2001, 3999));
+ assertEquals("Number of ranges in merged range shouldn't increase",
+ 1, mergeBase.getUnderlying().size());
+ assertEquals("post merge offset", 2000, mergeBase.getOffset());
+ assertEquals("post merge length", 1000, mergeBase.getLength());
+
+ // test when the merge works
+ assertTrue("ranges should get merged ", mergeBase.merge(5000, 6000,
+ FileRange.createFileRange(5000, 1000), 2001, 4000));
+ assertEquals("post merge size", 2, mergeBase.getUnderlying().size());
+ assertEquals("post merge offset", 2000, mergeBase.getOffset());
+ assertEquals("post merge length", 4000, mergeBase.getLength());
+
+ // reset the mergeBase and test with a 10:1 reduction
+ mergeBase = new CombinedFileRange(200, 300, base);
+ assertEquals(200, mergeBase.getOffset());
+ assertEquals(100, mergeBase.getLength());
+ assertTrue("ranges should get merged ", mergeBase.merge(500, 600,
+ FileRange.createFileRange(5000, 1000), 201, 400));
+ assertEquals("post merge size", 2, mergeBase.getUnderlying().size());
+ assertEquals("post merge offset", 200, mergeBase.getOffset());
+ assertEquals("post merge length", 400, mergeBase.getLength());
+ }
+
+ @Test
+ public void testSortAndMerge() {
+ List<FileRange> input = Arrays.asList(
+ FileRange.createFileRange(3000, 100),
+ FileRange.createFileRange(2100, 100),
+ FileRange.createFileRange(1000, 100)
+ );
+ assertFalse("Ranges are non disjoint", VectoredReadUtils.isOrderedDisjoint(input, 100, 800));
+ List<CombinedFileRange> outputList = VectoredReadUtils.mergeSortedRanges(
+ Arrays.asList(sortRanges(input)), 100, 1001, 2500);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(1);
+ CombinedFileRange output = outputList.get(0);
+ Assertions.assertThat(output.getUnderlying())
+ .describedAs("merged range underlying size")
+ .hasSize(3);
+ assertEquals("range[1000,3100)", output.toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 100, 800));
+
+ // the minSeek doesn't allow the first two to merge
+ assertFalse("Ranges are non disjoint",
+ VectoredReadUtils.isOrderedDisjoint(input, 100, 1000));
+ outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+ 100, 1000, 2100);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(2);
+ assertEquals("range[1000,1100)", outputList.get(0).toString());
+ assertEquals("range[2100,3100)", outputList.get(1).toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 100, 1000));
+
+ // the maxSize doesn't allow the third range to merge
+ assertFalse("Ranges are non disjoint",
+ VectoredReadUtils.isOrderedDisjoint(input, 100, 800));
+ outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+ 100, 1001, 2099);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(2);
+ assertEquals("range[1000,2200)", outputList.get(0).toString());
+ assertEquals("range[3000,3100)", outputList.get(1).toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 100, 800));
+
+ // test the round up and round down (the maxSize doesn't allow any merges)
+ assertFalse("Ranges are non disjoint",
+ VectoredReadUtils.isOrderedDisjoint(input, 16, 700));
+ outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+ 16, 1001, 100);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(3);
+ assertEquals("range[992,1104)", outputList.get(0).toString());
+ assertEquals("range[2096,2208)", outputList.get(1).toString());
+ assertEquals("range[2992,3104)", outputList.get(2).toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 16, 700));
+ }
+
+ @Test
+ public void testSortAndMergeMoreCases() throws Exception {
+ List<FileRange> input = Arrays.asList(
+ FileRange.createFileRange(3000, 110),
+ FileRange.createFileRange(3000, 100),
+ FileRange.createFileRange(2100, 100),
+ FileRange.createFileRange(1000, 100)
+ );
+ assertFalse("Ranges are non disjoint",
+ VectoredReadUtils.isOrderedDisjoint(input, 100, 800));
+ List<CombinedFileRange> outputList = VectoredReadUtils.mergeSortedRanges(
+ Arrays.asList(sortRanges(input)), 1, 1001, 2500);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(1);
+ CombinedFileRange output = outputList.get(0);
+ Assertions.assertThat(output.getUnderlying())
+ .describedAs("merged range underlying size")
+ .hasSize(4);
+ assertEquals("range[1000,3110)", output.toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 1, 800));
+
+ outputList = VectoredReadUtils.mergeSortedRanges(
+ Arrays.asList(sortRanges(input)), 100, 1001, 2500);
+ Assertions.assertThat(outputList)
+ .describedAs("merged range size")
+ .hasSize(1);
+ output = outputList.get(0);
+ Assertions.assertThat(output.getUnderlying())
+ .describedAs("merged range underlying size")
+ .hasSize(4);
+ assertEquals("range[1000,3200)", output.toString());
+ assertTrue("merged output ranges are disjoint",
+ VectoredReadUtils.isOrderedDisjoint(outputList, 1, 800));
+
+ }
+
+ @Test
+ public void testMaxSizeZeroDisablesMering() throws Exception {
+ List<FileRange> randomRanges = Arrays.asList(
+ FileRange.createFileRange(3000, 110),
+ FileRange.createFileRange(3000, 100),
+ FileRange.createFileRange(2100, 100)
+ );
+ assertEqualRangeCountsAfterMerging(randomRanges, 1, 1, 0);
+ assertEqualRangeCountsAfterMerging(randomRanges, 1, 0, 0);
+ assertEqualRangeCountsAfterMerging(randomRanges, 1, 100, 0);
+ }
+
+ private void assertEqualRangeCountsAfterMerging(List<FileRange> inputRanges,
+ int chunkSize,
+ int minimumSeek,
+ int maxSize) {
+ List<CombinedFileRange> combinedFileRanges = VectoredReadUtils
+ .mergeSortedRanges(inputRanges, chunkSize, minimumSeek, maxSize);
+ Assertions.assertThat(combinedFileRanges)
+ .describedAs("Mismatch in number of ranges post merging")
+ .hasSize(inputRanges.size());
+ }
+
+ interface Stream extends PositionedReadable, ByteBufferPositionedReadable {
+ // nothing
+ }
+
+ static void fillBuffer(ByteBuffer buffer) {
+ byte b = 0;
+ while (buffer.remaining() > 0) {
+ buffer.put(b++);
+ }
+ }
+
+ @Test
+ public void testReadRangeFromByteBufferPositionedReadable() throws Exception {
+ Stream stream = Mockito.mock(Stream.class);
+ Mockito.doAnswer(invocation -> {
+ fillBuffer(invocation.getArgument(1));
+ return null;
+ }).when(stream).readFully(ArgumentMatchers.anyLong(),
+ ArgumentMatchers.any(ByteBuffer.class));
+ CompletableFuture<ByteBuffer> result =
+ VectoredReadUtils.readRangeFrom(stream, FileRange.createFileRange(1000, 100),
+ ByteBuffer::allocate);
+ assertFutureCompletedSuccessfully(result);
+ ByteBuffer buffer = result.get();
+ assertEquals("Size of result buffer", 100, buffer.remaining());
+ byte b = 0;
+ while (buffer.remaining() > 0) {
+ assertEquals("remain = " + buffer.remaining(), b++, buffer.get());
+ }
+
+ // test an IOException
+ Mockito.reset(stream);
+ Mockito.doThrow(new IOException("foo"))
+ .when(stream).readFully(ArgumentMatchers.anyLong(),
+ ArgumentMatchers.any(ByteBuffer.class));
+ result =
+ VectoredReadUtils.readRangeFrom(stream, FileRange.createFileRange(1000, 100),
+ ByteBuffer::allocate);
+ assertFutureFailedExceptionally(result);
+ }
+
+ static void runReadRangeFromPositionedReadable(IntFunction<ByteBuffer> allocate)
+ throws Exception {
+ PositionedReadable stream = Mockito.mock(PositionedReadable.class);
+ Mockito.doAnswer(invocation -> {
+ byte b=0;
+ byte[] buffer = invocation.getArgument(1);
+ for(int i=0; i < buffer.length; ++i) {
+ buffer[i] = b++;
+ }
+ return null;
+ }).when(stream).readFully(ArgumentMatchers.anyLong(),
+ ArgumentMatchers.any(), ArgumentMatchers.anyInt(),
+ ArgumentMatchers.anyInt());
+ CompletableFuture<ByteBuffer> result =
+ VectoredReadUtils.readRangeFrom(stream, FileRange.createFileRange(1000, 100),
+ allocate);
+ assertFutureCompletedSuccessfully(result);
+ ByteBuffer buffer = result.get();
+ assertEquals("Size of result buffer", 100, buffer.remaining());
+ byte b = 0;
+ while (buffer.remaining() > 0) {
+ assertEquals("remain = " + buffer.remaining(), b++, buffer.get());
+ }
+
+ // test an IOException
+ Mockito.reset(stream);
+ Mockito.doThrow(new IOException("foo"))
+ .when(stream).readFully(ArgumentMatchers.anyLong(),
+ ArgumentMatchers.any(), ArgumentMatchers.anyInt(),
+ ArgumentMatchers.anyInt());
+ result =
+ VectoredReadUtils.readRangeFrom(stream, FileRange.createFileRange(1000, 100),
+ ByteBuffer::allocate);
+ assertFutureFailedExceptionally(result);
+ }
+
+ @Test
+ public void testReadRangeArray() throws Exception {
+ runReadRangeFromPositionedReadable(ByteBuffer::allocate);
+ }
+
+ @Test
+ public void testReadRangeDirect() throws Exception {
+ runReadRangeFromPositionedReadable(ByteBuffer::allocateDirect);
+ }
+
+ static void validateBuffer(String message, ByteBuffer buffer, int start) {
+ byte expected = (byte) start;
+ while (buffer.remaining() > 0) {
+ assertEquals(message + " remain: " + buffer.remaining(), expected++,
+ buffer.get());
+ }
+ }
+
+ @Test
+ public void testReadVectored() throws Exception {
+ List<FileRange> input = Arrays.asList(FileRange.createFileRange(0, 100),
+ FileRange.createFileRange(100_000, 100),
+ FileRange.createFileRange(200_000, 100));
+ Stream stream = Mockito.mock(Stream.class);
+ Mockito.doAnswer(invocation -> {
+ fillBuffer(invocation.getArgument(1));
+ return null;
+ }).when(stream).readFully(ArgumentMatchers.anyLong(),
+ ArgumentMatchers.any(ByteBuffer.class));
+ // should not merge the ranges
+ VectoredReadUtils.readVectored(stream, input, ByteBuffer::allocate);
+ Mockito.verify(stream, Mockito.times(3))
+ .readFully(ArgumentMatchers.anyLong(), ArgumentMatchers.any(ByteBuffer.class));
+ for(int b=0; b < input.size(); ++b) {
+ validateBuffer("buffer " + b, input.get(b).getData().get(), 0);
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 08df1d4d883a6..605ea45649a16 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
+
import org.junit.Test;
import java.io.IOException;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java
new file mode 100644
index 0000000000000..77bcc496ff4a2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.contract;
+
+import java.io.EOFException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.function.IntFunction;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileRange;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.impl.FutureIOSupport;
+import org.apache.hadoop.io.WeakReferencedElasticByteBufferPool;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertCapabilities;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertDatasetEquals;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.returnBuffersToPoolPostRead;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult;
+
+@RunWith(Parameterized.class)
+public abstract class AbstractContractVectoredReadTest extends AbstractFSContractTestBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractContractVectoredReadTest.class);
+
+ public static final int DATASET_LEN = 64 * 1024;
+ protected static final byte[] DATASET = ContractTestUtils.dataset(DATASET_LEN, 'a', 32);
+ protected static final String VECTORED_READ_FILE_NAME = "vectored_file.txt";
+
+ private final IntFunction<ByteBuffer> allocate;
+
+ private final WeakReferencedElasticByteBufferPool pool =
+ new WeakReferencedElasticByteBufferPool();
+
+ private final String bufferType;
+
+ @Parameterized.Parameters(name = "Buffer type : {0}")
+ public static List<String> params() {
+ return Arrays.asList("direct", "array");
+ }
+
+ public AbstractContractVectoredReadTest(String bufferType) {
+ this.bufferType = bufferType;
+ this.allocate = value -> {
+ boolean isDirect = !"array".equals(bufferType);
+ return pool.getBuffer(isDirect, value);
+ };
+ }
+
+ public IntFunction<ByteBuffer> getAllocate() {
+ return allocate;
+ }
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ Path path = path(VECTORED_READ_FILE_NAME);
+ FileSystem fs = getFileSystem();
+ createFile(fs, path, true, DATASET);
+ }
+
+ @Override
+ public void teardown() throws Exception {
+ super.teardown();
+ pool.release();
+ }
+
+ @Test
+ public void testVectoredReadCapability() throws Exception {
+ FileSystem fs = getFileSystem();
+ String[] vectoredReadCapability = new String[]{StreamCapabilities.VECTOREDIO};
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ assertCapabilities(in, vectoredReadCapability, null);
+ }
+ }
+
+ @Test
+ public void testVectoredReadMultipleRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ FileRange fileRange = FileRange.createFileRange(i * 100, 100);
+ fileRanges.add(fileRange);
+ }
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ CompletableFuture<?>[] completableFutures = new CompletableFuture<?>[fileRanges.size()];
+ int i = 0;
+ for (FileRange res : fileRanges) {
+ completableFutures[i++] = res.getData();
+ }
+ CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(completableFutures);
+ combinedFuture.get();
+
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testVectoredReadAndReadFully() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(100, 100));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ byte[] readFullRes = new byte[100];
+ in.readFully(100, readFullRes);
+ ByteBuffer vecRes = FutureIOSupport.awaitFuture(fileRanges.get(0).getData());
+ Assertions.assertThat(vecRes)
+ .describedAs("Result from vectored read and readFully must match")
+ .isEqualByComparingTo(ByteBuffer.wrap(readFullRes));
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ /**
+ * As the minimum seek value is 4*1024,none of the below ranges
+ * will get merged.
+ */
+ @Test
+ public void testDisjointRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(0, 100));
+ fileRanges.add(FileRange.createFileRange(4_000 + 101, 100));
+ fileRanges.add(FileRange.createFileRange(16_000 + 101, 100));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ /**
+ * As the minimum seek value is 4*1024, all the below ranges
+ * will get merged into one.
+ */
+ @Test
+ public void testAllRangesMergedIntoOne() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(0, 100));
+ fileRanges.add(FileRange.createFileRange(4_000 - 101, 100));
+ fileRanges.add(FileRange.createFileRange(8_000 - 101, 100));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ /**
+ * As the minimum seek value is 4*1024, the first three ranges will be
+ * merged into and other two will remain as it is.
+ */
+ @Test
+ public void testSomeRangesMergedSomeUnmerged() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(8 * 1024, 100));
+ fileRanges.add(FileRange.createFileRange(14 * 1024, 100));
+ fileRanges.add(FileRange.createFileRange(10 * 1024, 100));
+ fileRanges.add(FileRange.createFileRange(2 * 1024 - 101, 100));
+ fileRanges.add(FileRange.createFileRange(40 * 1024, 1024));
+ FileStatus fileStatus = fs.getFileStatus(path(VECTORED_READ_FILE_NAME));
+ CompletableFuture<FSDataInputStream> builder =
+ fs.openFile(path(VECTORED_READ_FILE_NAME))
+ .withFileStatus(fileStatus)
+ .build();
+ try (FSDataInputStream in = builder.get()) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testOverlappingRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = getSampleOverlappingRanges();
+ FileStatus fileStatus = fs.getFileStatus(path(VECTORED_READ_FILE_NAME));
+ CompletableFuture<FSDataInputStream> builder =
+ fs.openFile(path(VECTORED_READ_FILE_NAME))
+ .withFileStatus(fileStatus)
+ .build();
+ try (FSDataInputStream in = builder.get()) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testSameRanges() throws Exception {
+ // Same ranges are special case of overlapping only.
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = getSampleSameRanges();
+ CompletableFuture<FSDataInputStream> builder =
+ fs.openFile(path(VECTORED_READ_FILE_NAME))
+ .build();
+ try (FSDataInputStream in = builder.get()) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testSomeRandomNonOverlappingRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(500, 100));
+ fileRanges.add(FileRange.createFileRange(1000, 200));
+ fileRanges.add(FileRange.createFileRange(50, 10));
+ fileRanges.add(FileRange.createFileRange(10, 5));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testConsecutiveRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(500, 100));
+ fileRanges.add(FileRange.createFileRange(600, 200));
+ fileRanges.add(FileRange.createFileRange(800, 100));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testEOFRanges() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(DATASET_LEN, 100));
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ for (FileRange res : fileRanges) {
+ CompletableFuture<ByteBuffer> data = res.getData();
+ try {
+ ByteBuffer buffer = data.get();
+ // Shouldn't reach here.
+ Assert.fail("EOFException must be thrown while reading EOF");
+ } catch (ExecutionException ex) {
+ // ignore as expected.
+ } catch (Exception ex) {
+ LOG.error("Exception while running vectored read ", ex);
+ Assert.fail("Exception while running vectored read " + ex);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testNegativeLengthRange() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(0, -50));
+ verifyExceptionalVectoredRead(fs, fileRanges, IllegalArgumentException.class);
+ }
+
+ @Test
+ public void testNegativeOffsetRange() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(-1, 50));
+ verifyExceptionalVectoredRead(fs, fileRanges, EOFException.class);
+ }
+
+ @Test
+ public void testNormalReadAfterVectoredRead() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = createSampleNonOverlappingRanges();
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges, allocate);
+ // read starting 200 bytes
+ byte[] res = new byte[200];
+ in.read(res, 0, 200);
+ ByteBuffer buffer = ByteBuffer.wrap(res);
+ assertDatasetEquals(0, "normal_read", buffer, 200, DATASET);
+ Assertions.assertThat(in.getPos())
+ .describedAs("Vectored read shouldn't change file pointer.")
+ .isEqualTo(200);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testVectoredReadAfterNormalRead() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges = createSampleNonOverlappingRanges();
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ // read starting 200 bytes
+ byte[] res = new byte[200];
+ in.read(res, 0, 200);
+ ByteBuffer buffer = ByteBuffer.wrap(res);
+ assertDatasetEquals(0, "normal_read", buffer, 200, DATASET);
+ Assertions.assertThat(in.getPos())
+ .describedAs("Vectored read shouldn't change file pointer.")
+ .isEqualTo(200);
+ in.readVectored(fileRanges, allocate);
+ validateVectoredReadResult(fileRanges, DATASET);
+ returnBuffersToPoolPostRead(fileRanges, pool);
+ }
+ }
+
+ @Test
+ public void testMultipleVectoredReads() throws Exception {
+ FileSystem fs = getFileSystem();
+ List<FileRange> fileRanges1 = createSampleNonOverlappingRanges();
+ List<FileRange> fileRanges2 = createSampleNonOverlappingRanges();
+ try (FSDataInputStream in = fs.open(path(VECTORED_READ_FILE_NAME))) {
+ in.readVectored(fileRanges1, allocate);
+ in.readVectored(fileRanges2, allocate);
+ validateVectoredReadResult(fileRanges2, DATASET);
+ validateVectoredReadResult(fileRanges1, DATASET);
+ returnBuffersToPoolPostRead(fileRanges1, pool);
+ returnBuffersToPoolPostRead(fileRanges2, pool);
+ }
+ }
+
+ protected List<FileRange> createSampleNonOverlappingRanges() {
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(0, 100));
+ fileRanges.add(FileRange.createFileRange(110, 50));
+ return fileRanges;
+ }
+
+ protected List<FileRange> getSampleSameRanges() {
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(8_000, 1000));
+ fileRanges.add(FileRange.createFileRange(8_000, 1000));
+ fileRanges.add(FileRange.createFileRange(8_000, 1000));
+ return fileRanges;
+ }
+
+ protected List<FileRange> getSampleOverlappingRanges() {
+ List<FileRange> fileRanges = new ArrayList<>();
+ fileRanges.add(FileRange.createFileRange(100, 500));
+ fileRanges.add(FileRange.createFileRange(400, 500));
+ return fileRanges;
+ }
+
+ /**
+ * Validate that exceptions must be thrown during a vectored
+ * read operation with specific input ranges.
+ * @param fs FileSystem instance.
+ * @param fileRanges input file ranges.
+ * @param clazz type of exception expected.
+ * @throws Exception any other IOE.
+ */
+ protected void verifyExceptionalVectoredRead(
+ FileSystem fs,
+ List<FileRange> fileRanges,
+ Class<? extends Exception> clazz) throws Exception {
+
+ CompletableFuture<FSDataInputStream> builder =
+ fs.openFile(path(VECTORED_READ_FILE_NAME))
+ .build();
+ try (FSDataInputStream in = builder.get()) {
+ LambdaTestUtils.intercept(clazz,
+ () -> in.readVectored(fileRanges, allocate));
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index eb56d957d9a1a..b61abddd43426 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileRange;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
@@ -28,7 +29,11 @@
import org.apache.hadoop.fs.PathCapabilities;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.functional.RemoteIterators;
+import org.apache.hadoop.util.functional.FutureIO;
+
import org.junit.Assert;
import org.junit.AssumptionViolatedException;
import org.slf4j.Logger;
@@ -39,6 +44,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -49,6 +55,9 @@
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -68,6 +77,11 @@ public class ContractTestUtils extends Assert {
public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
+ /**
+ * Timeout in seconds for vectored read operation in tests : {@value}.
+ */
+ public static final int VECTORED_READ_OPERATION_TEST_TIMEOUT_SECONDS = 5 * 60;
+
/**
* Assert that a property in the property set matches the expected value.
* @param props property set
@@ -1095,6 +1109,78 @@ public static void validateFileContent(byte[] concat, byte[][] bytes) {
mismatch);
}
+ /**
+ * Utility to validate vectored read results.
+ * @param fileRanges input ranges.
+ * @param originalData original data.
+ * @throws IOException any ioe.
+ */
+ public static void validateVectoredReadResult(List<FileRange> fileRanges,
+ byte[] originalData)
+ throws IOException, TimeoutException {
+ CompletableFuture<?>[] completableFutures = new CompletableFuture<?>[fileRanges.size()];
+ int i = 0;
+ for (FileRange res : fileRanges) {
+ completableFutures[i++] = res.getData();
+ }
+ CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(completableFutures);
+ FutureIO.awaitFuture(combinedFuture,
+ VECTORED_READ_OPERATION_TEST_TIMEOUT_SECONDS,
+ TimeUnit.SECONDS);
+
+ for (FileRange res : fileRanges) {
+ CompletableFuture<ByteBuffer> data = res.getData();
+ ByteBuffer buffer = FutureIO.awaitFuture(data,
+ VECTORED_READ_OPERATION_TEST_TIMEOUT_SECONDS,
+ TimeUnit.SECONDS);
+ assertDatasetEquals((int) res.getOffset(), "vecRead",
+ buffer, res.getLength(), originalData);
+ }
+ }
+
+ /**
+ * Utility to return buffers back to the pool once all
+ * data has been read for each file range.
+ * @param fileRanges list of file range.
+ * @param pool buffer pool.
+ * @throws IOException any IOE
+ * @throws TimeoutException ideally this should never occur.
+ */
+ public static void returnBuffersToPoolPostRead(List<FileRange> fileRanges,
+ ByteBufferPool pool)
+ throws IOException, TimeoutException {
+ for (FileRange range : fileRanges) {
+ ByteBuffer buffer = FutureIO.awaitFuture(range.getData(),
+ VECTORED_READ_OPERATION_TEST_TIMEOUT_SECONDS,
+ TimeUnit.SECONDS);
+ pool.putBuffer(buffer);
+ }
+ }
+
+
+ /**
+ * Assert that the data read matches the dataset at the given offset.
+ * This helps verify that the seek process is moving the read pointer
+ * to the correct location in the file.
+ * @param readOffset the offset in the file where the read began.
+ * @param operation operation name for the assertion.
+ * @param data data read in.
+ * @param length length of data to check.
+ * @param originalData original data.
+ */
+ public static void assertDatasetEquals(
+ final int readOffset,
+ final String operation,
+ final ByteBuffer data,
+ int length, byte[] originalData) {
+ for (int i = 0; i < length; i++) {
+ int o = readOffset + i;
+ assertEquals(operation + " with read offset " + readOffset
+ + ": data[" + i + "] != DATASET[" + o + "]",
+ originalData[o], data.get());
+ }
+ }
+
/**
* Receives test data from the given input file and checks the size of the
* data as well as the pattern inside the received data.
@@ -1446,11 +1532,7 @@ public static TreeScanResults treeWalk(FileSystem fs, Path path)
*/
public static <T> List<T> toList(
RemoteIterator<T> iterator) throws IOException {
- ArrayList list = new ArrayList<>();
- while (iterator.hasNext()) {
- list.add(iterator.next());
- }
- return list;
+ return RemoteIterators.toList(iterator);
}
/**
@@ -1464,11 +1546,7 @@ public static List toList(
*/
public static <T> List<T> iteratorToList(
RemoteIterator<T> iterator) throws IOException {
- List list = new ArrayList<>();
- while (iterator.hasNext()) {
- list.add(iterator.next());
- }
- return list;
+ return RemoteIterators.toList(iterator);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java
index 1efd7fc4e95d4..62648ec58bcc7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ftp/FTPContract.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
-import org.junit.Assert;
import java.net.URI;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java
new file mode 100644
index 0000000000000..5d6ca3f8f0c90
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.contract.localfs;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileRange;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+public class TestLocalFSContractVectoredRead extends AbstractContractVectoredReadTest {
+
+ public TestLocalFSContractVectoredRead(String bufferType) {
+ super(bufferType);
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new LocalFSContract(conf);
+ }
+
+ @Test
+ public void testChecksumValidationDuringVectoredRead() throws Exception {
+ Path testPath = path("big_range_checksum");
+ LocalFileSystem localFs = (LocalFileSystem) getFileSystem();
+ final byte[] datasetCorrect = ContractTestUtils.dataset(DATASET_LEN, 'a', 32);
+ try (FSDataOutputStream out = localFs.create(testPath, true)){
+ out.write(datasetCorrect);
+ }
+ Path checksumPath = localFs.getChecksumFile(testPath);
+ Assertions.assertThat(localFs.exists(checksumPath))
+ .describedAs("Checksum file should be present")
+ .isTrue();
+ CompletableFuture<FSDataInputStream> fis = localFs.openFile(testPath).build();
+ List<FileRange> someRandomRanges = new ArrayList<>();
+ someRandomRanges.add(FileRange.createFileRange(10, 1024));
+ someRandomRanges.add(FileRange.createFileRange(1025, 1024));
+ try (FSDataInputStream in = fis.get()){
+ in.readVectored(someRandomRanges, getAllocate());
+ validateVectoredReadResult(someRandomRanges, datasetCorrect);
+ }
+ final byte[] datasetCorrupted = ContractTestUtils.dataset(DATASET_LEN, 'a', 64);
+ try (FSDataOutputStream out = localFs.getRaw().create(testPath, true)){
+ out.write(datasetCorrupted);
+ }
+ CompletableFuture<FSDataInputStream> fisN = localFs.openFile(testPath).build();
+ try (FSDataInputStream in = fisN.get()){
+ in.readVectored(someRandomRanges, getAllocate());
+ // Expect checksum exception when data is updated directly through
+ // raw local fs instance.
+ intercept(ChecksumException.class,
+ () -> validateVectoredReadResult(someRandomRanges, datasetCorrupted));
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java
index 2cb5414caa4c7..6eb24985f4ff3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractUnderlyingFileBehavior.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
+
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java
new file mode 100644
index 0000000000000..cbb31ffe27a59
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.contract.rawlocal;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+public class TestRawLocalContractVectoredRead extends AbstractContractVectoredReadTest {
+
+ public TestRawLocalContractVectoredRead(String bufferType) {
+ super(bufferType);
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new RawlocalFSContract(conf);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
index 3c088d278e536..c34269708ddcb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
@@ -21,7 +21,6 @@
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract;
public class TestRawlocalContractPathHandle
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
index 921fc18131a5c..130ee5edee768 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
@@ -24,7 +24,6 @@
import java.io.File;
import java.io.IOException;
-import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
index c99b97e6e4021..4eb1d433bee45 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
@@ -25,15 +25,12 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
-import java.lang.reflect.Method;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
-import java.nio.file.Paths;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
index 9223338f34bf5..f2452279bc7fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
index 866c03ecda9d2..5713f532be7e8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
@@ -20,7 +20,6 @@
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
index 757ea0c05e7c0..4ae1190abd5af 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.http;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Logger;
import org.junit.Test;
public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index fca72d9c65a6a..51f207f97ad29 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -275,7 +275,7 @@ public void testListDirectory() throws IOException {
File dir = new File("testListDirectory");
Files.createDirectory(dir.toPath());
try {
- Set<String> entries = new HashSet<String>();
+ Set<String> entries = new HashSet<>();
entries.add("entry1");
entries.add("entry2");
entries.add("entry3");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMoreWeakReferencedElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMoreWeakReferencedElasticByteBufferPool.java
new file mode 100644
index 0000000000000..6ca380ef0e46b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMoreWeakReferencedElasticByteBufferPool.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+
+import org.apache.hadoop.test.HadoopTestBase;
+
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+/**
+ * Non parameterized tests for {@code WeakReferencedElasticByteBufferPool}.
+ */
+public class TestMoreWeakReferencedElasticByteBufferPool
+ extends HadoopTestBase {
+
+ @Test
+ public void testMixedBuffersInPool() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer1 = pool.getBuffer(true, 5);
+ ByteBuffer buffer2 = pool.getBuffer(true, 10);
+ ByteBuffer buffer3 = pool.getBuffer(false, 5);
+ ByteBuffer buffer4 = pool.getBuffer(false, 10);
+ ByteBuffer buffer5 = pool.getBuffer(true, 15);
+
+ assertBufferCounts(pool, 0, 0);
+ pool.putBuffer(buffer1);
+ pool.putBuffer(buffer2);
+ assertBufferCounts(pool, 2, 0);
+ pool.putBuffer(buffer3);
+ assertBufferCounts(pool, 2, 1);
+ pool.putBuffer(buffer5);
+ assertBufferCounts(pool, 3, 1);
+ pool.putBuffer(buffer4);
+ assertBufferCounts(pool, 3, 2);
+ pool.release();
+ assertBufferCounts(pool, 0, 0);
+
+ }
+
+ @Test
+ public void testUnexpectedBufferSizes() throws Exception {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer1 = pool.getBuffer(true, 0);
+
+ // try writing a random byte in a 0 length buffer.
+ // Expected exception as buffer requested is of size 0.
+ intercept(BufferOverflowException.class,
+ () -> buffer1.put(new byte[1]));
+
+ // Expected IllegalArgumentException as negative length buffer is requested.
+ intercept(IllegalArgumentException.class,
+ () -> pool.getBuffer(true, -5));
+
+ // test returning null buffer to the pool.
+ intercept(NullPointerException.class,
+ () -> pool.putBuffer(null));
+ }
+
+ /**
+ * Utility method to assert counts of direct and heap buffers in
+ * the given buffer pool.
+ * @param pool buffer pool.
+ * @param numDirectBuffersExpected expected number of direct buffers.
+ * @param numHeapBuffersExpected expected number of heap buffers.
+ */
+ private void assertBufferCounts(WeakReferencedElasticByteBufferPool pool,
+ int numDirectBuffersExpected,
+ int numHeapBuffersExpected) {
+ Assertions.assertThat(pool.getCurrentBuffersCount(true))
+ .describedAs("Number of direct buffers in pool")
+ .isEqualTo(numDirectBuffersExpected);
+ Assertions.assertThat(pool.getCurrentBuffersCount(false))
+ .describedAs("Number of heap buffers in pool")
+ .isEqualTo(numHeapBuffersExpected);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWeakReferencedElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWeakReferencedElasticByteBufferPool.java
new file mode 100644
index 0000000000000..1434010ffa652
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWeakReferencedElasticByteBufferPool.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.hadoop.test.HadoopTestBase;
+
+/**
+ * Unit tests for {@code WeakReferencedElasticByteBufferPool}.
+ */
+@RunWith(Parameterized.class)
+public class TestWeakReferencedElasticByteBufferPool
+ extends HadoopTestBase {
+
+ private final boolean isDirect;
+
+ private final String type;
+
+ @Parameterized.Parameters(name = "Buffer type : {0}")
+ public static List<String> params() {
+ return Arrays.asList("direct", "array");
+ }
+
+ public TestWeakReferencedElasticByteBufferPool(String type) {
+ this.type = type;
+ this.isDirect = !"array".equals(type);
+ }
+
+ @Test
+ public void testGetAndPutBasic() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ int bufferSize = 5;
+ ByteBuffer buffer = pool.getBuffer(isDirect, bufferSize);
+ Assertions.assertThat(buffer.isDirect())
+ .describedAs("Buffered returned should be of correct type {}", type)
+ .isEqualTo(isDirect);
+ Assertions.assertThat(buffer.capacity())
+ .describedAs("Initial capacity of returned buffer from pool")
+ .isEqualTo(bufferSize);
+ Assertions.assertThat(buffer.position())
+ .describedAs("Initial position of returned buffer from pool")
+ .isEqualTo(0);
+
+ byte[] arr = createByteArray(bufferSize);
+ buffer.put(arr, 0, arr.length);
+ buffer.flip();
+ validateBufferContent(buffer, arr);
+ Assertions.assertThat(buffer.position())
+ .describedAs("Buffer's position after filling bytes in it")
+ .isEqualTo(bufferSize);
+ // releasing buffer to the pool.
+ pool.putBuffer(buffer);
+ Assertions.assertThat(buffer.position())
+ .describedAs("Position should be reset to 0 after returning buffer to the pool")
+ .isEqualTo(0);
+
+ }
+
+ @Test
+ public void testPoolingWithDifferentSizes() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer = pool.getBuffer(isDirect, 5);
+ ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
+ ByteBuffer buffer2 = pool.getBuffer(isDirect, 15);
+
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(0);
+
+ pool.putBuffer(buffer1);
+ pool.putBuffer(buffer2);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(2);
+ ByteBuffer buffer3 = pool.getBuffer(isDirect, 12);
+ Assertions.assertThat(buffer3.capacity())
+ .describedAs("Pooled buffer should have older capacity")
+ .isEqualTo(15);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(1);
+ pool.putBuffer(buffer);
+ ByteBuffer buffer4 = pool.getBuffer(isDirect, 6);
+ Assertions.assertThat(buffer4.capacity())
+ .describedAs("Pooled buffer should have older capacity")
+ .isEqualTo(10);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(1);
+
+ pool.release();
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool post release")
+ .isEqualTo(0);
+ }
+
+ @Test
+ public void testPoolingWithDifferentInsertionTime() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer = pool.getBuffer(isDirect, 10);
+ ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
+ ByteBuffer buffer2 = pool.getBuffer(isDirect, 10);
+
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(0);
+
+ pool.putBuffer(buffer1);
+ pool.putBuffer(buffer2);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(2);
+ ByteBuffer buffer3 = pool.getBuffer(isDirect, 10);
+ // As buffer1 is returned to the pool before buffer2, it should
+ // be returned when buffer of same size is asked again from
+ // the pool. Memory references must match not just content
+ // that is why {@code Assertions.isSameAs} is used here rather
+ // than usual {@code Assertions.isEqualTo}.
+ Assertions.assertThat(buffer3)
+ .describedAs("Buffers should be returned in order of their " +
+ "insertion time")
+ .isSameAs(buffer1);
+ pool.putBuffer(buffer);
+ ByteBuffer buffer4 = pool.getBuffer(isDirect, 10);
+ Assertions.assertThat(buffer4)
+ .describedAs("Buffers should be returned in order of their " +
+ "insertion time")
+ .isSameAs(buffer2);
+ }
+
+ @Test
+ public void testGarbageCollection() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer = pool.getBuffer(isDirect, 5);
+ ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
+ ByteBuffer buffer2 = pool.getBuffer(isDirect, 15);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(0);
+ pool.putBuffer(buffer1);
+ pool.putBuffer(buffer2);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(2);
+ // Before GC.
+ ByteBuffer buffer4 = pool.getBuffer(isDirect, 12);
+ Assertions.assertThat(buffer4.capacity())
+ .describedAs("Pooled buffer should have older capacity")
+ .isEqualTo(15);
+ pool.putBuffer(buffer4);
+ // Removing the references
+ buffer1 = null;
+ buffer2 = null;
+ buffer4 = null;
+ System.gc();
+ ByteBuffer buffer3 = pool.getBuffer(isDirect, 12);
+ Assertions.assertThat(buffer3.capacity())
+ .describedAs("After garbage collection new buffer should be " +
+ "returned with fixed capacity")
+ .isEqualTo(12);
+ }
+
+ @Test
+ public void testWeakReferencesPruning() {
+ WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
+ ByteBuffer buffer1 = pool.getBuffer(isDirect, 5);
+ ByteBuffer buffer2 = pool.getBuffer(isDirect, 10);
+ ByteBuffer buffer3 = pool.getBuffer(isDirect, 15);
+
+ pool.putBuffer(buffer2);
+ pool.putBuffer(buffer3);
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(2);
+
+ // marking only buffer2 to be garbage collected.
+ buffer2 = null;
+ System.gc();
+ ByteBuffer buffer4 = pool.getBuffer(isDirect, 10);
+ // Number of buffers in the pool is 0 as one got garbage
+ // collected and other got returned in above call.
+ Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+ .describedAs("Number of buffers in the pool")
+ .isEqualTo(0);
+ Assertions.assertThat(buffer4.capacity())
+ .describedAs("After gc, pool should return next greater than " +
+ "available buffer")
+ .isEqualTo(15);
+
+ }
+
+ private void validateBufferContent(ByteBuffer buffer, byte[] arr) {
+ for (int i=0; i compressors = new HashSet();
+ Set compressors = new HashSet<>();
for (int i = 0; i < 10; ++i) {
compressors.add(CodecPool.getCompressor(codec));
}
@@ -180,7 +180,7 @@ public void testDecompressorNotReturnSameInstance() {
Decompressor decomp = CodecPool.getDecompressor(codec);
CodecPool.returnDecompressor(decomp);
CodecPool.returnDecompressor(decomp);
- Set<Decompressor> decompressors = new HashSet<Decompressor>();
+ Set<Decompressor> decompressors = new HashSet<>();
for (int i = 0; i < 10; ++i) {
decompressors.add(CodecPool.getDecompressor(codec));
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
index c585a463e46b1..fae5ce6de40a4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
@@ -18,9 +18,6 @@
package org.apache.hadoop.io.compress.bzip2;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
import org.apache.hadoop.test.MultithreadedTestUtil;
@@ -32,7 +29,6 @@
import static org.junit.Assert.*;
import static org.junit.Assume.*;
-import static org.junit.Assume.assumeTrue;
public class TestBzip2CompressorDecompressor {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
index ac9ea5e8a8468..25da4fe2375ed 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
@@ -28,7 +28,6 @@
import java.util.zip.DeflaterOutputStream;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.compress.CompressDecompressTester;
import org.apache.hadoop.io.compress.Compressor;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
index b1bf0774974da..6b4c698551359 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
@@ -17,15 +17,12 @@
*/
package org.apache.hadoop.io.file.tfile;
-import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.*;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
public class TestCompression {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
index 2290270bfba1a..70ae639091421 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 5fc9cb5410b30..5caabd22a88c6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.ipc;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
+
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
@@ -84,6 +86,7 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.assertj.core.api.Assertions.assertThat;
@@ -1697,6 +1700,61 @@ public void testRpcMetricsInNanos() throws Exception {
}
}
+ @Test
+ public void testNumTotalRequestsMetrics() throws Exception {
+ UserGroupInformation ugi = UserGroupInformation.
+ createUserForTesting("userXyz", new String[0]);
+
+ final Server server = setupTestServer(conf, 1);
+
+ ExecutorService executorService = null;
+ try {
+ RpcMetrics rpcMetrics = server.getRpcMetrics();
+ assertEquals(0, rpcMetrics.getTotalRequests());
+ assertEquals(0, rpcMetrics.getTotalRequestsPerSecond());
+
+ List> externalCallList = new ArrayList<>();
+
+ executorService = Executors.newSingleThreadExecutor(
+ new ThreadFactoryBuilder().setDaemon(true).setNameFormat("testNumTotalRequestsMetrics")
+ .build());
+ AtomicInteger rps = new AtomicInteger(0);
+ CountDownLatch countDownLatch = new CountDownLatch(1);
+ executorService.submit(() -> {
+ while (true) {
+ int numRps = (int) rpcMetrics.getTotalRequestsPerSecond();
+ rps.getAndSet(numRps);
+ if (rps.get() > 0) {
+ countDownLatch.countDown();
+ break;
+ }
+ }
+ });
+
+ for (int i = 0; i < 100000; i++) {
+ externalCallList.add(newExtCall(ugi, () -> null));
+ }
+ for (ExternalCall externalCall : externalCallList) {
+ server.queueCall(externalCall);
+ }
+ for (ExternalCall externalCall : externalCallList) {
+ externalCall.get();
+ }
+
+ assertEquals(100000, rpcMetrics.getTotalRequests());
+ if (countDownLatch.await(10, TimeUnit.SECONDS)) {
+ assertTrue(rps.get() > 10);
+ } else {
+ throw new AssertionError("total requests per seconds are still 0");
+ }
+ } finally {
+ if (executorService != null) {
+ executorService.shutdown();
+ }
+ server.stop();
+ }
+ }
+
public static void main(String[] args) throws Exception {
new TestRPC().testCallsInternal(conf);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
index 0e1ac1deb9612..2504a6401a8d9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.net;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
index 86870e1257119..697b0bad43757 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
@@ -30,7 +30,7 @@
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.junit.Before;
+
import org.junit.Test;
public class TestTableMapping {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java
index 2e266bba1f97a..03d953b5f3cc3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java
@@ -19,8 +19,7 @@
package org.apache.hadoop.security;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.junit.AfterClass;
+
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -31,20 +30,12 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.File;
-import java.util.Properties;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.KDiag.ARG_KEYLEN;
-import static org.apache.hadoop.security.KDiag.ARG_KEYTAB;
import static org.apache.hadoop.security.KDiag.ARG_NOFAIL;
import static org.apache.hadoop.security.KDiag.ARG_NOLOGIN;
-import static org.apache.hadoop.security.KDiag.ARG_PRINCIPAL;
-import static org.apache.hadoop.security.KDiag.ARG_SECURE;
-import static org.apache.hadoop.security.KDiag.CAT_CONFIG;
-import static org.apache.hadoop.security.KDiag.CAT_KERBEROS;
import static org.apache.hadoop.security.KDiag.CAT_LOGIN;
import static org.apache.hadoop.security.KDiag.CAT_TOKEN;
import static org.apache.hadoop.security.KDiag.KerberosDiagsFailure;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
index de74d17863668..f04fbe1e08926 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
@@ -19,23 +19,18 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.PlatformName;
+
import org.junit.After;
import org.junit.Test;
import org.slf4j.event.Level;
-import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
-import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.File;
import java.security.Principal;
-import java.util.HashMap;
import java.util.HashSet;
-import java.util.Map;
import java.util.Properties;
import java.util.Set;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestCrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestCrossOriginFilter.java
index b9662b8c6a328..0b396be48f983 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestCrossOriginFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestCrossOriginFilter.java
@@ -36,9 +36,6 @@
import org.junit.Test;
import org.mockito.Mockito;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
public class TestCrossOriginFilter {
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/TestServiceLauncher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/TestServiceLauncher.java
index f40051b0d178f..72757e4b1c182 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/TestServiceLauncher.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/TestServiceLauncher.java
@@ -29,8 +29,6 @@
import org.apache.hadoop.service.launcher.testservices.StoppingInStartLaunchableService;
import org.apache.hadoop.service.launcher.testservices.StringConstructorOnlyService;
-import static org.apache.hadoop.service.launcher.LauncherArguments.*;
-
import static org.apache.hadoop.test.GenericTestUtils.*;
import static org.apache.hadoop.service.launcher.testservices.ExceptionInExecuteLaunchableService.*;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/NoArgsAllowedService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/NoArgsAllowedService.java
index 602cb157ed5d8..9245b1844f792 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/NoArgsAllowedService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/NoArgsAllowedService.java
@@ -26,7 +26,6 @@
import org.slf4j.LoggerFactory;
import java.util.List;
-import java.util.Map;
/**
* service that does not allow any arguments.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index f1bf4bb91e668..61d5938494c22 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -39,6 +39,7 @@
import java.util.Random;
import java.util.Set;
import java.util.Enumeration;
+import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -72,7 +73,6 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.util.Sets;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.util.functional.CommonCallableSupplier.submit;
@@ -344,13 +344,13 @@ public static void assertExists(File f) {
public static void assertGlobEquals(File dir, String pattern,
String ... expectedMatches) throws IOException {
- Set found = Sets.newTreeSet();
+ Set found = new TreeSet<>();
for (File f : FileUtil.listFiles(dir)) {
if (f.getName().matches(pattern)) {
found.add(f.getName());
}
}
- Set expectedSet = Sets.newTreeSet(
+ Set expectedSet = new TreeSet<>(
Arrays.asList(expectedMatches));
Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
Joiner.on(",").join(expectedSet),
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MoreAsserts.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MoreAsserts.java
index 142669b78682e..f6e6055d78e2c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MoreAsserts.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MoreAsserts.java
@@ -19,6 +19,9 @@
package org.apache.hadoop.test;
import java.util.Iterator;
+import java.util.concurrent.CompletableFuture;
+
+import org.assertj.core.api.Assertions;
import org.junit.Assert;
/**
@@ -28,17 +31,18 @@ public class MoreAsserts {
/**
* Assert equivalence for array and iterable
- * @param the type of the elements
- * @param s the name/message for the collection
- * @param expected the expected array of elements
- * @param actual the actual iterable of elements
+ *
+ * @param the type of the elements
+ * @param s the name/message for the collection
+ * @param expected the expected array of elements
+ * @param actual the actual iterable of elements
*/
public static void assertEquals(String s, T[] expected,
Iterable actual) {
Iterator it = actual.iterator();
int i = 0;
for (; i < expected.length && it.hasNext(); ++i) {
- Assert.assertEquals("Element "+ i +" for "+ s, expected[i], it.next());
+ Assert.assertEquals("Element " + i + " for " + s, expected[i], it.next());
}
Assert.assertTrue("Expected more elements", i == expected.length);
Assert.assertTrue("Expected less elements", !it.hasNext());
@@ -46,7 +50,8 @@ public static void assertEquals(String s, T[] expected,
/**
* Assert equality for two iterables
- * @param the type of the elements
+ *
+ * @param the type of the elements
* @param s
* @param expected
* @param actual
@@ -57,10 +62,40 @@ public static void assertEquals(String s, Iterable expected,
Iterator ita = actual.iterator();
int i = 0;
while (ite.hasNext() && ita.hasNext()) {
- Assert.assertEquals("Element "+ i +" for "+s, ite.next(), ita.next());
+ Assert.assertEquals("Element " + i + " for " + s, ite.next(), ita.next());
}
Assert.assertTrue("Expected more elements", !ite.hasNext());
Assert.assertTrue("Expected less elements", !ita.hasNext());
}
+
+ public static void assertFutureCompletedSuccessfully(CompletableFuture future) {
+ Assertions.assertThat(future.isDone())
+ .describedAs("This future is supposed to be " +
+ "completed successfully")
+ .isTrue();
+ Assertions.assertThat(future.isCompletedExceptionally())
+ .describedAs("This future is supposed to be " +
+ "completed successfully")
+ .isFalse();
+ }
+
+ public static void assertFutureFailedExceptionally(CompletableFuture future) {
+ Assertions.assertThat(future.isCompletedExceptionally())
+ .describedAs("This future is supposed to be " +
+ "completed exceptionally")
+ .isTrue();
+ }
+
+ /**
+ * Assert two same type of values.
+ * @param actual actual value.
+ * @param expected expected value.
+ * @param message error message to print in case of mismatch.
+ */
+ public static void assertEqual(T actual, T expected, String message) {
+ Assertions.assertThat(actual)
+ .describedAs("Mismatch in %s", message)
+ .isEqualTo(expected);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
index 217c2f84eba4b..e270ee68000eb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
@@ -70,8 +70,8 @@ public abstract class MultithreadedTestUtil {
public static class TestContext {
private Throwable err = null;
private boolean stopped = false;
- private Set testThreads = new HashSet();
- private Set finishedThreads = new HashSet();
+ private Set testThreads = new HashSet<>();
+ private Set finishedThreads = new HashSet<>();
/**
* Check if the context can run threads.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
index 1334f1c95f407..42ed8c8775570 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
@@ -19,7 +19,6 @@
import java.io.PrintWriter;
import java.io.StringWriter;
-import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/TestCommandShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/TestCommandShell.java
index 606791801fe13..e9c5950b729c6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/TestCommandShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/TestCommandShell.java
@@ -24,7 +24,6 @@
import org.apache.hadoop.tools.CommandShell;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
index 082672ccd33d2..552d1319312c6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
@@ -23,15 +23,12 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
-import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
index 68e70ebb79a5c..9ae52ff95cb91 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
@@ -20,7 +20,7 @@
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
+
import org.junit.Assert;
import java.io.BufferedReader;
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
index 14c2ae907b14d..d71172e1b93ef 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
@@ -36,9 +36,9 @@
import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener;
import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.util.Sets;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.util.Arrays;
import java.util.HashSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
@@ -56,10 +56,10 @@
*/
public class KMSAudit {
@VisibleForTesting
- static final Set AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
+ static final Set AGGREGATE_OPS_WHITELIST = new HashSet<>(Arrays.asList(
KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK, KMS.KMSOp.REENCRYPT_EEK
- );
+ ));
private Cache cache;
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index a0a58ff3567f5..f4c7fbe0b3c3c 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
+import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
@@ -48,7 +49,6 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.Whitebox;
import org.apache.hadoop.util.Time;
import org.apache.http.client.utils.URIBuilder;
import org.junit.After;
@@ -929,6 +929,7 @@ public Void call() throws Exception {
}
@Test
+ @SuppressWarnings("unchecked")
public void testKMSProviderCaching() throws Exception {
Configuration conf = new Configuration();
File confDir = getTestDir();
@@ -946,11 +947,12 @@ public Void call() throws Exception {
KMSClientProvider kmscp = createKMSClientProvider(uri, conf);
// get the reference to the internal cache, to test invalidation.
- ValueQueue vq =
- (ValueQueue) Whitebox.getInternalState(kmscp, "encKeyVersionQueue");
+ ValueQueue vq = (ValueQueue) FieldUtils.getField(KMSClientProvider.class,
+ "encKeyVersionQueue", true).get(kmscp);
LoadingCache> kq =
- ((LoadingCache