diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
index e4e933b42ec21..46289772e6f7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
@@ -239,7 +239,7 @@ private int readToBuffer(BlockReader blockReader,
       throws IOException {
     final int targetLength = strategy.getTargetLength();
     int curAttempts = 0;
-    while (curAttempts < readDNMaxAttempts) {
+    while (true) {
       curAttempts++;
       int length = 0;
       try {
@@ -252,9 +252,8 @@ private int readToBuffer(BlockReader blockReader,
         }
         return length;
       } catch (ChecksumException ce) {
-        DFSClient.LOG.warn("Found Checksum error for "
-            + currentBlock + " from " + currentNode
-            + " at " + ce.getPos());
+        DFSClient.LOG.warn("Found Checksum error for {} from {} at {}",
+            currentBlock, currentNode, ce.getPos());
         //Clear buffer to make next decode success
         strategy.getReadBuffer().clear();
         // we want to remember which block replicas we have tried
@@ -271,21 +270,16 @@ private int readToBuffer(BlockReader blockReader,
               offsetInBlock, targetBlocks, readerInfos,
               chunkIndex, readTo)) {
             blockReader = readerInfos[chunkIndex].reader;
-            String msg = "Reconnect to " + currentNode.getInfoAddr()
-                + " for block " + currentBlock.getBlock();
-            DFSClient.LOG.warn(msg);
+            DFSClient.LOG.warn("Reconnect to {} for block {}",
+                currentNode.getInfoAddr(), currentBlock.getBlock());
             continue;
           }
         }
-        DFSClient.LOG.warn("Exception while reading from "
-            + currentBlock + " of " + dfsStripedInputStream.getSrc() + " from "
-            + currentNode, e);
+        DFSClient.LOG.warn("Exception while reading from {} of {} from {}",
+            currentBlock, dfsStripedInputStream.getSrc(), currentNode, e);
         throw e;
       }
     }
-    throw new IOException("Read request interrupted. " +
-        currentBlock + " of " + dfsStripedInputStream.getSrc() + " from "
-        + currentNode);
   }
 
   private Callable<Void> readCells(final BlockReader reader,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithTimeout.java
index d90c7fb91de3b..a1ff537d75533 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithTimeout.java
@@ -84,7 +84,7 @@ public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 500);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(