From 880677674db79c3c22042f7f8247c4894d5a25d4 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Thu, 18 Nov 2021 12:57:53 +0900 Subject: [PATCH 01/11] HDFS-16332 Handle invalid token exception in sasl handshake --- .../sasl/DataTransferSaslUtil.java | 9 + .../src/main/proto/datatransfer.proto | 1 + .../sasl/SaslDataTransferServer.java | 23 +++ ...TestSaslDataTransferExpiredBlockToken.java | 189 ++++++++++++++++++ 4 files changed, 222 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index 526f3d0c66512..96fe4ae31aa61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto; import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; +import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection; import org.slf4j.Logger; @@ -216,6 +217,8 @@ public static byte[] readSaslMessage(InputStream in) throws IOException { DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); + } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { @@ -237,6 +240,8 @@ public static byte[] readSaslMessageAndNegotiationCipherOptions( DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); + } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { @@ -280,6 +285,8 @@ public static SaslMessageWithHandshake readSaslMessageWithHandshakeSecret( DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); + } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { @@ -471,6 +478,8 @@ public static void sendSaslMessageAndNegotiationCipherOptions( DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { 
throw new InvalidEncryptionKeyException(proto.getMessage()); + } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto index 28a292e729e2a..401d3b8fe74a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto @@ -38,6 +38,7 @@ message DataTransferEncryptorMessageProto { SUCCESS = 0; ERROR_UNKNOWN_KEY = 1; ERROR = 2; + ERROR_ACCESS_TOKEN = 3; } required DataTransferEncryptorStatus status = 1; optional bytes payload = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index 1e10fbbd3dd03..40c089c9d47ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -52,10 +52,12 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus; import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.datanode.DNConf; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -441,6 +443,14 @@ private IOStreamPair doSaslHandshake(Peer peer, OutputStream underlyingOut, // error, the client will get a new encryption key from the NN and retry // connecting to this DN. sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage()); + } else if (ioe instanceof SaslException && + ioe.getCause() != null && + (ioe.getCause() instanceof InvalidBlockTokenException || + ioe.getCause() instanceof SecretManager.InvalidToken)) { + // This could be because the client is long-lived and block token is expired + // The client will get new block token from the NN, upon receiving this error + // and retry connecting to this DN + sendInvalidTokenSaslErrorMessage(out, ioe.getCause().getMessage()); } else { sendGenericSaslErrorMessage(out, ioe.getMessage()); } @@ -460,4 +470,17 @@ private static void sendInvalidKeySaslErrorMessage(DataOutputStream out, sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY, null, message); } + + /** + * Sends a SASL negotiation message indicating an invalid token error. 
+ * + * @param out stream to receive message + * @param message to send + * @throws IOException for any error + */ + private static void sendInvalidTokenSaslErrorMessage(DataOutputStream out, + String message) throws IOException { + sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN, null, + message); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java new file mode 100644 index 0000000000000..ea56cdda6c120 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.HedgedRead; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; + +public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestCase { + private static final int BLOCK_SIZE = 4096; + private static final int FILE_SIZE = 2 * BLOCK_SIZE; + private static final Path PATH = new Path("/file1"); + + private final byte[] rawData = new byte[FILE_SIZE]; + private MiniDFSCluster cluster; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Rule + public Timeout timeout = new Timeout(60, TimeUnit.SECONDS); + + @Before + public void before() throws Exception { + Random r = new Random(); + r.nextBytes(rawData); + + HdfsConfiguration conf = createSecureConfig( + "authentication,integrity,privacy"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + try (FileSystem fs = cluster.getFileSystem()) { + 
createFile(fs); + } + + // set a short token lifetime (1 second) initially + SecurityTestUtil.setBlockTokenLifetime( + cluster.getNameNode() + .getNamesystem() + .getBlockManager() + .getBlockTokenSecretManager(), 1000L); + } + + @After + public void shutdown() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + private void createFile(FileSystem fs) throws IOException { + try (FSDataOutputStream out = fs.create(PATH)) { + out.write(rawData); + } + } + + // read a file using blockSeekTo() + private boolean checkFile1(FSDataInputStream in) { + byte[] toRead = new byte[FILE_SIZE]; + int totalRead = 0; + int nRead = 0; + try { + while ((nRead = in.read(toRead, totalRead, toRead.length - totalRead)) > 0) { + totalRead += nRead; + } + } catch (IOException e) { + return false; + } + assertEquals("Cannot read file.", toRead.length, totalRead); + return checkFile(toRead); + } + + // read a file using fetchBlockByteRange()/hedgedFetchBlockByteRange + private boolean checkFile2(FSDataInputStream in) { + byte[] toRead = new byte[FILE_SIZE]; + try { + assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0, toRead.length)); + } catch (IOException e) { + return false; + } + return checkFile(toRead); + } + + private boolean checkFile(byte[] fileToCheck) { + if (fileToCheck.length != rawData.length) { + return false; + } + for (int i = 0; i < fileToCheck.length; i++) { + if (fileToCheck[i] != rawData[i]) { + return false; + } + } + return true; + } + + private FileSystem newFileSystem() throws IOException { + Configuration clientConf = new Configuration(cluster.getConfiguration(0)); + + clientConf.setInt(Retry.WINDOW_BASE_KEY, Integer.MAX_VALUE); + + return FileSystem.newInstance(cluster.getURI(), clientConf); + } + private FileSystem newFileSystemHedgedRead() throws IOException { + Configuration clientConf = new Configuration(cluster.getConfiguration(0)); + + clientConf.setInt(Retry.WINDOW_BASE_KEY, 3000); + clientConf.setInt(HedgedRead.THREADPOOL_SIZE_KEY, 5); + + return FileSystem.newInstance(cluster.getURI(), clientConf); + } + + @Test + public void testBlockSeekToWithExpiredToken() throws Exception { + // read using blockSeekTo(). Acquired tokens are cached in in + try (FileSystem fs = newFileSystem(); + FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile1(in)); + } + } + + @Test + public void testFetchBlockByteRangeWithExpiredToken() throws Exception { + // read using fetchBlockByteRange(). Acquired tokens are cached in in + try (FileSystem fs = newFileSystem(); + FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile2(in)); + } + } + + @Test + public void testHedgedFetchBlockByteRangeWithExpiredToken() throws Exception { + // read using hedgedFetchBlockByteRange(). 
Acquired tokens are cached in in + try (FileSystem fs = newFileSystemHedgedRead(); + FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile2(in)); + } + } + + private void waitBlockTokenExpired(FSDataInputStream in1) throws Exception { + DFSInputStream innerStream = (DFSInputStream) in1.getWrappedStream(); + for (LocatedBlock block : innerStream.getAllBlocks()) { + while (!SecurityTestUtil.isBlockTokenExpired(block.getBlockToken())) { + Thread.sleep(100); + } + } + } +} From e0e720645a9a468b7a244adcd0c31c9bea30b7b9 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Thu, 18 Nov 2021 14:05:03 +0900 Subject: [PATCH 02/11] Reformat --- .../sasl/SaslDataTransferServer.java | 12 +- ...TestSaslDataTransferExpiredBlockToken.java | 242 +++++++++--------- 2 files changed, 125 insertions(+), 129 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index 40c089c9d47ff..f943170ec271b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -444,9 +444,9 @@ private IOStreamPair doSaslHandshake(Peer peer, OutputStream underlyingOut, // connecting to this DN. sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage()); } else if (ioe instanceof SaslException && - ioe.getCause() != null && - (ioe.getCause() instanceof InvalidBlockTokenException || - ioe.getCause() instanceof SecretManager.InvalidToken)) { + ioe.getCause() != null && + (ioe.getCause() instanceof InvalidBlockTokenException || + ioe.getCause() instanceof SecretManager.InvalidToken)) { // This could be because the client is long-lived and block token is expired // The client will get new block token from the NN, upon receiving this error // and retry connecting to this DN @@ -474,13 +474,13 @@ private static void sendInvalidKeySaslErrorMessage(DataOutputStream out, /** * Sends a SASL negotiation message indicating an invalid token error. 
* - * @param out stream to receive message + * @param out stream to receive message * @param message to send * @throws IOException for any error */ private static void sendInvalidTokenSaslErrorMessage(DataOutputStream out, - String message) throws IOException { + String message) throws IOException { sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN, null, - message); + message); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java index ea56cdda6c120..8689f33d98f71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil; + import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -44,146 +45,141 @@ import org.junit.rules.Timeout; public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestCase { - private static final int BLOCK_SIZE = 4096; - private static final int FILE_SIZE = 2 * BLOCK_SIZE; - private static final Path PATH = new Path("/file1"); - - private final byte[] rawData = new byte[FILE_SIZE]; - private MiniDFSCluster cluster; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Rule - public Timeout timeout = new Timeout(60, TimeUnit.SECONDS); - - @Before - public void before() throws Exception { - Random r = new Random(); - r.nextBytes(rawData); - - HdfsConfiguration conf = createSecureConfig( - "authentication,integrity,privacy"); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); - cluster.waitActive(); - - try (FileSystem fs = cluster.getFileSystem()) { - createFile(fs); - } - - // set a short token lifetime (1 second) initially - SecurityTestUtil.setBlockTokenLifetime( - cluster.getNameNode() - .getNamesystem() - .getBlockManager() - .getBlockTokenSecretManager(), 1000L); - } + private static final int BLOCK_SIZE = 4096; + private static final int FILE_SIZE = 2 * BLOCK_SIZE; + private static final Path PATH = new Path("/file1"); - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } + private final byte[] rawData = new byte[FILE_SIZE]; + private MiniDFSCluster cluster; + + @Rule + public ExpectedException exception = ExpectedException.none(); - private void createFile(FileSystem fs) throws IOException { - try (FSDataOutputStream out = fs.create(PATH)) { - out.write(rawData); - } + @Rule + public Timeout timeout = new Timeout(60, TimeUnit.SECONDS); + + @Before + public void before() throws Exception { + Random r = new Random(); + r.nextBytes(rawData); + + HdfsConfiguration conf = createSecureConfig("authentication,integrity,privacy"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + try (FileSystem fs = cluster.getFileSystem()) { + createFile(fs); } - // read a file using blockSeekTo() - private boolean checkFile1(FSDataInputStream in) { - byte[] toRead = new byte[FILE_SIZE]; - int totalRead = 0; - 
int nRead = 0; - try { - while ((nRead = in.read(toRead, totalRead, toRead.length - totalRead)) > 0) { - totalRead += nRead; - } - } catch (IOException e) { - return false; - } - assertEquals("Cannot read file.", toRead.length, totalRead); - return checkFile(toRead); + // set a short token lifetime (1 second) initially + SecurityTestUtil.setBlockTokenLifetime( + cluster.getNameNode().getNamesystem().getBlockManager().getBlockTokenSecretManager(), + 1000L); + } + + @After + public void shutdown() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; } + } - // read a file using fetchBlockByteRange()/hedgedFetchBlockByteRange - private boolean checkFile2(FSDataInputStream in) { - byte[] toRead = new byte[FILE_SIZE]; - try { - assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0, toRead.length)); - } catch (IOException e) { - return false; - } - return checkFile(toRead); + private void createFile(FileSystem fs) throws IOException { + try (FSDataOutputStream out = fs.create(PATH)) { + out.write(rawData); + } + } + + // read a file using blockSeekTo() + private boolean checkFile1(FSDataInputStream in) { + byte[] toRead = new byte[FILE_SIZE]; + int totalRead = 0; + int nRead = 0; + try { + while ((nRead = in.read(toRead, totalRead, toRead.length - totalRead)) > 0) { + totalRead += nRead; + } + } catch (IOException e) { + return false; } + assertEquals("Cannot read file.", toRead.length, totalRead); + return checkFile(toRead); + } + + // read a file using fetchBlockByteRange()/hedgedFetchBlockByteRange() + private boolean checkFile2(FSDataInputStream in) { + byte[] toRead = new byte[FILE_SIZE]; + try { + assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0, toRead.length)); + } catch (IOException e) { + return false; + } + return checkFile(toRead); + } - private boolean checkFile(byte[] fileToCheck) { - if (fileToCheck.length != rawData.length) { - return false; - } - for (int i = 0; i < fileToCheck.length; i++) { - if (fileToCheck[i] != rawData[i]) { - return false; - } - } - return true; + private boolean checkFile(byte[] fileToCheck) { + if (fileToCheck.length != rawData.length) { + return false; + } + for (int i = 0; i < fileToCheck.length; i++) { + if (fileToCheck[i] != rawData[i]) { + return false; + } } + return true; + } - private FileSystem newFileSystem() throws IOException { - Configuration clientConf = new Configuration(cluster.getConfiguration(0)); + private FileSystem newFileSystem() throws IOException { + Configuration clientConf = new Configuration(cluster.getConfiguration(0)); - clientConf.setInt(Retry.WINDOW_BASE_KEY, Integer.MAX_VALUE); + clientConf.setInt(Retry.WINDOW_BASE_KEY, Integer.MAX_VALUE); - return FileSystem.newInstance(cluster.getURI(), clientConf); - } - private FileSystem newFileSystemHedgedRead() throws IOException { - Configuration clientConf = new Configuration(cluster.getConfiguration(0)); + return FileSystem.newInstance(cluster.getURI(), clientConf); + } - clientConf.setInt(Retry.WINDOW_BASE_KEY, 3000); - clientConf.setInt(HedgedRead.THREADPOOL_SIZE_KEY, 5); + private FileSystem newFileSystemHedgedRead() throws IOException { + Configuration clientConf = new Configuration(cluster.getConfiguration(0)); - return FileSystem.newInstance(cluster.getURI(), clientConf); - } + clientConf.setInt(Retry.WINDOW_BASE_KEY, 3000); + clientConf.setInt(HedgedRead.THREADPOOL_SIZE_KEY, 5); - @Test - public void testBlockSeekToWithExpiredToken() throws Exception { - // read using blockSeekTo(). 
Acquired tokens are cached in in - try (FileSystem fs = newFileSystem(); - FSDataInputStream in = fs.open(PATH)) { - waitBlockTokenExpired(in); - assertTrue(checkFile1(in)); - } - } + return FileSystem.newInstance(cluster.getURI(), clientConf); + } - @Test - public void testFetchBlockByteRangeWithExpiredToken() throws Exception { - // read using fetchBlockByteRange(). Acquired tokens are cached in in - try (FileSystem fs = newFileSystem(); - FSDataInputStream in = fs.open(PATH)) { - waitBlockTokenExpired(in); - assertTrue(checkFile2(in)); - } + @Test + public void testBlockSeekToWithExpiredToken() throws Exception { + // read using blockSeekTo(). Acquired tokens are cached in in + try (FileSystem fs = newFileSystem(); FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile1(in)); } - - @Test - public void testHedgedFetchBlockByteRangeWithExpiredToken() throws Exception { - // read using hedgedFetchBlockByteRange(). Acquired tokens are cached in in - try (FileSystem fs = newFileSystemHedgedRead(); - FSDataInputStream in = fs.open(PATH)) { - waitBlockTokenExpired(in); - assertTrue(checkFile2(in)); - } + } + + @Test + public void testFetchBlockByteRangeWithExpiredToken() throws Exception { + // read using fetchBlockByteRange(). Acquired tokens are cached in in + try (FileSystem fs = newFileSystem(); FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile2(in)); } - - private void waitBlockTokenExpired(FSDataInputStream in1) throws Exception { - DFSInputStream innerStream = (DFSInputStream) in1.getWrappedStream(); - for (LocatedBlock block : innerStream.getAllBlocks()) { - while (!SecurityTestUtil.isBlockTokenExpired(block.getBlockToken())) { - Thread.sleep(100); - } - } + } + + @Test + public void testHedgedFetchBlockByteRangeWithExpiredToken() throws Exception { + // read using hedgedFetchBlockByteRange(). 
Acquired tokens are cached in in + try (FileSystem fs = newFileSystemHedgedRead(); FSDataInputStream in = fs.open(PATH)) { + waitBlockTokenExpired(in); + assertTrue(checkFile2(in)); + } + } + + private void waitBlockTokenExpired(FSDataInputStream in1) throws Exception { + DFSInputStream innerStream = (DFSInputStream) in1.getWrappedStream(); + for (LocatedBlock block : innerStream.getAllBlocks()) { + while (!SecurityTestUtil.isBlockTokenExpired(block.getBlockToken())) { + Thread.sleep(100); + } } + } } From 108ba05eb088d570e04497d27f13c7907d7cd23e Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Thu, 18 Nov 2021 20:22:27 +0900 Subject: [PATCH 03/11] Remove ExpectedException --- .../sasl/TestSaslDataTransferExpiredBlockToken.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java index 8689f33d98f71..ea6e623f3a793 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java @@ -52,9 +52,6 @@ public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestC private final byte[] rawData = new byte[FILE_SIZE]; private MiniDFSCluster cluster; - @Rule - public ExpectedException exception = ExpectedException.none(); - @Rule public Timeout timeout = new Timeout(60, TimeUnit.SECONDS); From 98fa43df9fb2903f2bd103e1d328528b5d08239f Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Fri, 19 Nov 2021 14:49:40 +0900 Subject: [PATCH 04/11] Remove unused import --- .../datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java index ea6e623f3a793..838ad7cc97f22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java @@ -41,7 +41,6 @@ import org.junit.Before; import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestCase { From faf9a801c0534cb00bb11e4d436e229b889b3e55 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Sat, 20 Nov 2021 15:29:53 +0900 Subject: [PATCH 05/11] triggerrebuild From d500e069920e99a6efcb0440d09e350ccffaa42e Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Fri, 26 Nov 2021 12:14:13 +0900 Subject: [PATCH 06/11] Change ERROR_ACCESS_TOKEN status to bool flag, with refactoring readSaslMessage --- .../sasl/DataTransferSaslUtil.java | 78 +++++++++---------- .../sasl/SaslDataTransferClient.java | 12 ++- .../src/main/proto/datatransfer.proto | 2 +- .../sasl/SaslDataTransferServer.java | 3 +- 4 files changed, 50 insertions(+), 45 deletions(-) diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index 96fe4ae31aa61..bee3678882308 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import javax.security.sasl.Sasl; import org.apache.commons.codec.binary.Base64; @@ -205,6 +206,25 @@ public static SaslPropertiesResolver getSaslPropertiesResolver( return resolver; } + private static T readSaslMessage(InputStream in, + Function handler) throws IOException { + DataTransferEncryptorMessageProto proto = + DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); + switch (proto.getStatus()) { + case ERROR_UNKNOWN_KEY: + throw new InvalidEncryptionKeyException(proto.getMessage()); + case ERROR: + if (proto.hasAccessTokenError() && proto.getAccessTokenError()) { + throw new InvalidBlockTokenException(proto.getMessage()); + } + throw new IOException(proto.getMessage()); + case SUCCESS: + return handler.apply(proto); + default: + throw new IOException("Unknown status: " + proto.getStatus() + ", message: " + proto.getMessage()); + } + } + /** * Reads a SASL negotiation message. * @@ -213,17 +233,7 @@ public static SaslPropertiesResolver getSaslPropertiesResolver( * @throws IOException for any error */ public static byte[] readSaslMessage(InputStream in) throws IOException { - DataTransferEncryptorMessageProto proto = - DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); - if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { - throw new InvalidEncryptionKeyException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { - throw new IOException(proto.getMessage()); - } else { - return proto.getPayload().toByteArray(); - } + return readSaslMessage(in, proto -> proto.getPayload().toByteArray()); } /** @@ -236,15 +246,7 @@ public static byte[] readSaslMessage(InputStream in) throws IOException { */ public static byte[] readSaslMessageAndNegotiationCipherOptions( InputStream in, List cipherOptions) throws IOException { - DataTransferEncryptorMessageProto proto = - DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); - if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { - throw new InvalidEncryptionKeyException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { - throw new IOException(proto.getMessage()); - } else { + return readSaslMessage(in, proto -> { List optionProtos = proto.getCipherOptionList(); if (optionProtos != null) { for (CipherOptionProto optionProto : optionProtos) { @@ -252,7 +254,7 @@ public static byte[] readSaslMessageAndNegotiationCipherOptions( } } return proto.getPayload().toByteArray(); - } + }); } static class SaslMessageWithHandshake { @@ -281,15 
+283,7 @@ String getBpid() { public static SaslMessageWithHandshake readSaslMessageWithHandshakeSecret( InputStream in) throws IOException { - DataTransferEncryptorMessageProto proto = - DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); - if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { - throw new InvalidEncryptionKeyException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { - throw new IOException(proto.getMessage()); - } else { + return readSaslMessage(in, proto -> { byte[] payload = proto.getPayload().toByteArray(); byte[] secret = null; String bpid = null; @@ -299,7 +293,7 @@ public static SaslMessageWithHandshake readSaslMessageWithHandshakeSecret( bpid = handshakeSecret.getBpid(); } return new SaslMessageWithHandshake(payload, secret, bpid); - } + }); } /** @@ -474,15 +468,7 @@ public static void sendSaslMessageAndNegotiationCipherOptions( public static SaslResponseWithNegotiatedCipherOption readSaslMessageAndNegotiatedCipherOption(InputStream in) throws IOException { - DataTransferEncryptorMessageProto proto = - DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); - if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { - throw new InvalidEncryptionKeyException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException(proto.getMessage()); - } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { - throw new IOException(proto.getMessage()); - } else { + return readSaslMessage(in, proto -> { byte[] response = proto.getPayload().toByteArray(); List options = PBHelperClient.convertCipherOptionProtos( proto.getCipherOptionList()); @@ -491,7 +477,7 @@ public static void sendSaslMessageAndNegotiationCipherOptions( option = options.get(0); } return new SaslResponseWithNegotiatedCipherOption(response, option); - } + }); } /** @@ -567,6 +553,13 @@ public static void sendSaslMessage(OutputStream out, DataTransferEncryptorStatus status, byte[] payload, String message, HandshakeSecretProto handshakeSecret) throws IOException { + sendSaslMessage(out, status, payload, message, handshakeSecret, false); + } + + public static void sendSaslMessage(OutputStream out, + DataTransferEncryptorStatus status, byte[] payload, String message, + HandshakeSecretProto handshakeSecret, boolean accessTokenError) + throws IOException { DataTransferEncryptorMessageProto.Builder builder = DataTransferEncryptorMessageProto.newBuilder(); @@ -580,6 +573,9 @@ public static void sendSaslMessage(OutputStream out, if (handshakeSecret != null) { builder.setHandshakeSecret(handshakeSecret); } + if (accessTokenError) { + builder.setAccessTokenError(true); + } DataTransferEncryptorMessageProto proto = builder.build(); proto.writeDelimitedTo(out); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index 1a61c9664825e..64ffb40415c1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -603,7 +603,17 @@ private IOStreamPair doSaslHandshake(InetAddress addr, conf, cipherOption, underlyingOut, underlyingIn, false) : sasl.createStreamPair(out, in); } catch (IOException ioe) { - sendGenericSaslErrorMessage(out, ioe.getMessage()); + try { + sendGenericSaslErrorMessage(out, ioe.getMessage()); + } catch (Exception ioe2) { + // If ioe is caused by error response from server, server will close peer connection. + // So sendGenericSaslErrorMessage might cause IOException due to "Broken pipe". + // We suppress IOException from sendGenericSaslErrorMessage + // and always throw `ioe` as top level. + // `ioe` can be InvalidEncryptionKeyException or InvalidBlockTokenException + // that indicates refresh key or token and are important for caller. + ioe.addSuppressed(ioe2); + } throw ioe; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto index 401d3b8fe74a6..d2f72f919b683 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto @@ -38,13 +38,13 @@ message DataTransferEncryptorMessageProto { SUCCESS = 0; ERROR_UNKNOWN_KEY = 1; ERROR = 2; - ERROR_ACCESS_TOKEN = 3; } required DataTransferEncryptorStatus status = 1; optional bytes payload = 2; optional string message = 3; repeated CipherOptionProto cipherOption = 4; optional HandshakeSecretProto handshakeSecret = 5; + optional bool accessTokenError = 6; } message HandshakeSecretProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index f943170ec271b..fcb6b7d7bc575 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -480,7 +480,6 @@ private static void sendInvalidKeySaslErrorMessage(DataOutputStream out, */ private static void sendInvalidTokenSaslErrorMessage(DataOutputStream out, String message) throws IOException { - sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_ACCESS_TOKEN, null, - message); + sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message, null, true); } } From fdcbea2d8a36e5accb2e133af908a724df1319c5 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Fri, 26 Nov 2021 18:53:36 +0900 Subject: [PATCH 07/11] Fix checkstyle --- .../hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index bee3678882308..ab5cd0608d9d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -221,7 
+221,8 @@ private static T readSaslMessage(InputStream in, case SUCCESS: return handler.apply(proto); default: - throw new IOException("Unknown status: " + proto.getStatus() + ", message: " + proto.getMessage()); + throw new IOException( + "Unknown status: " + proto.getStatus() + ", message: " + proto.getMessage()); } } From b432e3204156ea89ae9e9aafa94a310bb0a848c0 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Wed, 1 Dec 2021 23:11:42 +0900 Subject: [PATCH 08/11] Rename ioe2 -> e --- .../protocol/datatransfer/sasl/SaslDataTransferClient.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index 64ffb40415c1a..759c532e14a42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -605,14 +605,14 @@ private IOStreamPair doSaslHandshake(InetAddress addr, } catch (IOException ioe) { try { sendGenericSaslErrorMessage(out, ioe.getMessage()); - } catch (Exception ioe2) { + } catch (Exception e) { // If ioe is caused by error response from server, server will close peer connection. // So sendGenericSaslErrorMessage might cause IOException due to "Broken pipe". // We suppress IOException from sendGenericSaslErrorMessage // and always throw `ioe` as top level. // `ioe` can be InvalidEncryptionKeyException or InvalidBlockTokenException // that indicates refresh key or token and are important for caller. - ioe.addSuppressed(ioe2); + ioe.addSuppressed(e); } throw ioe; } From 48c75cedcba6eed622bfd3868380f6bf3fa4c1db Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Thu, 2 Dec 2021 20:04:43 +0900 Subject: [PATCH 09/11] Debug log for suppressed exception --- .../protocol/datatransfer/sasl/SaslDataTransferClient.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index 759c532e14a42..8cb9aee683ad8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -603,8 +603,9 @@ private IOStreamPair doSaslHandshake(InetAddress addr, conf, cipherOption, underlyingOut, underlyingIn, false) : sasl.createStreamPair(out, in); } catch (IOException ioe) { + String message = ioe.getMessage(); try { - sendGenericSaslErrorMessage(out, ioe.getMessage()); + sendGenericSaslErrorMessage(out, message); } catch (Exception e) { // If ioe is caused by error response from server, server will close peer connection. // So sendGenericSaslErrorMessage might cause IOException due to "Broken pipe". @@ -612,6 +613,8 @@ private IOStreamPair doSaslHandshake(InetAddress addr, // and always throw `ioe` as top level. 
// `ioe` can be InvalidEncryptionKeyException or InvalidBlockTokenException // that indicates refresh key or token and are important for caller. + LOG.debug("Failed to send generic sasl error (server: {}, message: {}), suppress exception", + addr.toString(), message, e); ioe.addSuppressed(e); } throw ioe; From 495ba76857851a6acfc948e3b373f40449c2c873 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Fri, 3 Dec 2021 11:21:12 +0900 Subject: [PATCH 10/11] Remove toString for logging args --- .../datatransfer/sasl/SaslDataTransferClient.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index 8cb9aee683ad8..641c7a0ff4790 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -588,11 +588,11 @@ private IOStreamPair doSaslHandshake(InetAddress addr, // the client accepts some cipher suites, but the server does not. LOG.debug("Client accepts cipher suites {}, " + "but server {} does not accept any of them", - cipherSuites, addr.toString()); + cipherSuites, addr); } } else { LOG.debug("Client using cipher suite {} with server {}", - cipherOption.getCipherSuite().getName(), addr.toString()); + cipherOption.getCipherSuite().getName(), addr); } } } @@ -613,8 +613,8 @@ private IOStreamPair doSaslHandshake(InetAddress addr, // and always throw `ioe` as top level. // `ioe` can be InvalidEncryptionKeyException or InvalidBlockTokenException // that indicates refresh key or token and are important for caller. - LOG.debug("Failed to send generic sasl error (server: {}, message: {}), suppress exception", - addr.toString(), message, e); + LOG.debug("Failed to send generic sasl error to server {} (message: {}), " + + "suppress exception", addr, message, e); ioe.addSuppressed(e); } throw ioe; From 5fe1d32e150a50598c0760a7b3848b0cee87ffe4 Mon Sep 17 00:00:00 2001 From: "yoshida.shinya" Date: Fri, 3 Dec 2021 15:16:21 +0900 Subject: [PATCH 11/11] triggerrebuild
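---

Note: a minimal caller-side sketch of the retry behavior this series enables. It is illustrative, not part of the patches: `BlockReadRetrySketch`, `readBlockFrom()`, and `refreshBlockToken()` are hypothetical names standing in for the real DFSInputStream read path and the NameNode token refetch; only `InvalidBlockTokenException` and the flagged-ERROR handshake response come from the patches above.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;

    class BlockReadRetrySketch {
      byte[] readWithTokenRetry() throws IOException {
        try {
          // The SASL handshake runs inside the read. With this series, a DataNode
          // that rejects an expired block token replies ERROR with
          // accessTokenError=true instead of a generic ERROR, and the client-side
          // readSaslMessage() maps that reply to InvalidBlockTokenException.
          return readBlockFrom();
        } catch (InvalidBlockTokenException e) {
          // The typed exception tells the caller that a fresh token (not a
          // different DataNode) is needed, mirroring the existing
          // ERROR_UNKNOWN_KEY / InvalidEncryptionKeyException flow.
          refreshBlockToken();     // hypothetical: refetch the block token from the NN
          return readBlockFrom();  // retry against the same DataNode
        }
      }

      // Hypothetical stubs so the sketch compiles; the real logic lives in
      // DFSInputStream and DFSClient.
      private byte[] readBlockFrom() throws IOException { return new byte[0]; }
      private void refreshBlockToken() { }
    }

Patch 06's switch from a new ERROR_ACCESS_TOKEN enum value to an optional accessTokenError bool on the existing ERROR status presumably keeps the message parseable by peers that predate this change, since an unrecognized value in the required proto2 status enum would not round-trip cleanly for old readers; the patch itself does not state the rationale.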