diff --git a/BUILDING.txt b/BUILDING.txt
index 03dffdd80c74c..d3c9a1a7f51ee 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -6,6 +6,7 @@ Requirements:
* Unix System
* JDK 1.8
* Maven 3.3 or later
+* Protocol Buffers 3.7.1 (if compiling native code)
* CMake 3.1 or newer (if compiling native code)
* Zlib devel (if compiling native code)
* Cyrus SASL devel (if compiling native code)
@@ -61,6 +62,16 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
$ sudo apt-get -y install maven
* Native libraries
$ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev libsasl2-dev
+* Protocol Buffers 3.7.1 (required to build native code)
+ $ mkdir -p /opt/protobuf-3.7-src \
+ && curl -L -s -S \
+ https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
+ -o /opt/protobuf-3.7.1.tar.gz \
+ && tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C /opt/protobuf-3.7-src \
+ && cd /opt/protobuf-3.7-src \
+ && ./configure \
+ && make install \
+ && rm -rf /opt/protobuf-3.7-src
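+  The installed version can be verified with:
+  $ protoc --version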
Optional packages:
@@ -384,6 +395,15 @@ Installing required dependencies for clean install of macOS 10.14:
* Install native libraries, only openssl is required to compile native code,
you may optionally install zlib, lz4, etc.
$ brew install openssl
+* Protocol Buffers 3.7.1 (required to compile native code)
+ $ wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
+ $ mkdir -p protobuf-3.7 && tar zxvf protobuf-java-3.7.1.tar.gz --strip-components 1 -C protobuf-3.7
+ $ cd protobuf-3.7
+ $ ./configure
+ $ make
+ $ make check
+ $ make install
+ $ protoc --version
Note that building Hadoop 3.1.1/3.1.2/3.2.0 native code from source is broken
on macOS. For 3.1.1/3.1.2, you need to manually backport YARN-8622. For 3.2.0,
@@ -409,6 +429,7 @@ Requirements:
* Windows System
* JDK 1.8
* Maven 3.0 or later
+* Protocol Buffers 3.7.1
* CMake 3.1 or newer
* Visual Studio 2010 Professional or Higher
* Windows SDK 8.1 (if building CPU rate control for the container executor)
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 969d8bb44e376..65cada2784df9 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -105,6 +105,23 @@ RUN mkdir -p /opt/cmake \
ENV CMAKE_HOME /opt/cmake
ENV PATH "${PATH}:/opt/cmake/bin"
+######
+# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial)
+######
+# hadolint ignore=DL3003
+RUN mkdir -p /opt/protobuf-src \
+ && curl -L -s -S \
+ https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
+ -o /opt/protobuf.tar.gz \
+ && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \
+ && cd /opt/protobuf-src \
+ && ./configure --prefix=/opt/protobuf \
+ && make install \
+ && cd /root \
+ && rm -rf /opt/protobuf-src
+ENV PROTOBUF_HOME /opt/protobuf
+ENV PATH "${PATH}:/opt/protobuf/bin"
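+# Optional sanity check: "protoc --version" should now report libprotoc 3.7.1.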
+
######
# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial)
######
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
index 090696483be34..b8f9e87e66b1d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
@@ -119,6 +119,59 @@ Return the data at the current position.
else
result = -1
+### `InputStream.available()`
+
+Returns an estimate of the number of bytes that can be read from the stream
+before `read()` blocks on any IO (i.e. before the thread is potentially
+suspended for some time).
+
+That is: for all values `v` returned by `available()`, `read(buffer, 0, v)`
+should not block.
+
+#### Postconditions
+
+```python
+if len(data) == 0:
+ result = 0
+
+elif pos >= len(data):
+ result = 0
+
+else:
+ d = "the amount of data known to be already buffered/cached locally"
+ result = max(1, d) # optional but recommended: see below.
+```
+
+As `0` always meets this condition, it is nominally possible for an
+implementation to simply return `0`. However, this is not considered
+useful, and some applications/libraries expect a positive number.
+
+#### The GZip problem
+
+[JDK-7036144](http://bugs.java.com/bugdatabase/view_bug.do?bug_id=7036144),
+"GZIPInputStream readTrailer uses faulty available() test for end-of-stream",
+discusses how the JDK's GZip code uses `available()` to detect an EOF,
+in a loop similar to the following:
+
+```java
+while (instream.available() > 0) {
+ process(instream.read());
+}
+```
+
+The correct loop would have been:
+
+```java
+int r;
+while ((r = instream.read()) >= 0) {
+ process(r);
+}
+```
+
+If `available()` ever returns 0, then the gzip loop halts prematurely.
+
+For this reason, implementations *should* return a value >= 1, even though
+this violates the requirement that `available()` return only the amount
+guaranteed not to block on reads.
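+
+A minimal sketch of such an implementation, for a hypothetical stream that
+tracks its position, its total length, and a (possibly zero) count of
+locally buffered bytes; all names here are illustrative, not part of any
+existing API:
+
+```java
+@Override
+public int available() throws IOException {
+  checkNotClosed();             // illustrative: fail if the stream is closed
+  if (pos >= length) {
+    return 0;                   // at or past EOF: nothing left to read
+  }
+  // Returning at least 1 keeps GZIPInputStream-style loops alive, even
+  // though reading that byte may, in fact, block on IO.
+  return Math.max(1, buffered);
+}
+```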
### `InputStream.read(buffer[], offset, length)`
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index ca8e4a053beac..db3691611b118 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -32,6 +32,7 @@
import java.io.IOException;
import java.util.Random;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
@@ -99,14 +100,18 @@ public void testSeekZeroByteFile() throws Throwable {
describe("seek and read a 0 byte file");
instream = getFileSystem().open(zeroByteFile);
assertEquals(0, instream.getPos());
+ assertAvailableIsZero(instream);
//expect initial read to fai;
int result = instream.read();
assertMinusOne("initial byte read", result);
+ assertAvailableIsZero(instream);
byte[] buffer = new byte[1];
//expect that seek to 0 works
instream.seek(0);
+ assertAvailableIsZero(instream);
//reread, expect same exception
result = instream.read();
+ assertAvailableIsZero(instream);
assertMinusOne("post-seek byte read", result);
result = instream.read(buffer, 0, 1);
assertMinusOne("post-seek buffer read", result);
@@ -132,8 +137,8 @@ public void testBlockReadZeroByteFile() throws Throwable {
@Test
public void testSeekReadClosedFile() throws Throwable {
instream = getFileSystem().open(smallSeekFile);
- getLogger().debug(
- "Stream is of type " + instream.getClass().getCanonicalName());
+ getLogger().debug("Stream is of type {}",
+ instream.getClass().getCanonicalName());
instream.close();
try {
instream.seek(0);
@@ -168,10 +173,18 @@ public void testSeekReadClosedFile() throws Throwable {
try {
long offset = instream.getPos();
} catch (IOException e) {
- // its valid to raise error here; but the test is applied to make
+ // it is valid to raise error here; but the test is applied to make
// sure there's no other exception like an NPE.
}
+ // a closed stream should either fail or return 0 bytes.
+ try {
+ int a = instream.available();
+ LOG.info("available() returns a value on a closed file: {}", a);
+ assertAvailableIsZero(instream);
+ } catch (IOException | IllegalStateException expected) {
+ // expected
+ }
//and close again
instream.close();
}
@@ -205,6 +226,7 @@ public void testSeekFile() throws Throwable {
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
+ assertAvailableIsPositive(instream);
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.getPos());
@@ -226,13 +248,24 @@ public void testSeekAndReadPastEndOfFile() throws Throwable {
//go just before the end
instream.seek(TEST_FILE_LEN - 2);
assertTrue("Premature EOF", instream.read() != -1);
+ assertAvailableIsPositive(instream);
assertTrue("Premature EOF", instream.read() != -1);
+ checkAvailabilityAtEOF();
assertMinusOne("read past end of file", instream.read());
}
+ /**
+   * This can be overridden if a filesystem's available() call always
+   * returns a value greater than zero, even at the end of the file.
+   * @throws IOException failure when probing the stream.
+ */
+ protected void checkAvailabilityAtEOF() throws IOException {
+ assertAvailableIsZero(instream);
+ }
+
@Test
public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
- describe("do a seek past the EOF, then verify the stream recovers");
+ describe("do a seek past the EOF, " +
+ "then verify the stream recovers");
instream = getFileSystem().open(smallSeekFile);
//go just before the end. This may or may not fail; it may be delayed until the
//read
@@ -261,6 +294,7 @@ public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
//now go back and try to read from a valid point in the file
instream.seek(1);
assertTrue("Premature EOF", instream.read() != -1);
+ assertAvailableIsPositive(instream);
}
/**
@@ -278,6 +312,7 @@ public void testSeekBigFile() throws Throwable {
//expect that seek to 0 works
instream.seek(0);
int result = instream.read();
+ assertAvailableIsPositive(instream);
assertEquals(0, result);
assertEquals(1, instream.read());
assertEquals(2, instream.read());
@@ -296,6 +331,7 @@ public void testSeekBigFile() throws Throwable {
instream.seek(0);
assertEquals(0, instream.getPos());
instream.read();
+ assertAvailableIsPositive(instream);
assertEquals(1, instream.getPos());
byte[] buf = new byte[80 * 1024];
instream.readFully(1, buf, 0, buf.length);
@@ -314,7 +350,7 @@ public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals(40000, instream.getPos());
-
+ assertAvailableIsPositive(instream);
int v = 256;
byte[] readBuffer = new byte[v];
assertEquals(v, instream.read(128, readBuffer, 0, v));
@@ -322,6 +358,7 @@ public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
assertEquals(40000, instream.getPos());
//content is the same too
assertEquals("@40000", block[40000], (byte) instream.read());
+ assertAvailableIsPositive(instream);
//now verify the picked up data
for (int i = 0; i < 256; i++) {
assertEquals("@" + i, block[i + 128], readBuffer[i]);
@@ -376,6 +413,7 @@ public void testReadFullyZeroByteFile() throws Throwable {
assertEquals(0, instream.getPos());
byte[] buffer = new byte[1];
instream.readFully(0, buffer, 0, 0);
+ assertAvailableIsZero(instream);
assertEquals(0, instream.getPos());
// seek to 0 read 0 bytes from it
instream.seek(0);
@@ -551,7 +589,9 @@ public void testReadSmallFile() throws Throwable {
fail("Expected an exception, got " + r);
} catch (EOFException e) {
handleExpectedException(e);
- } catch (IOException | IllegalArgumentException | IndexOutOfBoundsException e) {
+ } catch (IOException
+ | IllegalArgumentException
+ | IndexOutOfBoundsException e) {
handleRelaxedException("read() with a negative position ",
"EOFException",
e);
@@ -587,6 +627,29 @@ public void testReadAtExactEOF() throws Throwable {
instream = getFileSystem().open(smallSeekFile);
instream.seek(TEST_FILE_LEN -1);
assertTrue("read at last byte", instream.read() > 0);
+    checkAvailabilityAtEOF();
assertEquals("read just past EOF", -1, instream.read());
}
+
+ /**
+ * Assert that the number of bytes available is zero.
+ * @param in input stream
+ */
+ protected static void assertAvailableIsZero(FSDataInputStream in)
+ throws IOException {
+ assertEquals("stream.available() should be zero",
+ 0, in.available());
+ }
+
+ /**
+ * Assert that the number of bytes available is greater than zero.
+ * @param in input stream
+ */
+ protected static void assertAvailableIsPositive(FSDataInputStream in)
+ throws IOException {
+ int available = in.available();
+ assertTrue("stream.available() should be positive but is "
+ + available,
+ available > 0);
+ }
}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 8218c7708712d..b15828a153098 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -78,7 +78,9 @@ public class XceiverClientManager implements Closeable {
private boolean isSecurityEnabled;
private final boolean topologyAwareRead;
/**
- * Creates a new XceiverClientManager.
+   * Creates a new XceiverClientManager for a non-secured Ozone cluster.
+   * For a security-enabled Ozone cluster, the client should use the other
+   * constructor, passing a valid CA certificate as a PEM string.
*
* @param conf configuration
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index c62d9773639fc..2828f6ea41ca0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -41,8 +41,7 @@
*/
public final class Pipeline {
- private static final Logger LOG = LoggerFactory
- .getLogger(Pipeline.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class);
private final PipelineID id;
private final ReplicationType type;
private final ReplicationFactor factor;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
index aa05d88dadabe..7be2921b6a117 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
@@ -24,7 +24,7 @@
* CacheKey for the RocksDB table.
 * @param <KEY>
*/
-public class CacheKey<KEY> {
+public class CacheKey<KEY> implements Comparable<CacheKey<KEY>> {
private final KEY key;
@@ -53,4 +53,13 @@ public boolean equals(Object o) {
public int hashCode() {
return Objects.hash(key);
}
+
+  @Override
+  public int compareTo(CacheKey<KEY> other) {
+    if (Objects.equals(key, other.key)) {
+      return 0;
+    } else {
+      return key.toString().compareTo(other.key.toString());
+    }
+  }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
index c3215c475eb9b..3e6999a49cfaa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
@@ -23,6 +23,7 @@
import java.util.Map;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -47,7 +48,7 @@
 public class TableCacheImpl<CACHEKEY extends CacheKey,
     CACHEVALUE extends CacheValue> implements TableCache<CACHEKEY, CACHEVALUE> {
 
-  private final ConcurrentHashMap<CACHEKEY, CACHEVALUE> cache;
+  private final Map<CACHEKEY, CACHEVALUE> cache;
   private final NavigableSet<EpochEntry<CACHEKEY>> epochEntries;
   private ExecutorService executorService;
   private CacheCleanupPolicy cleanupPolicy;
@@ -55,7 +56,14 @@ public class TableCacheImpl<CACHEKEY extends CacheKey,
 
   public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) {
-    cache = new ConcurrentHashMap<>();
+
+    // Only the full-table cache (cleanup policy NEVER) needs its entries in
+    // sorted order, so use a skip-list map there and a hash map otherwise.
+    if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
+      cache = new ConcurrentSkipListMap<>();
+    } else {
+      cache = new ConcurrentHashMap<>();
+    }
epochEntries = new ConcurrentSkipListSet<>();
// Created a singleThreadExecutor, so one cleanup will be running at a
// time.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index a3d1c4ab28834..3f7d0b915d5d5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -453,6 +453,9 @@ public final class OzoneConfigKeys {
"ozone.network.topology.aware.read";
public static final boolean OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT = false;
+ public static final String OZONE_MANAGER_FAIR_LOCK = "ozone.om.lock.fair";
+ public static final boolean OZONE_MANAGER_FAIR_LOCK_DEFAULT = false;
+
/**
* There is no need to instantiate this class.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
index 49efad05feb5a..95dfd6c393cac 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
@@ -31,9 +31,12 @@ public final class ActiveLock {
/**
* Use ActiveLock#newInstance to create instance.
+ *
+ * @param fairness - if true the lock uses a fair ordering policy, else
+ * non-fair ordering.
*/
- private ActiveLock() {
- this.lock = new ReentrantReadWriteLock();
+ private ActiveLock(boolean fairness) {
+ this.lock = new ReentrantReadWriteLock(fairness);
this.count = new AtomicInteger(0);
}
@@ -42,8 +45,8 @@ private ActiveLock() {
*
* @return new ActiveLock
*/
- public static ActiveLock newInstance() {
- return new ActiveLock();
+ public static ActiveLock newInstance(boolean fairness) {
+ return new ActiveLock(fairness);
}
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
index 670d4d16378bd..3c2b5d4a394c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
@@ -37,18 +37,31 @@ public class LockManager {
private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
private final Map activeLocks = new ConcurrentHashMap<>();
- private final GenericObjectPool lockPool =
- new GenericObjectPool<>(new PooledLockFactory());
+ private final GenericObjectPool lockPool;
/**
- * Creates new LockManager instance with the given Configuration.
+   * Creates new LockManager instance with the given Configuration and uses
+   * non-fair mode for locks.
*
* @param conf Configuration object
*/
public LockManager(final Configuration conf) {
+ this(conf, false);
+ }
+
+ /**
+ * Creates new LockManager instance with the given Configuration.
+ *
+ * @param conf Configuration object
+ * @param fair - true to use fair lock ordering, else non-fair lock ordering.
+ */
+ public LockManager(final Configuration conf, boolean fair) {
final int maxPoolSize = conf.getInt(
HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY,
HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT);
+    lockPool = new GenericObjectPool<>(new PooledLockFactory(fair));
lockPool.setMaxTotal(maxPoolSize);
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
index 4c24ef74b2831..1e3ba05a3a2b2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
@@ -26,9 +26,14 @@
*/
public class PooledLockFactory extends BasePooledObjectFactory {
+  private final boolean fairness;
+
+ PooledLockFactory(boolean fair) {
+ this.fairness = fair;
+ }
@Override
public ActiveLock create() throws Exception {
- return ActiveLock.newInstance();
+ return ActiveLock.newInstance(fairness);
}
@Override
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 31bc65240d294..b0a59fa209ccb 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1529,6 +1529,17 @@
+  <property>
+    <name>ozone.om.lock.fair</name>
+    <value>false</value>
+    <description>If this is true, the Ozone Manager lock will be used in Fair
+      mode, which will schedule threads in the order received/queued. If this
+      is false, uses non-fair ordering. See
+      java.util.concurrent.locks.ReentrantReadWriteLock for more information
+      on fair/non-fair locks.
+    </description>
+  </property>
+
   <property>
     <name>ozone.om.ratis.enable</name>
     <value>false</value>
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
index ff30eca470e09..0b5c18e8205cb 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands;
@@ -36,17 +37,20 @@
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.commons.lang3.StringUtils;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.hdds.HddsUtils.getScmSecurityClient;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
@@ -136,8 +140,21 @@ public ScmClient createScmClient()
NetUtils.getDefaultSocketFactory(ozoneConf),
Client.getRpcTimeout(ozoneConf))),
StorageContainerLocationProtocol.class, ozoneConf);
- return new ContainerOperationClient(
- client, new XceiverClientManager(ozoneConf));
+
+    XceiverClientManager xceiverClientManager;
+ if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
+ SecurityConfig securityConfig = new SecurityConfig(ozoneConf);
+ SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient(
+ (OzoneConfiguration) securityConfig.getConfiguration());
+ String caCertificate =
+ scmSecurityProtocolClient.getCACertificate();
+ xceiverClientManager = new XceiverClientManager(ozoneConf,
+ OzoneConfiguration.of(ozoneConf).getObject(XceiverClientManager
+ .ScmClientConfig.class), caCertificate);
+ } else {
+ xceiverClientManager = new XceiverClientManager(ozoneConf);
+ }
+ return new ContainerOperationClient(client, xceiverClientManager);
}
public void checkContainerExists(ScmClient scmClient, long containerId)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html
index 7caba43124cab..b28c959be3058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html
@@ -21,16 +21,16 @@
Hadoop HttpFS Server