diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7eee00b78698..1e3e07c5926d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -92,10 +92,10 @@ jobs:
restore-keys: |
${{ runner.os }}-pnpm-
- name: Cache for maven dependencies
- uses: actions/cache@v4
+ uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -160,7 +160,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -198,7 +198,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -242,7 +242,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -310,7 +310,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -446,7 +446,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
@@ -506,7 +506,7 @@ jobs:
uses: actions/cache/restore@v4
with:
path: |
- ~/.m2/repository
+ ~/.m2/repository/*/*/*
!~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml
index b31762c6c946..73302ced1a4f 100644
--- a/.github/workflows/intermittent-test-check.yml
+++ b/.github/workflows/intermittent-test-check.yml
@@ -105,13 +105,13 @@ jobs:
with:
ref: ${{ github.event.inputs.ref }}
- name: Cache for maven dependencies
- uses: actions/cache@v4
+ uses: actions/cache/restore@v4
with:
- path: ~/.m2/repository
- key: maven-repo-${{ hashFiles('**/pom.xml') }}-8-single
+ path: |
+ ~/.m2/repository/*/*/*
+ !~/.m2/repository/org/apache/ozone
+ key: maven-repo-${{ hashFiles('**/pom.xml') }}
restore-keys: |
- maven-repo-${{ hashFiles('**/pom.xml') }}-8
- maven-repo-${{ hashFiles('**/pom.xml') }}
maven-repo-
- name: Setup java
uses: actions/setup-java@v4
diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml
new file mode 100644
index 000000000000..47d4d30f6f82
--- /dev/null
+++ b/.github/workflows/populate-cache.yml
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow creates a cache of Maven dependencies for the Ozone build.
+
+name: populate-cache
+
+on:
+ push:
+ branches:
+ - master
+ - ozone-1.4
+ paths:
+ - 'pom.xml'
+ - '**/pom.xml'
+ - '.github/workflows/populate-cache.yml'
+ schedule:
+ - cron: '20 3 * * *'
+
+jobs:
+ build:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout project
+ uses: actions/checkout@v4
+
+ - name: Restore cache for Maven dependencies
+ id: restore-cache
+ uses: actions/cache/restore@v4
+ with:
+ path: |
+ ~/.m2/repository/*/*/*
+ !~/.m2/repository/org/apache/ozone
+ key: maven-repo-${{ hashFiles('**/pom.xml') }}
+
+ - name: Setup Java
+ if: steps.restore-cache.outputs.cache-hit != 'true'
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: 8
+
+ - name: Fetch dependencies
+ if: steps.restore-cache.outputs.cache-hit != 'true'
+ run: mvn --batch-mode --fail-never --no-transfer-progress --show-version dependency:go-offline
+
+ - name: Delete Ozone jars from repo
+ if: steps.restore-cache.outputs.cache-hit != 'true'
+ run: rm -fr ~/.m2/repository/org/apache/ozone
+
+ - name: List repo contents
+ if: steps.restore-cache.outputs.cache-hit != 'true'
+ run: find ~/.m2/repository -type f | sort | xargs ls -lh
+
+ - name: Save cache for Maven dependencies
+ if: steps.restore-cache.outputs.cache-hit != 'true'
+ uses: actions/cache/save@v4
+ with:
+ path: |
+ ~/.m2/repository/*/*/*
+ !~/.m2/repository/org/apache/ozone
+ key: maven-repo-${{ hashFiles('**/pom.xml') }}
diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml
index 7269a9c417a6..6eb9c26f07df 100644
--- a/.github/workflows/repeat-acceptance.yml
+++ b/.github/workflows/repeat-acceptance.yml
@@ -91,9 +91,11 @@ jobs:
restore-keys: |
${{ runner.os }}-pnpm-
- name: Cache for maven dependencies
- uses: actions/cache@v4
+ uses: actions/cache/restore@v4
with:
- path: ~/.m2/repository
+ path: |
+ ~/.m2/repository/*/*/*
+ !~/.m2/repository/org/apache/ozone
key: maven-repo-${{ hashFiles('**/pom.xml') }}-${{ env.JAVA_VERSION }}
restore-keys: |
maven-repo-${{ hashFiles('**/pom.xml') }}
@@ -115,12 +117,6 @@ jobs:
hadoop-ozone/dist/target/ozone-*.tar.gz
!hadoop-ozone/dist/target/ozone-*-src.tar.gz
retention-days: 1
- - name: Delete temporary build artifacts before caching
- run: |
- #Never cache local artifacts
- rm -rf ~/.m2/repository/org/apache/ozone/hdds*
- rm -rf ~/.m2/repository/org/apache/ozone/ozone*
- if: always()
acceptance:
needs:
- prepare-job
diff --git a/SECURITY.md b/SECURITY.md
index 2f92dd685c12..3a89968026a2 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -5,13 +5,16 @@
The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community.
| Version | Supported |
-| ------------- | ------------------ |
+|---------------| ------------------ |
| 0.3.0 (alpha) | :x: |
| 0.4.0 (alpha) | :x: |
| 0.4.1 (alpha) | :x: |
| 0.5.0 (beta) | :x: |
-| 1.0 | :white_check_mark: |
-| 1.1 | :white_check_mark: |
+| 1.0.0 | :x: |
+| 1.1.0 | :x: |
+| 1.2.1 | :x: |
+| 1.3.0 | :x: |
+| 1.4.0 | :white_check_mark: |
## Reporting a Vulnerability
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 44af34cb919c..021d2fc30905 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -224,7 +224,7 @@ public enum ChecksumCombineMode {
private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED";
@PostConstruct
- private void validate() {
+ public void validate() {
Preconditions.checkState(streamBufferSize > 0);
Preconditions.checkState(streamBufferFlushSize > 0);
Preconditions.checkState(streamBufferMaxSize > 0);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java
index cff7a8ecd3ca..39ce2dc3f1d4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java
@@ -39,6 +39,11 @@ public void write(@Nonnull byte[] byteArray) throws IOException {
write(ByteBuffer.wrap(byteArray));
}
+ @Override
+ public void write(@Nonnull byte[] byteArray, int off, int len) throws IOException {
+ write(ByteBuffer.wrap(byteArray), off, len);
+ }
+
@Override
public void write(int b) throws IOException {
write(new byte[]{(byte) b});
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java
new file mode 100644
index 000000000000..0dd29cb50a45
--- /dev/null
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class TestOzoneClientConfig {
+
+ @Test
+ void missingSizeSuffix() {
+ final int bytes = 1024;
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setInt("ozone.client.bytes.per.checksum", bytes);
+
+ OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class);
+
+ assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum());
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
index 67f5c2f2bbc5..477a291f9283 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hdds.utils;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,6 +26,7 @@
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
/**
* Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to
@@ -37,7 +39,7 @@
* class MyResource implements AutoClosable {
* static final LeakDetector LEAK_DETECTOR = new LeakDetector("MyResource");
*
- * private final LeakTracker leakTracker = LEAK_DETECTOR.track(this, () -> {
+ * private final UncheckedAutoCloseable leakTracker = LEAK_DETECTOR.track(this, () -> {
* // report leaks, don't refer to the original object (MyResource) here.
* System.out.println("MyResource is not closed before being discarded.");
* });
@@ -53,13 +55,14 @@
* }
*/
public class LeakDetector {
- public static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class);
+ private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class);
+ private static final AtomicLong COUNTER = new AtomicLong();
private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
private final Set<LeakTracker> allLeaks = Collections.newSetFromMap(new ConcurrentHashMap<>());
private final String name;
public LeakDetector(String name) {
- this.name = name;
+ this.name = name + COUNTER.getAndIncrement();
start();
}
@@ -89,7 +92,7 @@ private void run() {
LOG.warn("Exiting leak detector {}.", name);
}
- public LeakTracker track(Object leakable, Runnable reportLeak) {
+ public UncheckedAutoCloseable track(Object leakable, Runnable reportLeak) {
// A rate filter can be put here to only track a subset of all objects, e.g. 5%, 10%,
// if we have proofs that leak tracking impacts performance, or a single LeakDetector
// thread can't keep up with the pace of object allocation.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakTracker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakTracker.java
index 6103d520ca8a..dfd07f7e584a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakTracker.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakTracker.java
@@ -18,6 +18,8 @@
*/
package org.apache.hadoop.hdds.utils;
+import org.apache.ratis.util.UncheckedAutoCloseable;
+
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Set;
@@ -27,7 +29,7 @@
*
* @see LeakDetector
*/
-public class LeakTracker extends WeakReference<Object> {
+final class LeakTracker extends WeakReference<Object> implements UncheckedAutoCloseable {
private final Set<LeakTracker> allLeaks;
private final Runnable leakReporter;
LeakTracker(Object referent, ReferenceQueue<Object> referenceQueue,
@@ -40,6 +42,7 @@ public class LeakTracker extends WeakReference<Object> {
/**
* Called by the tracked resource when closing.
*/
+ @Override
public void close() {
allLeaks.remove(this);
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/resource/TestLeakDetector.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/resource/TestLeakDetector.java
index 4a60fcc8a4d5..fd5cf75afa57 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/resource/TestLeakDetector.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/resource/TestLeakDetector.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdds.resource;
import org.apache.hadoop.hdds.utils.LeakDetector;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.junit.jupiter.api.Test;
import java.util.concurrent.atomic.AtomicInteger;
@@ -28,18 +28,21 @@
/**
* Test LeakDetector.
*/
-public class TestLeakDetector {
+class TestLeakDetector {
private static final LeakDetector LEAK_DETECTOR = new LeakDetector("test");
- private AtomicInteger leaks = new AtomicInteger(0);
+ private final AtomicInteger leaks = new AtomicInteger(0);
@Test
- public void testLeakDetector() throws Exception {
+ void testNoLeaks() throws Exception {
// create and close resource => no leaks.
createResource(true);
System.gc();
Thread.sleep(100);
assertEquals(0, leaks.get());
+ }
+ @Test
+ void testLeaks() throws Exception {
// create and not close => leaks.
createResource(false);
System.gc();
@@ -47,7 +50,7 @@ public void testLeakDetector() throws Exception {
assertEquals(1, leaks.get());
}
- private void createResource(boolean close) throws Exception {
+ private void createResource(boolean close) {
MyResource resource = new MyResource(leaks);
if (close) {
resource.close();
@@ -55,14 +58,14 @@ private void createResource(boolean close) throws Exception {
}
private static final class MyResource implements AutoCloseable {
- private final LeakTracker leakTracker;
+ private final UncheckedAutoCloseable leakTracker;
private MyResource(final AtomicInteger leaks) {
leakTracker = LEAK_DETECTOR.track(this, () -> leaks.incrementAndGet());
}
@Override
- public void close() throws Exception {
+ public void close() {
leakTracker.close();
}
}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
index 4ed59669a9df..e121e4333a0d 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
@@ -118,7 +118,7 @@ void set(ConfigurationTarget target, String key, Object value,
SIZE {
@Override
Object parse(String value, Config config, Class<?> type, String key) {
- StorageSize measure = StorageSize.parse(value);
+ StorageSize measure = StorageSize.parse(value, config.sizeUnit());
long val = Math.round(measure.getUnit().toBytes(measure.getValue()));
if (type == int.class) {
return (int) val;
@@ -130,9 +130,9 @@ Object parse(String value, Config config, Class<?> type, String key) {
void set(ConfigurationTarget target, String key, Object value,
Config config) {
if (value instanceof Long) {
- target.setStorageSize(key, (long) value, StorageUnit.BYTES);
+ target.setStorageSize(key, (long) value, config.sizeUnit());
} else if (value instanceof Integer) {
- target.setStorageSize(key, (int) value, StorageUnit.BYTES);
+ target.setStorageSize(key, (int) value, config.sizeUnit());
} else {
throw new ConfigurationException("Unsupported type " + value.getClass()
+ " for " + key);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 32fcbfec6e44..31089bc1c0b6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedLogger;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
@@ -405,12 +406,7 @@ private ManagedDBOptions getDefaultDBOptions(
// Apply logging settings.
if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
- org.rocksdb.Logger logger = new org.rocksdb.Logger(dbOptions) {
- @Override
- protected void log(InfoLogLevel infoLogLevel, String s) {
- ROCKS_DB_LOGGER.info(s);
- }
- };
+ ManagedLogger logger = new ManagedLogger(dbOptions, (infoLogLevel, s) -> ROCKS_DB_LOGGER.info(s));
InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
.getRocksdbLogLevel() + "_LEVEL");
logger.setInfoLogLevel(level);
diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml
index f6ebcb0a0201..7ed20ab6acb3 100644
--- a/hadoop-hdds/hadoop-dependency-client/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-client/pom.xml
@@ -39,6 +39,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hadoop-common
${hadoop.version}
+          <exclusion>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+          </exclusion>
org.apache.hadoop
hadoop-annotations
@@ -282,5 +286,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <dependency>
+      <groupId>org.xerial.snappy</groupId>
+      <artifactId>snappy-java</artifactId>
+    </dependency>
diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml
index 10b5369b8b4d..542dc2883a4c 100644
--- a/hadoop-hdds/hadoop-dependency-server/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-server/pom.xml
@@ -39,6 +39,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hadoop-common
${hadoop.version}
+          <exclusion>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+          </exclusion>
org.apache.curator
*
@@ -130,5 +134,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <dependency>
+      <groupId>org.xerial.snappy</groupId>
+      <artifactId>snappy-java</artifactId>
+    </dependency>
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
index ffee7c1f5519..8246d10820ba 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.BloomFilter;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed BloomFilter.
*/
public class ManagedBloomFilter extends BloomFilter {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
index 7b1da6a16923..055d4be9d9a3 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.TableFormatConfig;
@@ -33,7 +33,7 @@ public class ManagedColumnFamilyOptions extends ColumnFamilyOptions {
* instances.
*/
private boolean reused = false;
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
public ManagedColumnFamilyOptions() {
}
@@ -79,8 +79,11 @@ public boolean isReused() {
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
/**
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
index 0e397ed0e9b6..6ac4a2fa5b67 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.CompactRangeOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed CompactRangeOptions.
*/
public class ManagedCompactRangeOptions extends CompactRangeOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
index fa01e2e1018b..4eb2a0d2bc36 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
@@ -18,20 +18,37 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.DBOptions;
+import org.rocksdb.Logger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.LOG;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
/**
* Managed DBOptions.
*/
public class ManagedDBOptions extends DBOptions {
- private final LeakTracker leakTracker = track(this);
+
+ private final UncheckedAutoCloseable leakTracker = track(this);
+ private final AtomicReference<Logger> loggerRef = new AtomicReference<>();
+
+ @Override
+ public DBOptions setLogger(Logger logger) {
+ IOUtils.close(LOG, loggerRef.getAndSet(logger));
+ return super.setLogger(logger);
+ }
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ IOUtils.close(LOG, loggerRef.getAndSet(null));
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
index baad1ad7f4ca..388f5abea397 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.EnvOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed EnvOptions.
*/
public class ManagedEnvOptions extends EnvOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
index 126f5336ba90..b151f836f962 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.FlushOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed FlushOptions.
*/
public class ManagedFlushOptions extends FlushOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
index 1783a34587cf..ec68f42e748a 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.IngestExternalFileOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed IngestExternalFileOptions.
*/
public class ManagedIngestExternalFileOptions extends IngestExternalFileOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
index 5244863a5a17..8130361d79de 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.LRUCache;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,7 +27,7 @@
* Managed LRUCache.
*/
public class ManagedLRUCache extends LRUCache {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
public ManagedLRUCache(long capacity) {
super(capacity);
@@ -35,7 +35,10 @@ public ManagedLRUCache(long capacity) {
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java
new file mode 100644
index 000000000000..d04f91cd4e29
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.apache.ratis.util.UncheckedAutoCloseable;
+import org.rocksdb.InfoLogLevel;
+import org.rocksdb.Logger;
+
+import java.util.function.BiConsumer;
+
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
+
+/** Managed {@link Logger}. */
+public class ManagedLogger extends Logger {
+
+ private final UncheckedAutoCloseable leakTracker = track(this);
+ private final BiConsumer<InfoLogLevel, String> delegate;
+
+ public ManagedLogger(ManagedDBOptions dbOptions, BiConsumer<InfoLogLevel, String> delegate) {
+ super(dbOptions);
+ this.delegate = delegate;
+ }
+
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ delegate.accept(infoLogLevel, logMsg);
+ }
+
+ @Override
+ public void close() {
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
+ }
+}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
index 1e4068a7a800..522ca1ac3252 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.RocksObject;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -29,7 +29,7 @@
*/
class ManagedObject<T extends RocksObject> implements AutoCloseable {
private final T original;
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
ManagedObject(T original) {
this.original = original;
@@ -41,7 +41,10 @@ public T get() {
@Override
public void close() {
- original.close();
- leakTracker.close();
+ try {
+ original.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
index e438068e3a70..9cf0a46fd8b6 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.Options;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed Options.
*/
public class ManagedOptions extends Options {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
index 48c2238ec4a0..39d41482751a 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.ReadOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed {@link ReadOptions}.
*/
public class ManagedReadOptions extends ReadOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index 7ae7001ccd39..3d7c08275de4 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.utils.LeakDetector;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.awaitility.Awaitility;
import org.awaitility.core.ConditionTimeoutException;
import org.rocksdb.RocksDB;
@@ -49,7 +49,7 @@ private ManagedRocksObjectUtils() {
private static final LeakDetector LEAK_DETECTOR = new LeakDetector("ManagedRocksObject");
- static LeakTracker track(AutoCloseable object) {
+ static UncheckedAutoCloseable track(AutoCloseable object) {
ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
final Class> clazz = object.getClass();
final StackTraceElement[] stackTrace = getStackTrace();
@@ -80,7 +80,6 @@ static String formatStackTrace(@Nullable StackTraceElement[] elements) {
* @param maxDuration poll max duration.
* @param interval poll interval.
* @param pollDelayDuration poll delay val.
- * @return true if deleted.
*/
public static void waitForFileDelete(File file, Duration maxDuration,
Duration interval,
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
index 8c366bdaa423..cff320fec5e2 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.Slice;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,7 +27,7 @@
* Managed Slice.
*/
public class ManagedSlice extends Slice {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
public ManagedSlice(byte[] data) {
super(data);
@@ -40,9 +40,12 @@ public synchronized long getNativeHandle() {
@Override
protected void disposeInternal() {
- super.disposeInternal();
// RocksMutableObject.close is final thus can't be decorated.
// So, we decorate disposeInternal instead to track closure.
- leakTracker.close();
+ try {
+ super.disposeInternal();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
index de7e9d526634..0c9f27dd5eb5 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.EnvOptions;
import org.rocksdb.Options;
import org.rocksdb.SstFileWriter;
@@ -29,7 +29,7 @@
* Managed SstFileWriter.
*/
public class ManagedSstFileWriter extends SstFileWriter {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
public ManagedSstFileWriter(EnvOptions envOptions,
Options options) {
@@ -38,7 +38,10 @@ public ManagedSstFileWriter(EnvOptions envOptions,
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
index 75af8b881355..8fc166bb6122 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.Statistics;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed Statistics.
*/
public class ManagedStatistics extends Statistics {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
index b1411b09a49a..bda1af7d59bb 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.WriteBatch;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,7 +27,7 @@
* Managed WriteBatch.
*/
public class ManagedWriteBatch extends WriteBatch {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
public ManagedWriteBatch() {
}
@@ -38,7 +38,10 @@ public ManagedWriteBatch(byte[] data) {
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
index 5d32a290b5e2..4ce8bc037bb6 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.apache.hadoop.hdds.utils.LeakTracker;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.WriteOptions;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
@@ -27,11 +27,14 @@
* Managed {@link WriteOptions}.
*/
public class ManagedWriteOptions extends WriteOptions {
- private final LeakTracker leakTracker = track(this);
+ private final UncheckedAutoCloseable leakTracker = track(this);
@Override
public void close() {
- super.close();
- leakTracker.close();
+ try {
+ super.close();
+ } finally {
+ leakTracker.close();
+ }
}
}
diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
index 50f349186089..dc08720c9687 100644
--- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
@@ -51,4 +51,9 @@
+
+
+
+
+
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index 979cff799fa5..84263435cd29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -293,6 +293,7 @@ public synchronized void start() {
if (!isRunning()) {
LOG.info("Starting Replication Monitor Thread.");
running = true;
+ metrics = ReplicationManagerMetrics.create(this);
if (rmConf.isLegacyEnabled()) {
legacyReplicationManager.setMetrics(metrics);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
index 5c3ee4e29aec..eb75db9bd504 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
@@ -235,10 +235,15 @@ public ReplicationManagerMetrics(ReplicationManager manager) {
}
public static ReplicationManagerMetrics create(ReplicationManager manager) {
- return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME,
- "SCM Replication manager (closed container replication) related "
- + "metrics",
- new ReplicationManagerMetrics(manager));
+ ReplicationManagerMetrics replicationManagerMetrics = (ReplicationManagerMetrics)
+ DefaultMetricsSystem.instance().getSource(METRICS_SOURCE_NAME);
+ if (replicationManagerMetrics == null) {
+ return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME,
+ "SCM Replication manager (closed container replication) related "
+ + "metrics",
+ new ReplicationManagerMetrics(manager));
+ }
+ return replicationManagerMetrics;
}
@Override
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
index 6432b2e623eb..b5fa7c7f2fc9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.ozone.test.TestClock;
import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
@@ -68,6 +69,13 @@ public void setup() {
dn3 = MockDatanodeDetails.randomDatanodeDetails();
}
+ @AfterEach
+ void cleanup() {
+ if (metrics != null) {
+ metrics.unRegister();
+ }
+ }
+
@Test
public void testGetPendingOpsReturnsEmptyList() {
List<ContainerReplicaOp> ops =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
index e65485709de7..4f3c0702d712 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.assertj.core.util.Lists;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -171,6 +172,13 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) {
.thenReturn(new ContainerPlacementStatusDefault(2, 2, 3));
}
+ @AfterEach
+ void cleanup() {
+ if (metrics != null) {
+ metrics.unRegister();
+ }
+ }
+
@ParameterizedTest
@ValueSource(strings = {"rs-6-3-1024k", "rs-10-4-1024k"})
void defersNonCriticalPartialReconstruction(String rep) throws IOException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index ff65da6bfd97..8e1fa7b33bbf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.util.Lists;
import org.apache.ozone.test.TestClock;
import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -168,6 +169,13 @@ public void setup() throws IOException {
Mockito.when(scmContext.isInSafeMode()).thenReturn(false);
}
+ @AfterEach
+ void cleanup() {
+ if (replicationManager.getMetrics() != null) {
+ replicationManager.getMetrics().unRegister();
+ }
+ }
+
private ReplicationManager createReplicationManager() throws IOException {
return new ReplicationManager(
configuration,
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 02ed8c9cc573..6a9b5937b770 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -91,6 +91,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
org.xerial
sqlite-jdbc
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <scope>test</scope>
+    </dependency>
org.mockito
mockito-core
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java
index 093dd93430b9..cc496a28e777 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java
@@ -22,13 +22,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
import picocli.CommandLine;
/**
@@ -75,12 +69,6 @@ public UserGroupInformation getUser() throws IOException {
* @param argv - System Args Strings[]
*/
public static void main(String[] argv) {
- LogManager.resetConfiguration();
- Logger.getRootLogger().setLevel(Level.INFO);
- Logger.getRootLogger()
- .addAppender(new ConsoleAppender(new PatternLayout("%m%n")));
- Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR);
-
new OzoneAdmin().run(argv);
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
index ff82b82ec87a..29f2f3d45727 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import java.io.IOException;
@@ -35,12 +33,9 @@
versionProvider = HddsVersionProvider.class)
public class ReplicationManagerStartSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class);
-
@Override
public void execute(ScmClient scmClient) throws IOException {
scmClient.startReplicationManager();
- LOG.info("Starting ReplicationManager...");
+ System.out.println("Starting ReplicationManager...");
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
index 9bc3649dd9f0..b2e308e14227 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import java.io.IOException;
@@ -35,18 +33,15 @@
versionProvider = HddsVersionProvider.class)
public class ReplicationManagerStatusSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class);
-
@Override
public void execute(ScmClient scmClient) throws IOException {
boolean execReturn = scmClient.getReplicationManagerStatus();
// Output data list
if (execReturn) {
- LOG.info("ReplicationManager is Running.");
+ System.out.println("ReplicationManager is Running.");
} else {
- LOG.info("ReplicationManager is Not Running.");
+ System.out.println("ReplicationManager is Not Running.");
}
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
index 7d3063a7636c..12de13c07d26 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import java.io.IOException;
@@ -35,14 +33,11 @@
versionProvider = HddsVersionProvider.class)
public class ReplicationManagerStopSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class);
-
@Override
public void execute(ScmClient scmClient) throws IOException {
scmClient.stopReplicationManager();
- LOG.info("Stopping ReplicationManager...");
- LOG.info("Requested SCM to stop ReplicationManager, " +
+ System.out.println("Stopping ReplicationManager...");
+ System.out.println("Requested SCM to stop ReplicationManager, " +
"it might take sometime for the ReplicationManager to stop.");
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
index db2f02c5e125..747215dcac71 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
@@ -24,8 +24,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import picocli.CommandLine.Command;
@@ -39,9 +37,6 @@
versionProvider = HddsVersionProvider.class)
public class SafeModeCheckSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(SafeModeCheckSubcommand.class);
-
@CommandLine.Option(names = {"--verbose"},
description = "Show detailed status of rules.")
private boolean verbose;
@@ -52,17 +47,17 @@ public void execute(ScmClient scmClient) throws IOException {
// Output data list
if (execReturn) {
- LOG.info("SCM is in safe mode.");
+ System.out.println("SCM is in safe mode.");
if (verbose) {
for (Map.Entry<String, Pair<Boolean, String>> entry :
scmClient.getSafeModeRuleStatuses().entrySet()) {
Pair<Boolean, String> value = entry.getValue();
- LOG.info("validated:{}, {}, {}",
+ System.out.printf("validated:%s, %s, %s%n",
value.getLeft(), entry.getKey(), value.getRight());
}
}
} else {
- LOG.info("SCM is out of safe mode.");
+ System.out.println("SCM is out of safe mode.");
}
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
index bcf64deb85e2..e4173c9767e3 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
@@ -22,8 +22,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
/**
@@ -36,14 +34,11 @@
versionProvider = HddsVersionProvider.class)
public class SafeModeExitSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(SafeModeExitSubcommand.class);
-
@Override
public void execute(ScmClient scmClient) throws IOException {
boolean execReturn = scmClient.forceExitSafeMode();
if (execReturn) {
- LOG.info("SCM exit safe mode successfully.");
+ System.out.println("SCM exit safe mode successfully.");
}
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
index abaca08cfbb9..ad94d4fffd0d 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
@@ -23,8 +23,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.Mixin;
@@ -39,9 +37,6 @@
versionProvider = HddsVersionProvider.class)
public class SafeModeWaitSubcommand implements Callable<Void> {
- private static final Logger LOG =
- LoggerFactory.getLogger(SafeModeWaitSubcommand.class);
-
@Option(description =
"Define timeout (in second) to wait until (exit code 1) "
+ "or until safemode is ended (exit code 0).", defaultValue = "30",
@@ -62,26 +57,26 @@ public Void call() throws Exception {
long remainingTime;
do {
if (!scmClient.inSafeMode()) {
- LOG.info("SCM is out of safe mode.");
+ System.out.println("SCM is out of safe mode.");
return null;
}
remainingTime = getRemainingTimeInSec();
if (remainingTime > 0) {
- LOG.info(
+ System.out.printf(
"SCM is in safe mode. Will retry in 1 sec. Remaining time "
- + "(sec): {}",
+ + "(sec): %s%n",
remainingTime);
Thread.sleep(1000);
} else {
- LOG.info("SCM is in safe mode. No more retries.");
+ System.out.println("SCM is in safe mode. No more retries.");
}
} while (remainingTime > 0);
} catch (InterruptedException ex) {
- LOG.info(
- "SCM is not available (yet?). Error is {}. Will retry in 1 sec. "
- + "Remaining time (sec): {}",
+ System.out.printf(
+ "SCM is not available (yet?). Error is %s. Will retry in 1 sec. "
+ + "Remaining time (sec): %s%n",
ex.getMessage(), getRemainingTimeInSec());
Thread.sleep(1000);
Thread.currentThread().interrupt();
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java
index cab7a29a4ea6..09caf8147ad4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import java.io.IOException;
@@ -36,13 +34,10 @@
versionProvider = HddsVersionProvider.class)
public class CleanExpiredCertsSubcommand extends ScmCertSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(CleanExpiredCertsSubcommand.class);
-
@Override
protected void execute(SCMSecurityProtocol client) throws IOException {
List<String> pemEncodedCerts = client.removeExpiredCertificates();
- LOG.info("List of removed expired certificates:");
- printCertList(LOG, pemEncodedCerts);
+ System.out.println("List of removed expired certificates:");
+ printCertList(pemEncodedCerts);
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
index 6177c8f7ff4e..c708d424d9c9 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
@@ -26,12 +26,8 @@
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
-import picocli.CommandLine.Model.CommandSpec;
import picocli.CommandLine.Parameters;
-import picocli.CommandLine.Spec;
/**
* This is the handler that process certificate info command.
@@ -44,12 +40,6 @@
class InfoSubcommand extends ScmCertSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(InfoSubcommand.class);
-
- @Spec
- private CommandSpec spec;
-
@Parameters(description = "Serial id of the certificate in decimal.")
private String serialId;
@@ -61,12 +51,12 @@ public void execute(SCMSecurityProtocol client) throws IOException {
"Certificate can't be found");
// Print container report info.
- LOG.info("Certificate id: {}", serialId);
+ System.out.printf("Certificate id: %s%n", serialId);
try {
X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr);
- LOG.info(cert.toString());
+ System.out.println(cert);
} catch (CertificateException ex) {
- LOG.error("Failed to get certificate id " + serialId);
+ System.err.println("Failed to get certificate id " + serialId);
throw new IOException("Fail to get certificate id " + serialId, ex);
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java
index c2e0bd7fadff..ea0898381478 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java
@@ -36,8 +36,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
import org.apache.hadoop.hdds.server.JsonUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import picocli.CommandLine.Help.Visibility;
import picocli.CommandLine.Option;
@@ -54,9 +52,6 @@
versionProvider = HddsVersionProvider.class)
public class ListSubcommand extends ScmCertSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(ListSubcommand.class);
-
@Option(names = {"-s", "--start"},
description = "Certificate serial id to start the iteration",
defaultValue = "0", showDefaultValue = Visibility.ALWAYS)
@@ -114,7 +109,7 @@ protected void execute(SCMSecurityProtocol client) throws IOException {
CertificateCodec.getX509Certificate(certPemStr);
certList.add(new Certificate(cert));
} catch (CertificateException ex) {
- LOG.error("Failed to parse certificate.");
+ err.println("Failed to parse certificate.");
}
}
System.out.println(
@@ -122,9 +117,9 @@ protected void execute(SCMSecurityProtocol client) throws IOException {
return;
}
- LOG.info("Certificate list:(Type={}, BatchSize={}, CertCount={})",
+ System.out.printf("Certificate list:(Type=%s, BatchSize=%s, CertCount=%s)%n",
type.toUpperCase(), count, certPemList.size());
- printCertList(LOG, certPemList);
+ printCertList(certPemList);
}
private static class BigIntJsonSerializer extends JsonSerializer<BigInteger> {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java
index d7ebb44e0ffc..354adbb5d6ba 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java
@@ -20,7 +20,6 @@
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.scm.cli.ScmOption;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.slf4j.Logger;
import picocli.CommandLine;
import java.io.IOException;
@@ -37,29 +36,29 @@ public abstract class ScmCertSubcommand implements Callable<Void> {
@CommandLine.Mixin
private ScmOption scmOption;
- private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s";
+ private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s%n";
- protected void printCertList(Logger log, List<String> pemEncodedCerts) {
+ protected void printCertList(List<String> pemEncodedCerts) {
if (pemEncodedCerts.isEmpty()) {
- log.info("No certificates to list");
+ System.out.println("No certificates to list");
return;
}
- log.info(String.format(OUTPUT_FORMAT, "SerialNumber", "Valid From",
- "Expiry", "Subject", "Issuer"));
+ System.out.printf(OUTPUT_FORMAT, "SerialNumber", "Valid From",
+ "Expiry", "Subject", "Issuer");
for (String certPemStr : pemEncodedCerts) {
try {
X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr);
- printCert(cert, log);
+ printCert(cert);
} catch (CertificateException e) {
- log.error("Failed to parse certificate.", e);
+ System.err.println("Failed to parse certificate: " + e.getMessage());
}
}
}
- protected void printCert(X509Certificate cert, Logger log) {
- log.info(String.format(OUTPUT_FORMAT, cert.getSerialNumber(),
+ protected void printCert(X509Certificate cert) {
+ System.out.printf(OUTPUT_FORMAT, cert.getSerialNumber(),
cert.getNotBefore(), cert.getNotAfter(), cert.getSubjectDN(),
- cert.getIssuerDN()));
+ cert.getIssuerDN());
}
protected abstract void execute(SCMSecurityProtocol client)
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
index 9eedbf858958..313dc64c9fc9 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
@@ -25,8 +25,6 @@
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
@@ -40,9 +38,6 @@
versionProvider = HddsVersionProvider.class)
public class CreateSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(CreateSubcommand.class);
-
@Option(description = "Owner of the new container", defaultValue = "OZONE",
names = { "-o", "--owner"})
private String owner;
@@ -50,7 +45,7 @@ public class CreateSubcommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
ContainerWithPipeline container = scmClient.createContainer(owner);
- LOG.info("Container {} is created.",
+ System.out.printf("Container %s is created.%n",
container.getContainerInfo().getContainerID());
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index 8ed9f520b29d..0e67661bba1d 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -45,8 +45,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
import org.apache.hadoop.hdds.server.JsonUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Model.CommandSpec;
@@ -63,9 +61,6 @@
versionProvider = HddsVersionProvider.class)
public class InfoSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(InfoSubcommand.class);
-
@Spec
private CommandSpec spec;
@@ -126,13 +121,13 @@ private void printOutput(ScmClient scmClient, String id, boolean first)
private void printHeader() {
if (json && multiContainer) {
- LOG.info("[");
+ System.out.println("[");
}
}
private void printFooter() {
if (json && multiContainer) {
- LOG.info("]");
+ System.out.println("]");
}
}
@@ -142,9 +137,9 @@ private void printError(String error) {
private void printBreak() {
if (json) {
- LOG.info(",");
+ System.out.println(",");
} else {
- LOG.info("");
+ System.out.println("");
}
}
@@ -175,47 +170,47 @@ private void printDetails(ScmClient scmClient, long containerID,
new ContainerWithPipelineAndReplicas(container.getContainerInfo(),
container.getPipeline(), replicas,
container.getContainerInfo().getPipelineID());
- LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
+ System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
} else {
ContainerWithoutDatanodes wrapper =
new ContainerWithoutDatanodes(container.getContainerInfo(),
container.getPipeline(), replicas,
container.getContainerInfo().getPipelineID());
- LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
+ System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
}
} else {
// Print container report info.
- LOG.info("Container id: {}", containerID);
+ System.out.printf("Container id: %s%n", containerID);
boolean verbose = spec != null
&& spec.root().userObject() instanceof GenericParentCommand
&& ((GenericParentCommand) spec.root().userObject()).isVerbose();
if (verbose) {
- LOG.info("Pipeline Info: {}", container.getPipeline());
+ System.out.printf("Pipeline Info: %s%n", container.getPipeline());
} else {
- LOG.info("Pipeline id: {}", container.getPipeline().getId().getId());
+ System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId());
}
- LOG.info("Write PipelineId: {}",
+ System.out.printf("Write PipelineId: %s%n",
container.getContainerInfo().getPipelineID().getId());
try {
String pipelineState = scmClient.getPipeline(
container.getContainerInfo().getPipelineID().getProtobuf())
.getPipelineState().toString();
- LOG.info("Write Pipeline State: {}", pipelineState);
+ System.out.printf("Write Pipeline State: %s%n", pipelineState);
} catch (IOException ioe) {
if (SCMHAUtils.unwrapException(
ioe) instanceof PipelineNotFoundException) {
- LOG.info("Write Pipeline State: CLOSED");
+ System.out.println("Write Pipeline State: CLOSED");
} else {
printError("Failed to retrieve pipeline info");
}
}
- LOG.info("Container State: {}", container.getContainerInfo().getState());
+ System.out.printf("Container State: %s%n", container.getContainerInfo().getState());
// Print pipeline of an existing container.
String machinesStr = container.getPipeline().getNodes().stream().map(
InfoSubcommand::buildDatanodeDetails)
.collect(Collectors.joining(",\n"));
- LOG.info("Datanodes: [{}]", machinesStr);
+ System.out.printf("Datanodes: [%s]%n", machinesStr);
// Print the replica details if available
if (replicas != null) {
@@ -223,7 +218,7 @@ private void printDetails(ScmClient scmClient, long containerID,
.sorted(Comparator.comparing(ContainerReplicaInfo::getReplicaIndex))
.map(InfoSubcommand::buildReplicaDetails)
.collect(Collectors.joining(",\n"));
- LOG.info("Replicas: [{}]", replicaStr);
+ System.out.printf("Replicas: [%s]%n", replicaStr);
}
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index b120fe4169da..ecc43d04087a 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -36,8 +36,6 @@
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import picocli.CommandLine.Help.Visibility;
import picocli.CommandLine.Option;
@@ -52,9 +50,6 @@
versionProvider = HddsVersionProvider.class)
public class ListSubcommand extends ScmSubcommand {
- private static final Logger LOG =
- LoggerFactory.getLogger(ListSubcommand.class);
-
@Option(names = {"-s", "--start"},
description = "Container id to start the iteration")
private long startId;
@@ -94,7 +89,7 @@ public class ListSubcommand extends ScmSubcommand {
private void outputContainerInfo(ContainerInfo containerInfo)
throws IOException {
// Print container report info.
- LOG.info("{}", WRITER.writeValueAsString(containerInfo));
+ System.out.println(WRITER.writeValueAsString(containerInfo));
}
@Override
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
index 9b264312fdf4..a5490b088638 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
@@ -28,9 +28,6 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
@@ -46,6 +43,7 @@
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.regex.Matcher;
@@ -54,6 +52,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.anyLong;
@@ -68,8 +67,6 @@ public class TestInfoSubCommand {
private ScmClient scmClient;
private InfoSubcommand cmd;
private List<DatanodeDetails> datanodes;
- private Logger logger;
- private TestAppender appender;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
@@ -89,18 +86,12 @@ public void setup() throws IOException {
Mockito.when(scmClient.getPipeline(any()))
.thenThrow(new PipelineNotFoundException("Pipeline not found."));
- appender = new TestAppender();
- logger = Logger.getLogger(
- org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand.class);
- logger.addAppender(appender);
-
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@AfterEach
public void after() {
- logger.removeAppender(appender);
System.setOut(originalOut);
System.setErr(originalErr);
System.setIn(originalIn);
@@ -152,10 +143,8 @@ public void testContainersCanBeReadFromStdin() throws IOException {
private void validateMultiOutput() throws UnsupportedEncodingException {
// Ensure we have a log line for each containerID
- List<LoggingEvent> logs = appender.getLog();
- List<LoggingEvent> replica = logs.stream()
- .filter(m -> m.getRenderedMessage()
- .matches("(?s)^Container id: (1|123|456|789).*"))
+ List<String> replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n"))
+ .filter(m -> m.matches("(?s)^Container id: (1|123|456|789).*"))
.collect(Collectors.toList());
Assertions.assertEquals(4, replica.size());
@@ -194,10 +183,8 @@ public void testMultipleContainersCanBePassedJson() throws Exception {
private void validateJsonMultiOutput() throws UnsupportedEncodingException {
// Ensure we have a log line for each containerID
- List<LoggingEvent> logs = appender.getLog();
- List<LoggingEvent> replica = logs.stream()
- .filter(m -> m.getRenderedMessage()
- .matches("(?s)^.*\"containerInfo\".*"))
+ List<String> replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n"))
+ .filter(m -> m.matches("(?s)^.*\"containerInfo\".*"))
.collect(Collectors.toList());
Assertions.assertEquals(4, replica.size());
@@ -217,34 +204,33 @@ private void testReplicaIncludedInOutput(boolean includeIndex)
cmd.execute(scmClient);
// Ensure we have a line for Replicas:
- List<LoggingEvent> logs = appender.getLog();
- List<LoggingEvent> replica = logs.stream()
- .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*"))
- .collect(Collectors.toList());
- Assertions.assertEquals(1, replica.size());
+ String output = outContent.toString(DEFAULT_ENCODING);
+ Pattern pattern = Pattern.compile("Replicas: \\[.*\\]", Pattern.DOTALL);
+ Matcher matcher = pattern.matcher(output);
+ assertTrue(matcher.find());
+ String replica = matcher.group();
// Ensure each DN UUID is mentioned in the message:
for (DatanodeDetails dn : datanodes) {
- Pattern pattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*",
+ Pattern uuidPattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*",
Pattern.DOTALL);
- Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage());
- Assertions.assertTrue(matcher.matches());
+ assertThat(replica).matches(uuidPattern);
}
// Ensure the replicaIndex output is in order
if (includeIndex) {
List<Integer> indexList = new ArrayList<>();
for (int i = 1; i < datanodes.size() + 1; i++) {
String temp = "ReplicaIndex: " + i;
- indexList.add(replica.get(0).getRenderedMessage().indexOf(temp));
+ indexList.add(replica.indexOf(temp));
}
Assertions.assertEquals(datanodes.size(), indexList.size());
Assertions.assertTrue(inSort(indexList));
}
// Ensure ReplicaIndex is not mentioned as it was not passed in the proto:
- Pattern pattern = Pattern.compile(".*ReplicaIndex.*",
- Pattern.DOTALL);
- Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage());
- Assertions.assertEquals(includeIndex, matcher.matches());
+ Assertions.assertEquals(includeIndex,
+ Pattern.compile(".*ReplicaIndex.*", Pattern.DOTALL)
+ .matcher(replica)
+ .matches());
}
@Test
@@ -257,9 +243,8 @@ public void testReplicasNotOutputIfError() throws IOException {
cmd.execute(scmClient);
// Ensure we have no lines for Replicas:
- List<LoggingEvent> logs = appender.getLog();
- List<LoggingEvent> replica = logs.stream()
- .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*"))
+ List<String> replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n"))
+ .filter(m -> m.matches("(?s)^Replicas:.*"))
.collect(Collectors.toList());
Assertions.assertEquals(0, replica.size());
@@ -278,9 +263,7 @@ public void testReplicasNotOutputIfErrorWithJson() throws IOException {
c.parseArgs("1", "--json");
cmd.execute(scmClient);
- List<LoggingEvent> logs = appender.getLog();
- Assertions.assertEquals(1, logs.size());
- String json = logs.get(0).getRenderedMessage();
+ String json = outContent.toString(DEFAULT_ENCODING);
Assertions.assertFalse(json.matches("(?s).*replicas.*"));
}
@@ -316,11 +299,8 @@ private void testJsonOutput() throws IOException {
c.parseArgs("1", "--json");
cmd.execute(scmClient);
- List<LoggingEvent> logs = appender.getLog();
- Assertions.assertEquals(1, logs.size());
-
// Ensure each DN UUID is mentioned in the message after replicas:
- String json = logs.get(0).getRenderedMessage();
+ String json = outContent.toString(DEFAULT_ENCODING);
Assertions.assertTrue(json.matches("(?s).*replicas.*"));
for (DatanodeDetails dn : datanodes) {
Pattern pattern = Pattern.compile(
@@ -415,25 +395,4 @@ private List<DatanodeDetails> createDatanodeDetails(int count) {
return dns;
}
- private static class TestAppender extends AppenderSkeleton {
- private final List<LoggingEvent> log = new ArrayList<>();
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
- @Override
- protected void append(final LoggingEvent loggingEvent) {
- log.add(loggingEvent);
- }
-
- @Override
- public void close() {
- }
-
- public List<LoggingEvent> getLog() {
- return new ArrayList<>(log);
- }
- }
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index cd5b1804721a..1db5c84562d8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -1239,7 +1239,7 @@ List getNextShallowListOfKeys(String prevKey)
proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false,
startKey, listCacheSize, false);
- if (addedKeyPrefix) {
+ if (addedKeyPrefix && statuses.size() > 0) {
// previous round already include the startKey, so remove it
statuses.remove(0);
} else {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 850ae0d19376..0be238257526 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1382,7 +1382,7 @@ public OzoneDataStreamOutput createStreamKey(
if (checkKeyNameEnabled) {
HddsClientUtils.verifyKeyName(keyName);
}
- HddsClientUtils.checkNotNull(keyName, replicationConfig);
+ HddsClientUtils.checkNotNull(keyName);
OmKeyArgs.Builder builder = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@@ -1767,7 +1767,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName,
HddsClientUtils.checkNotNull(keyName);
if (omVersion
.compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) {
- if (replicationConfig.getReplicationType()
+ if (replicationConfig != null && replicationConfig.getReplicationType()
== HddsProtos.ReplicationType.EC) {
throw new IOException("Can not set the replication of the file to"
+ " Erasure Coded replication, as OzoneManager does not support"
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index 952e0f348133..449efe6ff35f 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -221,6 +221,27 @@ EPL 2.0
jakarta.ws.rs:jakarta.ws.rs-api
org.aspectj:aspectjrt
org.aspectj:aspectjweaver
+ org.glassfish.hk2.external:aopalliance-repackaged
+ org.glassfish.hk2.external:jakarta.inject
+ org.glassfish.hk2.external:javax.inject
+ org.glassfish.hk2:guice-bridge
+ org.glassfish.hk2:hk2-api
+ org.glassfish.hk2:hk2-locator
+ org.glassfish.hk2:hk2-utils
+ org.glassfish.hk2:osgi-resource-locator
+ org.glassfish.jersey.containers:jersey-container-servlet
+ org.glassfish.jersey.containers:jersey-container-servlet-core
+ org.glassfish.jersey.core:jersey-client
+ org.glassfish.jersey.core:jersey-common
+ org.glassfish.jersey.core:jersey-server
+ org.glassfish.jersey.ext.cdi:jersey-cdi1x
+ org.glassfish.jersey.ext:jersey-entity-filtering
+ org.glassfish.jersey.inject:jersey-hk2
+ org.glassfish.jersey.media:jersey-media-jaxb
+ org.glassfish.jersey.media:jersey-media-json-jackson
+ org.jgrapht:jgrapht-core
+ org.jgrapht:jgrapht-ext
+
CDDL 1.1 + GPLv2 with classpath exception
@@ -240,28 +261,9 @@ CDDL 1.1 + GPLv2 with classpath exception
javax.servlet.jsp:jsp-api
javax.ws.rs:jsr311-api
javax.xml.bind:jaxb-api
- org.glassfish.hk2.external:aopalliance-repackaged
- org.glassfish.hk2.external:jakarta.inject
- org.glassfish.hk2.external:javax.inject
- org.glassfish.hk2:guice-bridge
- org.glassfish.hk2:hk2-api
- org.glassfish.hk2:hk2-locator
- org.glassfish.hk2:hk2-utils
- org.glassfish.hk2:osgi-resource-locator
- org.glassfish.jaxb:jaxb-runtime
org.glassfish.jaxb:jaxb-core
org.glassfish.jaxb:jaxb-runtime
org.glassfish.jaxb:txw2
- org.glassfish.jersey.containers:jersey-container-servlet
- org.glassfish.jersey.containers:jersey-container-servlet-core
- org.glassfish.jersey.core:jersey-client
- org.glassfish.jersey.core:jersey-common
- org.glassfish.jersey.core:jersey-server
- org.glassfish.jersey.ext.cdi:jersey-cdi1x
- org.glassfish.jersey.ext:jersey-entity-filtering
- org.glassfish.jersey.inject:jersey-hk2
- org.glassfish.jersey.media:jersey-media-jaxb
- org.glassfish.jersey.media:jersey-media-json-jackson
org.jvnet.staxex:stax-ex
@@ -463,24 +465,6 @@ MIT
org.slf4j:slf4j-reload4j
-EPL 2.0
-=====================
-
- jakarta.annotation:jakarta.annotation-api
- jakarta.ws.rs:jakarta.ws.rs-api
- org.jgrapht:jgrapht-core
- org.jgrapht:jgrapht-ext
-
-
-CDDL + GPLv2 with classpath exception
-=====================
-
- javax.annotation:javax.annotation-api
- javax.el:javax.el-api
- javax.interceptor:javax.interceptor-api
- javax.servlet:javax.servlet-api
-
-
Public Domain
=====================
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
index b2007c7e0279..9599727d1483 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
@@ -69,6 +69,8 @@ public class TestListKeys {
private static String omId;
private static OzoneBucket legacyOzoneBucket;
+
+ private static OzoneBucket obsOzoneBucket;
private static OzoneClient client;
@Rule
@@ -99,6 +101,10 @@ public static void init() throws Exception {
legacyOzoneBucket = TestDataUtil
.createVolumeAndBucket(client, BucketLayout.LEGACY);
+ // create a volume and a OBJECT_STORE bucket
+ obsOzoneBucket = TestDataUtil
+ .createVolumeAndBucket(client, BucketLayout.OBJECT_STORE);
+
initFSNameSpace();
}
@@ -112,6 +118,7 @@ public static void teardownClass() {
private static void initFSNameSpace() throws Exception {
buildNameSpaceTree(legacyOzoneBucket);
+ buildNameSpaceTree(obsOzoneBucket);
}
/**
@@ -121,9 +128,9 @@ private static void initFSNameSpace() throws Exception {
* |
* a1
* |
- * -----------------------------------
- * | | |
- * b1 b2 b3
+ * --------------------------------------------------------
+ * | | | |
+ * b1 b2 b3 b4
* ------- --------- -----------
* | | | | | | | |
* c1 c2 d1 d2 d3 e1 e2 e3
@@ -138,25 +145,27 @@ private static void initFSNameSpace() throws Exception {
private static void buildNameSpaceTree(OzoneBucket ozoneBucket)
throws Exception {
LinkedList<String> keys = new LinkedList<>();
- keys.add("/a1/b1/c1111.tx");
- keys.add("/a1/b1/c1222.tx");
- keys.add("/a1/b1/c1333.tx");
- keys.add("/a1/b1/c1444.tx");
- keys.add("/a1/b1/c1555.tx");
- keys.add("/a1/b1/c1/c1.tx");
- keys.add("/a1/b1/c12/c2.tx");
- keys.add("/a1/b1/c12/c3.tx");
-
- keys.add("/a1/b2/d1/d11.tx");
- keys.add("/a1/b2/d2/d21.tx");
- keys.add("/a1/b2/d2/d22.tx");
- keys.add("/a1/b2/d3/d31.tx");
-
- keys.add("/a1/b3/e1/e11.tx");
- keys.add("/a1/b3/e2/e21.tx");
- keys.add("/a1/b3/e3/e31.tx");
+ keys.add("a1/b1/c1111.tx");
+ keys.add("a1/b1/c1222.tx");
+ keys.add("a1/b1/c1333.tx");
+ keys.add("a1/b1/c1444.tx");
+ keys.add("a1/b1/c1555.tx");
+ keys.add("a1/b1/c1/c1.tx");
+ keys.add("a1/b1/c12/c2.tx");
+ keys.add("a1/b1/c12/c3.tx");
+
+ keys.add("a1/b2/d1/d11.tx");
+ keys.add("a1/b2/d2/d21.tx");
+ keys.add("a1/b2/d2/d22.tx");
+ keys.add("a1/b2/d3/d31.tx");
+
+ keys.add("a1/b3/e1/e11.tx");
+ keys.add("a1/b3/e2/e21.tx");
+ keys.add("a1/b3/e3/e31.tx");
createKeys(ozoneBucket, keys);
+
+ ozoneBucket.createDirectory("a1/b4/");
}
private static Stream<Arguments> shallowListDataWithTrailingSlash() {
@@ -199,6 +208,58 @@ private static Stream shallowListDataWithTrailingSlash() {
"a1/b1/c1333.tx",
"a1/b1/c1444.tx",
"a1/b1/c1555.tx"
+ ))),
+
+ // Case-7: StartKey is empty, return key that is same as keyPrefix.
+ of("a1/b4/", "", newLinkedList(Arrays.asList(
+ "a1/b4/"
+ )))
+ );
+ }
+
+ private static Stream<Arguments> shallowListObsDataWithTrailingSlash() {
+ return Stream.of(
+
+ // Case-1: StartKey is less than prefixKey, return emptyList.
+ of("a1/b2/", "a1", newLinkedList(Collections.emptyList())),
+
+ // Case-2: StartKey is empty, return all immediate node.
+ of("a1/b2/", "", newLinkedList(Arrays.asList(
+ "a1/b2/d1/",
+ "a1/b2/d2/",
+ "a1/b2/d3/"
+ ))),
+
+ // Case-3: StartKey is same as prefixKey, return all immediate nodes.
+ of("a1/b2/", "a1/b2", newLinkedList(Arrays.asList(
+ "a1/b2/d1/",
+ "a1/b2/d2/",
+ "a1/b2/d3/"
+ ))),
+
+ // Case-4: StartKey is greater than prefixKey
+ of("a1/b2/", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList(
+ "a1/b2/d2/",
+ "a1/b2/d3/"
+ ))),
+
+ // Case-5: StartKey reaches last element, return emptyList
+ of("a1/b2/", "a1/b2/d3/d31.tx", newLinkedList(
+ Collections.emptyList()
+ )),
+
+ // Case-6: Mix result
+ of("a1/b1/", "a1/b1/c12", newLinkedList(Arrays.asList(
+ "a1/b1/c12/",
+ "a1/b1/c1222.tx",
+ "a1/b1/c1333.tx",
+ "a1/b1/c1444.tx",
+ "a1/b1/c1555.tx"
+ ))),
+
+ // Case-7: StartKey is empty, return key that is same as keyPrefix.
+ of("a1/b4/", "", newLinkedList(Arrays.asList(
+ "a1/b4/"
)))
);
}
@@ -265,6 +326,11 @@ private static Stream<Arguments> shallowListDataWithoutTrailingSlash() {
of("a1/b1/c12", "", newLinkedList(Arrays.asList(
"a1/b1/c12/",
"a1/b1/c1222.tx"
+ ))),
+
+ // Case-10:
+ of("a1/b4", "", newLinkedList(Arrays.asList(
+ "a1/b4/"
)))
);
@@ -277,11 +343,19 @@ public void testShallowListKeysWithPrefixTrailingSlash(String keyPrefix,
checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket);
}
+ @ParameterizedTest
+ @MethodSource("shallowListObsDataWithTrailingSlash")
+ public void testShallowListObsKeysWithPrefixTrailingSlash(String keyPrefix,
+ String startKey, List<String> expectedKeys) throws Exception {
+ checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket);
+ }
+
@ParameterizedTest
@MethodSource("shallowListDataWithoutTrailingSlash")
public void testShallowListKeysWithoutPrefixTrailingSlash(String keyPrefix,
String startKey, List<String> expectedKeys) throws Exception {
checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket);
+ checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket);
}
private void checkKeyShallowList(String keyPrefix, String startKey,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index d28ef3b27034..763991d87592 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -78,6 +78,9 @@
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_EMPTY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
+import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY;
+import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -92,6 +95,8 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
@@ -1772,9 +1777,10 @@ public void testVolumeListKeys()
OMException exception = (OMException) execution.getCause();
assertEquals(VOLUME_NOT_FOUND, exception.getResult());
}
-
- @Test
- public void testRecursiveVolumeDelete()
+
+ @ParameterizedTest
+ @ValueSource(ints = {1, 5})
+ public void testRecursiveVolumeDelete(int threadCount)
throws Exception {
String volume1 = "volume10";
String volume2 = "volume20";
@@ -1783,47 +1789,19 @@ public void testRecursiveVolumeDelete()
// Create bucket bucket1 with layout FILE_SYSTEM_OPTIMIZED
// Insert some keys into it
generateKeys(OZONE_URI_DELIMITER + volume1,
- "/bucketfso",
+ "/fsobucket1",
BucketLayout.FILE_SYSTEM_OPTIMIZED.toString());
- // Create another volume volume2 with bucket and some keys into it.
+ // Create another volume volume2 with bucket and some keys into it.
generateKeys(OZONE_URI_DELIMITER + volume2,
"/bucket2",
BucketLayout.FILE_SYSTEM_OPTIMIZED.toString());
- // Create OBS bucket in volume1
- String[] args = new String[] {"bucket", "create", "--layout",
- BucketLayout.OBJECT_STORE.toString(), volume1 + "/bucketobs"};
- execute(ozoneShell, args);
- out.reset();
-
- // Insert few keys into OBS bucket
- String keyName = OZONE_URI_DELIMITER + volume1 + "/bucketobs" +
- OZONE_URI_DELIMITER + "key";
- for (int i = 0; i < 5; i++) {
- args = new String[] {
- "key", "put", "o3://" + omServiceId + keyName + i,
- testFile.getPath()};
- execute(ozoneShell, args);
- }
- out.reset();
-
- // Create Legacy bucket in volume1
- args = new String[] {"bucket", "create", "--layout",
- BucketLayout.LEGACY.toString(), volume1 + "/bucketlegacy"};
- execute(ozoneShell, args);
- out.reset();
-
- // Insert few keys into legacy bucket
- keyName = OZONE_URI_DELIMITER + volume1 + "/bucketlegacy" +
- OZONE_URI_DELIMITER + "key";
- for (int i = 0; i < 5; i++) {
- args = new String[] {
- "key", "put", "o3://" + omServiceId + keyName + i,
- testFile.getPath()};
- execute(ozoneShell, args);
- }
- out.reset();
+ createBucketAndGenerateKeys(volume1, FILE_SYSTEM_OPTIMIZED, "fsobucket2");
+ createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket1");
+ createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket2");
+ createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket1");
+ createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket2");
// Try volume delete without recursive
// It should fail as volume is not empty
@@ -1838,22 +1816,50 @@ public void testRecursiveVolumeDelete()
assertEquals(client.getObjectStore().getVolume(volume1)
.getName(), volume1);
- // Delete volume1(containing OBS, FSO and Legacy buckets) recursively
- args =
- new String[] {"volume", "delete", volume1, "-r", "--yes"};
+ // Delete volume1(containing OBS, FSO and Legacy buckets) recursively with thread count
+ String[] args = new String[] {"volume", "delete", volume1, "-r", "--yes", "-t", String.valueOf(threadCount)};
execute(ozoneShell, args);
out.reset();
+ // volume1 should not exist
+ omExecution = assertThrows(OMException.class,
+ () -> client.getObjectStore().getVolume(volume1));
+ assertEquals(VOLUME_NOT_FOUND, omExecution.getResult());
+
// volume2 should still exist
assertEquals(client.getObjectStore().getVolume(volume2)
.getName(), volume2);
- // volume1 should not exist
+ // Delete volume2 recursively
+ args = new String[] {"volume", "delete", volume2, "-r", "--yes"};
+ execute(ozoneShell, args);
+ out.reset();
+
+ // volume2 should not exist
omExecution = assertThrows(OMException.class,
- () -> client.getObjectStore().getVolume(volume1));
+ () -> client.getObjectStore().getVolume(volume2));
assertEquals(VOLUME_NOT_FOUND, omExecution.getResult());
}
+ private void createBucketAndGenerateKeys(String volume, BucketLayout layout, String bucketName) {
+ // Create bucket
+ String[] args = new String[] {"bucket", "create", volume + "/" + bucketName,
+ "--layout", layout.toString()};
+ execute(ozoneShell, args);
+ out.reset();
+
+ // Insert keys
+ String keyName = OZONE_URI_DELIMITER + volume + "/" + bucketName +
+ OZONE_URI_DELIMITER + "key";
+ for (int i = 0; i < 5; i++) {
+ args = new String[] {
+ "key", "put", "o3://" + omServiceId + keyName + i,
+ testFile.getPath()};
+ execute(ozoneShell, args);
+ }
+ out.reset();
+ }
+
@Test
public void testLinkedAndNonLinkedBucketMetaData()
throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 6c7ee06e8b35..623d41b3379b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -270,7 +270,6 @@ public static List<OmKeyInfo> getAllParentInfo(OzoneManager ozoneManager,
KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo,
OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex)
throws IOException {
- OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
List<OmKeyInfo> missingParentInfos = new ArrayList<>();
// The base id is left shifted by 8 bits for creating space to
@@ -303,10 +302,6 @@ public static List<OmKeyInfo> getAllParentInfo(OzoneManager ozoneManager,
objectCount++;
missingParentInfos.add(parentKeyInfo);
- omMetadataManager.getKeyTable(BucketLayout.DEFAULT).addCacheEntry(
- omMetadataManager.getOzoneKey(
- volumeName, bucketName, parentKeyInfo.getKeyName()),
- parentKeyInfo, trxnLogIndex);
}
return missingParentInfos;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 800f982034df..c09d87af1d56 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -729,6 +729,12 @@ protected OmKeyInfo prepareFileInfo(
dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
dbKeyInfo.setReplicationConfig(replicationConfig);
+
+ // Construct a new metadata map from KeyArgs.
+ // Clear the old one when the key is overwritten.
+ dbKeyInfo.getMetadata().clear();
+ dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf(
+ keyArgs.getMetadataList()));
return dbKeyInfo;
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index afc60e2947b5..953e457d1994 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -25,10 +25,11 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
import java.util.Map;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.UUID;
+import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -40,15 +41,17 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.lock.OzoneLockProvider;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -66,8 +69,10 @@
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB;
+import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.when;
@@ -460,6 +465,107 @@ public void testValidateAndUpdateCacheWithInvalidPath(
Assertions.assertNull(omKeyInfo);
}
+
+ @ParameterizedTest
+ @MethodSource("data")
+ public void testOverwritingExistingMetadata(
+ boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception {
+ when(ozoneManager.getOzoneLockProvider()).thenReturn(
+ new OzoneLockProvider(setKeyPathLock, setFileSystemPaths));
+
+ addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager,
+ getBucketLayout());
+
+ Map<String, String> initialMetadata =
+ Collections.singletonMap("initialKey", "initialValue");
+ OMRequest initialRequest =
+ createKeyRequest(false, 0, keyName, initialMetadata);
+ OMKeyCreateRequest initialOmKeyCreateRequest =
+ new OMKeyCreateRequest(initialRequest, getBucketLayout());
+ OMClientResponse initialResponse =
+ initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
+ verifyMetadataInResponse(initialResponse, initialMetadata);
+
+ // We have to add the key to the key table, as validateAndUpdateCache only
+ // updates the cache and not the DB.
+ OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
+ replicationType, replicationFactor);
+ keyInfo.setMetadata(initialMetadata);
+ omMetadataManager.getKeyTable(initialOmKeyCreateRequest.getBucketLayout())
+ .put(getOzoneKey(), keyInfo);
+
+ Map<String, String> updatedMetadata =
+ Collections.singletonMap("initialKey", "updatedValue");
+ OMRequest updatedRequest =
+ createKeyRequest(false, 0, keyName, updatedMetadata);
+ OMKeyCreateRequest updatedOmKeyCreateRequest =
+ new OMKeyCreateRequest(updatedRequest, getBucketLayout());
+
+ OMClientResponse updatedResponse =
+ updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L);
+ verifyMetadataInResponse(updatedResponse, updatedMetadata);
+ }
+
+ @ParameterizedTest
+ @MethodSource("data")
+ public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata(
+ boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception {
+ when(ozoneManager.getOzoneLockProvider()).thenReturn(
+ new OzoneLockProvider(setKeyPathLock, setFileSystemPaths));
+ addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager,
+ getBucketLayout());
+
+ // Create the key request without any initial metadata
+ OMRequest createRequestWithoutMetadata = createKeyRequest(false, 0, keyName,
+ null); // Passing 'null' for metadata
+ OMKeyCreateRequest createOmKeyCreateRequest =
+ new OMKeyCreateRequest(createRequestWithoutMetadata, getBucketLayout());
+
+ // Perform the create operation without any metadata
+ OMClientResponse createResponse =
+ createOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
+ // Verify that no metadata exists in the response
+ assertThat(
+ createResponse.getOMResponse().getCreateKeyResponse().getKeyInfo()
+ .getMetadataList()).isEmpty();
+
+ OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName,
+ replicationType, replicationFactor);
+ omMetadataManager.getKeyTable(createOmKeyCreateRequest.getBucketLayout())
+ .put(getOzoneKey(), keyInfo);
+
+ // Define new metadata for the overwrite operation
+ Map<String, String> overwriteMetadata = new HashMap<>();
+ overwriteMetadata.put("newKey", "newValue");
+
+ // Overwrite the previously created key with new metadata
+ OMRequest overwriteRequestWithMetadata =
+ createKeyRequest(false, 0, keyName, overwriteMetadata);
+ OMKeyCreateRequest overwriteOmKeyCreateRequest =
+ new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout());
+
+ // Perform the overwrite operation and capture the response
+ OMClientResponse overwriteResponse =
+ overwriteOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L);
+ // Verify the new metadata is correctly applied in the response
+ verifyMetadataInResponse(overwriteResponse, overwriteMetadata);
+ }
+
+
+ private void verifyMetadataInResponse(OMClientResponse response,
+ Map<String, String> expectedMetadata) {
+ // Extract metadata from the response
+ List<KeyValue> metadataList =
+ response.getOMResponse().getCreateKeyResponse().getKeyInfo()
+ .getMetadataList();
+ Assertions.assertEquals(expectedMetadata.size(), metadataList.size());
+ metadataList.forEach(kv -> {
+ String expectedValue = expectedMetadata.get(kv.getKey());
+ Assertions.assertEquals(expectedValue, kv.getValue(),
+ "Metadata value mismatch for key: " + kv.getKey());
+ });
+ }
+
/**
* This method calls preExecute and verify the modified request.
* @param originalOMRequest
@@ -541,24 +647,51 @@ protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) {
private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber,
String keyName) {
+ return createKeyRequest(isMultipartKey, partNumber, keyName, null);
+ }
+ /**
+ * Create OMRequest which encapsulates a CreateKeyRequest, optionally
+ * with metadata.
+ *
+ * @param isMultipartKey Indicates if the key is part of a multipart upload.
+ * @param partNumber The part number for multipart uploads, ignored if
+ * isMultipartKey is false.
+ * @param keyName The name of the key to create or update.
+ * @param metadata Optional metadata for the key. Pass null or an empty
+ * map if no metadata is to be set.
+ * @return OMRequest configured with the provided parameters.
+ */
+ protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber,
+ String keyName,
+ Map<String, String> metadata) {
KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
.setKeyName(keyName).setIsMultipartKey(isMultipartKey)
.setFactor(replicationFactor).setType(replicationType)
.setLatestVersionLocation(true);
+ // Configure for multipart upload, if applicable
if (isMultipartKey) {
keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber);
}
+ // Include metadata, if provided
+ if (metadata != null && !metadata.isEmpty()) {
+ metadata.forEach((key, value) -> keyArgs.addMetadata(KeyValue.newBuilder()
+ .setKey(key)
+ .setValue(value)
+ .build()));
+ }
+
OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build();
return OMRequest.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
.setClientId(UUID.randomUUID().toString())
- .setCreateKeyRequest(createKeyRequest).build();
+ .setCreateKeyRequest(createKeyRequest)
+ .build();
}
private OMRequest createKeyRequest(
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 1e247c8eb858..04c030530ce7 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -771,7 +771,8 @@ public Response initializeMultipartUpload(
private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
String storageType) throws OS3Exception {
if (StringUtils.isEmpty(storageType)) {
- storageType = S3StorageType.getDefault(ozoneConfiguration).toString();
+ S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration);
+ storageType = (defaultStorageType != null ? defaultStorageType.toString() : null);
}
ReplicationConfig clientConfiguredReplicationConfig = null;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
index ae42e812fb3e..9eb88989a32e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -62,6 +62,10 @@ public ReplicationType getType() {
public static S3StorageType getDefault(ConfigurationSource config) {
String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION);
ReplicationFactor configFactor;
+ if (replicationString == null) {
+ // if no config is set then let server take decision
+ return null;
+ }
try {
configFactor = ReplicationFactor.valueOf(
Integer.parseInt(replicationString));
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java
index e8ced23b348f..f66f4f3abda2 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java
@@ -33,8 +33,7 @@
*/
@Command(name = "ozonemanagers",
aliases = {"-ozonemanagers"},
- description = "gets list of ozone storage container "
- + "manager nodes in the cluster",
+ description = "gets list of Ozone Manager nodes in the cluster",
mixinStandardHelpOptions = true,
versionProvider = HddsVersionProvider.class)
public class OzoneManagersCommandHandler implements Callable<Void> {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
index e380e98561b0..8cc80502386f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
@@ -121,6 +121,11 @@ private void deleteVolumeRecursive()
totalBucketCount++;
}
doCleanBuckets();
+ // Reset counters and bucket list
+ numberOfBucketsCleaned.set(0);
+ totalBucketCount = 0;
+ cleanedBucketCounter.set(0);
+ bucketIdList.clear();
}
/**
@@ -201,6 +206,7 @@ public void run() {
if (!cleanOBSBucket(bucket)) {
throw new RuntimeException("Failed to clean bucket");
}
+ break;
default:
throw new RuntimeException("Invalid bucket layout");
}
diff --git a/pom.xml b/pom.xml
index 591b69011836..7bbd3eeddc44 100644
--- a/pom.xml
+++ b/pom.xml
@@ -312,6 +312,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
5.1.0
+ <snappy-java.version>1.1.10.5</snappy-java.version>
@@ -1555,6 +1556,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<version>${mockito2.version}</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ <version>${snappy-java.version}</version>
+ </dependency>