diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml
index c23f631cf4d1..1a61dfa930e8 100644
--- a/hadoop-hdds/interface-client/pom.xml
+++ b/hadoop-hdds/interface-client/pom.xml
@@ -41,7 +41,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>org.apache.hadoop.thirdparty</groupId>
-     <artifactId>hadoop-shaded-protobuf_3_7</artifactId>
+     <artifactId>hadoop-shaded-protobuf_3_25</artifactId>
org.apache.ratis
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
index f2c65d149613..f4651a408f70 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
@@ -51,7 +51,8 @@
* Copied from Hadoop and migrated to AssertJ.
*/
public final class MetricsAsserts {
-
+  // Workaround for HADOOP-19301: read the default quantiles from an instance, since the static MutableQuantiles.quantiles field is no longer accessible.
+  private static final MutableQuantiles QUANTILES = new MutableQuantiles();
private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class);
  private static final Offset<Double> EPSILON = Offset.offset(0.00001);
  private static final Offset<Float> EPSILON_FLOAT = Offset.offset(0.00001f);
@@ -411,7 +412,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
- for (Quantile q : MutableQuantiles.quantiles) {
+ for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthPercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
@@ -432,7 +433,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertInverseQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
- for (Quantile q : MutableQuantiles.quantiles) {
+ for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthInversePercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
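
For context, the HADOOP-19301 workaround above amounts to reading the default quantile set from a MutableQuantiles instance instead of the removed static MutableQuantiles.quantiles field. A minimal sketch of the same name derivation, assuming only the Hadoop metrics classes already used in this patch (the "Foo"/"Latency" names are placeholders):

import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.util.Quantile;

public final class QuantileGaugeNames {
  public static void main(String[] args) {
    // HADOOP-19301: quantiles come from an instance, not a static field.
    MutableQuantiles quantiles = new MutableQuantiles();
    for (Quantile q : quantiles.getQuantiles()) {
      // Same derivation as assertQuantileGauges above, e.g.
      // "Foo75thPercentileLatency" for prefix "Foo" and value "Latency".
      int percentile = (int) (100 * q.quantile);
      System.out.printf("Foo%dthPercentileLatency%n", percentile);
    }
  }
}
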
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
index e8032068465a..cfdab10c9035 100644
--- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -93,10 +93,13 @@ execute_s3a_tests() {
EOF
# Some tests are skipped due to known issues.
+ # - ITestS3AContractBulkDelete: HDDS-11661
+ # - ITestS3AContractCreate: HDDS-11663
# - ITestS3AContractDistCp: HDDS-10616
+ # - ITestS3AContractMkdirWithCreatePerf: HDDS-11662
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
- -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \
+ -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \
clean test
local target="${RESULT_DIR}/junit/${bucket}/target"
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 1e07ec1a2c23..e6d9a1d2a2e0 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -63,7 +63,7 @@ share/ozone/lib/hadoop-common.jar
share/ozone/lib/hadoop-hdfs-client.jar
share/ozone/lib/hadoop-hdfs.jar
share/ozone/lib/hadoop-shaded-guava.jar
-share/ozone/lib/hadoop-shaded-protobuf_3_7.jar
+share/ozone/lib/hadoop-shaded-protobuf_3_25.jar
share/ozone/lib/hdds-annotation-processing.jar
share/ozone/lib/hdds-client.jar
share/ozone/lib/hdds-common.jar
@@ -134,6 +134,7 @@ share/ozone/lib/jersey-hk2.jar
share/ozone/lib/jersey-media-jaxb.jar
share/ozone/lib/jersey-media-json-jackson.jar
share/ozone/lib/jersey-server.jar
+share/ozone/lib/jettison.jar
share/ozone/lib/jetty-client.jar
share/ozone/lib/jetty-http.jar
share/ozone/lib/jetty-io.jar
@@ -202,6 +203,7 @@ share/ozone/lib/netty-tcnative-classes.Final.jar
share/ozone/lib/netty-transport.Final.jar
share/ozone/lib/netty-transport-classes-epoll.Final.jar
share/ozone/lib/netty-transport-native-epoll.Final-linux-x86_64.jar
+share/ozone/lib/netty-transport-native-epoll.Final.jar
share/ozone/lib/netty-transport-native-unix-common.Final.jar
share/ozone/lib/nimbus-jose-jwt.jar
share/ozone/lib/okhttp.jar
diff --git a/hadoop-ozone/dist/src/shell/conf/log4j.properties b/hadoop-ozone/dist/src/shell/conf/log4j.properties
index 96e90ab54174..aa3d0b4bf43a 100644
--- a/hadoop-ozone/dist/src/shell/conf/log4j.properties
+++ b/hadoop-ozone/dist/src/shell/conf/log4j.properties
@@ -20,7 +20,7 @@ hadoop.log.dir=.
hadoop.log.file=hadoop.log
# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
+log4j.rootLogger=${hadoop.root.logger}
# Logging Threshold
log4j.threshold=ALL
@@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-
# Log levels of third-party libraries
log4j.logger.org.apache.commons.beanutils=WARN
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 45a3827a06b9..f4a2f7131850 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -263,6 +263,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>org.slf4j</groupId>
      <artifactId>jul-to-slf4j</artifactId>
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <version>${assertj.version}</version>
+    </dependency>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index 95aeb6490246..40df94858e64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -153,6 +153,13 @@ static void runContainerStateMachineMetrics(
assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
assertCounter("WriteChunkMsNumOps", 1L, metric);
+ applyTransactionLatency = getDoubleGauge(
+ "ApplyTransactionNsAvgTime", metric);
+ assertThat(applyTransactionLatency).isGreaterThan(0.0);
+ writeStateMachineLatency = getDoubleGauge(
+ "WriteStateMachineDataNsAvgTime", metric);
+ assertThat(writeStateMachineLatency).isGreaterThan(0.0);
+
//Read Chunk
ContainerProtos.ContainerCommandRequestProto readChunkRequest =
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
@@ -165,12 +173,6 @@ static void runContainerStateMachineMetrics(
RaftGroupId.valueOf(pipeline.getId().getId()));
assertCounter("NumQueryStateMachineOps", 1L, metric);
assertCounter("NumApplyTransactionOps", 1L, metric);
- applyTransactionLatency = getDoubleGauge(
- "ApplyTransactionNsAvgTime", metric);
- assertThat(applyTransactionLatency).isGreaterThan(0.0);
- writeStateMachineLatency = getDoubleGauge(
- "WriteStateMachineDataNsAvgTime", metric);
- assertThat(writeStateMachineLatency).isGreaterThan(0.0);
} finally {
if (client != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index 71f1b682d0f4..861127916c28 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -29,9 +29,11 @@
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -52,9 +54,10 @@
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.SimpleCopyListing;
import org.apache.hadoop.tools.mapred.CopyMapper;
-import org.apache.hadoop.tools.util.DistCpTestUtils;
+import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.functional.RemoteIterators;
+import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -534,8 +537,7 @@ public void testLargeFilesFromRemote() throws Exception {
public void testSetJobId() throws Exception {
describe("check jobId is set in the conf");
remoteFS.create(new Path(remoteDir, "file1")).close();
- DistCpTestUtils
- .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
+ assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
localDir.toString(), getDefaultCLIOptionsOrNull(), conf);
assertThat(conf.get(CONF_LABEL_DISTCP_JOB_ID))
.withFailMessage("DistCp job id isn't set")
@@ -719,7 +721,7 @@ public void testDistCpWithIterator() throws Exception {
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
String options = "-useiterator -update -delete" + getDefaultCLIOptions();
- DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
+ assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), options, conf);
// Check the target listing was also done using iterator.
@@ -864,7 +866,7 @@ public void testDistCpWithFile() throws Exception {
verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", localDir);
- DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
+ assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), getDefaultCLIOptionsOrNull(), conf);
assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -889,7 +891,7 @@ public void testDistCpWithUpdateExistFile() throws Exception {
verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", dest);
- DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
+ assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), "-delete -update" + getDefaultCLIOptions(), conf);
assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -1015,4 +1017,37 @@ private void verifySkipAndCopyCounter(Job job,
.withFailMessage("Mismatch in SKIP counter value")
.isEqualTo(skipExpectedValue);
}
+
+ /**
+   * Runs distcp from src to dst. Asserts the
+   * expected exit code.
+ *
+ * @param exitCode expected exit code
+ * @param src distcp src path
+ * @param dst distcp destination
+ * @param options distcp command line options
+ * @param conf Configuration to use
+ * @throws Exception if there is any error
+ */
+ public static void assertRunDistCp(int exitCode, String src, String dst,
+ String options, Configuration conf)
+ throws Exception {
+ assertRunDistCp(exitCode, src, dst,
+ options == null ? new String[0] : options.trim().split(" "), conf);
+ }
+
+ private static void assertRunDistCp(int exitCode, String src, String dst,
+ String[] options, Configuration conf)
+ throws Exception {
+ DistCp distCp = new DistCp(conf, null);
+ String[] optsArr = new String[options.length + 2];
+ System.arraycopy(options, 0, optsArr, 0, options.length);
+ optsArr[optsArr.length - 2] = src;
+ optsArr[optsArr.length - 1] = dst;
+
+ Assertions.assertThat(ToolRunner.run(conf, distCp, optsArr))
+ .describedAs("Exit code of distcp %s",
+ Arrays.stream(optsArr).collect(Collectors.joining(" ")))
+ .isEqualTo(exitCode);
+ }
}
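
As a usage reference, the helper inlined above drives DistCp through ToolRunner the same way the removed DistCpTestUtils.assertRunDistCp did. A minimal standalone sketch under the same assumptions (default Configuration, placeholder /src and /dst paths):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.util.ToolRunner;

public final class RunDistCpExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Passing null options makes DistCp parse them from the argv below.
    DistCp distCp = new DistCp(conf, null);
    String[] argv = {"-update", "/src", "/dst"};
    int exitCode = ToolRunner.run(conf, distCp, argv);
    System.out.println("distcp exit code: " + exitCode); // 0 == SUCCESS
  }
}
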
diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml
index 7e9383553cce..2e68deeeb3be 100644
--- a/hadoop-ozone/interface-client/pom.xml
+++ b/hadoop-ozone/interface-client/pom.xml
@@ -49,7 +49,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>org.apache.hadoop.thirdparty</groupId>
-     <artifactId>hadoop-shaded-protobuf_3_7</artifactId>
+     <artifactId>hadoop-shaded-protobuf_3_25</artifactId>
diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml
index 4146eaaa4d51..9e77ffd7c331 100644
--- a/hadoop-ozone/ozonefs-shaded/pom.xml
+++ b/hadoop-ozone/ozonefs-shaded/pom.xml
@@ -78,6 +78,12 @@
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>2.5.0</version>
+      <scope>compile</scope>
+    </dependency>
diff --git a/pom.xml b/pom.xml
index 89a12e7083ac..2e117991ed5f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -67,7 +67,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
    <project.build.outputTimestamp>2023-01-01T00:00:00Z</project.build.outputTimestamp>
2.10.2
-    <hadoop.version>3.3.6</hadoop.version>
+    <hadoop.version>3.4.1</hadoop.version>
${ozone.version}
@@ -198,11 +198,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
2.5.0
- 3.7.1
-    <hadoop-thirdparty.version>1.1.1</hadoop-thirdparty.version>
+ 3.23.4
+    <hadoop-thirdparty.version>1.3.0</hadoop-thirdparty.version>
3.1.12.2
- 2.1.9
+ 3.6.1
4.12.0
4.2.2
2.6.1
@@ -1034,7 +1034,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
        <groupId>org.apache.hadoop.thirdparty</groupId>
-       <artifactId>hadoop-shaded-protobuf_3_7</artifactId>
+       <artifactId>hadoop-shaded-protobuf_3_25</artifactId>
        <version>${hadoop-thirdparty.version}</version>