2 changes: 1 addition & 1 deletion hadoop-hdds/interface-client/pom.xml
@@ -41,7 +41,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-protobuf_3_7</artifactId>
<artifactId>hadoop-shaded-protobuf_3_25</artifactId>
</dependency>
<dependency>
<groupId>org.apache.ratis</groupId>
@@ -51,7 +51,8 @@
* Copied from Hadoop and migrated to AssertJ.
*/
public final class MetricsAsserts {

// workaround for HADOOP-19301.
private static final MutableQuantiles QUANTILES = new MutableQuantiles();
private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class);
private static final Offset<Double> EPSILON = Offset.offset(0.00001);
private static final Offset<Float> EPSILON_FLOAT = Offset.offset(0.00001f);
@@ -411,7 +412,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.quantiles) {
for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthPercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
@@ -432,7 +433,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertInverseQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.quantiles) {
for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthInversePercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
5 changes: 4 additions & 1 deletion hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -93,10 +93,13 @@ execute_s3a_tests() {
EOF

# Some tests are skipped due to known issues.
# - ITestS3AContractBulkDelete: HDDS-11661
# - ITestS3AContractCreate: HDDS-11663
# - ITestS3AContractDistCp: HDDS-10616
# - ITestS3AContractMkdirWithCreatePerf: HDDS-11662
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
-Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \
-Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \
clean test

local target="${RESULT_DIR}/junit/${bucket}/target"
4 changes: 3 additions & 1 deletion hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -63,7 +63,7 @@ share/ozone/lib/hadoop-common.jar
share/ozone/lib/hadoop-hdfs-client.jar
share/ozone/lib/hadoop-hdfs.jar
share/ozone/lib/hadoop-shaded-guava.jar
share/ozone/lib/hadoop-shaded-protobuf_3_7.jar
share/ozone/lib/hadoop-shaded-protobuf_3_25.jar
share/ozone/lib/hdds-annotation-processing.jar
share/ozone/lib/hdds-client.jar
share/ozone/lib/hdds-common.jar
@@ -134,6 +134,7 @@ share/ozone/lib/jersey-hk2.jar
share/ozone/lib/jersey-media-jaxb.jar
share/ozone/lib/jersey-media-json-jackson.jar
share/ozone/lib/jersey-server.jar
share/ozone/lib/jettison.jar
share/ozone/lib/jetty-client.jar
share/ozone/lib/jetty-http.jar
share/ozone/lib/jetty-io.jar
@@ -202,6 +203,7 @@ share/ozone/lib/netty-tcnative-classes.Final.jar
share/ozone/lib/netty-transport.Final.jar
share/ozone/lib/netty-transport-classes-epoll.Final.jar
share/ozone/lib/netty-transport-native-epoll.Final-linux-x86_64.jar
share/ozone/lib/netty-transport-native-epoll.Final.jar
share/ozone/lib/netty-transport-native-unix-common.Final.jar
share/ozone/lib/nimbus-jose-jwt.jar
share/ozone/lib/okhttp.jar
9 changes: 1 addition & 8 deletions hadoop-ozone/dist/src/shell/conf/log4j.properties
@@ -20,7 +20,7 @@ hadoop.log.dir=.
hadoop.log.file=hadoop.log

# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
log4j.rootLogger=${hadoop.root.logger}

# Logging Threshold
log4j.threshold=ALL
@@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN

#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter


# Log levels of third-party libraries
log4j.logger.org.apache.commons.beanutils=WARN

5 changes: 5 additions & 0 deletions hadoop-ozone/integration-test/pom.xml
@@ -263,6 +263,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.slf4j</groupId>
<artifactId>jul-to-slf4j</artifactId>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>${assertj.version}</version>
</dependency>
</dependencies>

<build>
@@ -153,6 +153,14 @@ static void runContainerStateMachineMetrics(
assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
assertCounter("WriteChunkMsNumOps", 1L, metric);

applyTransactionLatency = getDoubleGauge(
"ApplyTransactionNsAvgTime", metric);
assertThat(applyTransactionLatency).isGreaterThan(0.0);
writeStateMachineLatency = getDoubleGauge(
"WriteStateMachineDataNsAvgTime", metric);
assertThat(writeStateMachineLatency).isGreaterThan(0.0);


//Read Chunk
ContainerProtos.ContainerCommandRequestProto readChunkRequest =
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
@@ -165,12 +173,6 @@ static void runContainerStateMachineMetrics(
RaftGroupId.valueOf(pipeline.getId().getId()));
assertCounter("NumQueryStateMachineOps", 1L, metric);
assertCounter("NumApplyTransactionOps", 1L, metric);
applyTransactionLatency = getDoubleGauge(
"ApplyTransactionNsAvgTime", metric);
assertThat(applyTransactionLatency).isGreaterThan(0.0);
writeStateMachineLatency = getDoubleGauge(
"WriteStateMachineDataNsAvgTime", metric);
assertThat(writeStateMachineLatency).isGreaterThan(0.0);

} finally {
if (client != null) {
@@ -29,9 +29,11 @@
import static org.assertj.core.api.Assertions.assertThat;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -52,9 +54,10 @@
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.SimpleCopyListing;
import org.apache.hadoop.tools.mapred.CopyMapper;
import org.apache.hadoop.tools.util.DistCpTestUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.functional.RemoteIterators;

import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -534,8 +537,7 @@ public void testLargeFilesFromRemote() throws Exception {
public void testSetJobId() throws Exception {
describe("check jobId is set in the conf");
remoteFS.create(new Path(remoteDir, "file1")).close();
DistCpTestUtils
.assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
localDir.toString(), getDefaultCLIOptionsOrNull(), conf);
assertThat(conf.get(CONF_LABEL_DISTCP_JOB_ID))
.withFailMessage("DistCp job id isn't set")
@@ -719,7 +721,7 @@ public void testDistCpWithIterator() throws Exception {
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);

String options = "-useiterator -update -delete" + getDefaultCLIOptions();
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), options, conf);

// Check the target listing was also done using iterator.
@@ -864,7 +866,7 @@ public void testDistCpWithFile() throws Exception {
verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", localDir);

DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), getDefaultCLIOptionsOrNull(), conf);

assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -889,7 +891,7 @@ public void testDistCpWithUpdateExistFile() throws Exception {

verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", dest);
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), "-delete -update" + getDefaultCLIOptions(), conf);

assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -1015,4 +1017,37 @@ private void verifySkipAndCopyCounter(Job job,
.withFailMessage("Mismatch in SKIP counter value")
.isEqualTo(skipExpectedValue);
}

/**
* Runs distcp from src to dst, preserving XAttrs. Asserts the
* expected exit code.
*
* @param exitCode expected exit code
* @param src distcp src path
* @param dst distcp destination
* @param options distcp command line options
* @param conf Configuration to use
* @throws Exception if there is any error
*/
public static void assertRunDistCp(int exitCode, String src, String dst,
String options, Configuration conf)
throws Exception {
assertRunDistCp(exitCode, src, dst,
options == null ? new String[0] : options.trim().split(" "), conf);
}

private static void assertRunDistCp(int exitCode, String src, String dst,
String[] options, Configuration conf)
throws Exception {
DistCp distCp = new DistCp(conf, null);
String[] optsArr = new String[options.length + 2];
System.arraycopy(options, 0, optsArr, 0, options.length);
optsArr[optsArr.length - 2] = src;
optsArr[optsArr.length - 1] = dst;

Assertions.assertThat(ToolRunner.run(conf, distCp, optsArr))
.describedAs("Exit code of distcp %s",
Arrays.stream(optsArr).collect(Collectors.joining(" ")))
.isEqualTo(exitCode);
}
}
2 changes: 1 addition & 1 deletion hadoop-ozone/interface-client/pom.xml
@@ -49,7 +49,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">

<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-protobuf_3_7</artifactId>
<artifactId>hadoop-shaded-protobuf_3_25</artifactId>
</dependency>

<dependency>
6 changes: 6 additions & 0 deletions hadoop-ozone/ozonefs-shaded/pom.xml
@@ -78,6 +78,12 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>2.5.0</version>
<scope>compile</scope>
Comment on lines +84 to +85

Contributor:

Version and scope are inherited / defaulted.

Suggested change (drop the explicit version and scope):
<version>2.5.0</version>
<scope>compile</scope>

  • Previously we could assume protobuf 2.5.0 was available in the Hadoop environment.
  • Then we had to introduce ozone-filesystem-hadoop3-client for environments where Hadoop's protobuf is relocated to org.apache.hadoop.shaded.com.google.protobuf, but it still did not include protobuf in the fat jar.
  • Now Hadoop 3.4 no longer has either of these, so we need to include protobuf in the fat jars.

With that, I don't think we still need ozone-filesystem-hadoop3-client, do we?

Also, can/should we relocate it to org.apache.hadoop.ozone.shaded.com.google.protobuf?

Contributor Author:

Agreed.

Contributor Author:

We should also use this opportunity to drop protobuf 2.5 from Ozone. Will do that in a separate JIRA.

</dependency>
</dependencies>
<build>
<plugins>
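A side note on the relocation question in the review thread above: a minimal maven-shade-plugin relocation sketch is shown below. It is an illustration only, not part of this PR; it assumes ozonefs-shaded already binds the shade plugin in its <build> section (not visible in this diff), omits plugin version and executions, and the shadedPattern simply echoes the package name the reviewer proposed.

<!-- Hypothetical sketch: relocate protobuf classes into an Ozone-specific
     shaded package so the fat jar cannot clash with another protobuf copy
     on the application classpath. -->
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-shade-plugin</artifactId>
  <configuration>
    <relocations>
      <relocation>
        <pattern>com.google.protobuf</pattern>
        <shadedPattern>org.apache.hadoop.ozone.shaded.com.google.protobuf</shadedPattern>
      </relocation>
    </relocations>
  </configuration>
</plugin>

With a relocation like this in place, bundling protobuf-java in the fat jar would not leak com.google.protobuf classes to applications that bring their own protobuf version.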
10 changes: 5 additions & 5 deletions pom.xml
@@ -67,7 +67,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<project.build.outputTimestamp>2023-01-01T00:00:00Z</project.build.outputTimestamp>

<hadoop2.version>2.10.2</hadoop2.version>
<hadoop.version>3.3.6</hadoop.version>
<hadoop.version>3.4.1</hadoop.version>

<!-- version for hdds/ozone components -->
<hdds.version>${ozone.version}</hdds.version>
@@ -198,11 +198,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<!-- ProtocolBuffer version, used to verify the protoc version and -->
<!-- define the protobuf JAR version -->
<proto2.hadooprpc.protobuf.version>2.5.0</proto2.hadooprpc.protobuf.version>
<proto3.hadooprpc.protobuf.version>3.7.1</proto3.hadooprpc.protobuf.version>
<hadoop-thirdparty.version>1.1.1</hadoop-thirdparty.version>
<proto3.hadooprpc.protobuf.version>3.23.4</proto3.hadooprpc.protobuf.version>
<hadoop-thirdparty.version>1.3.0</hadoop-thirdparty.version>

<spotbugs.version>3.1.12.2</spotbugs.version>
<dnsjava.version>2.1.9</dnsjava.version>
<dnsjava.version>3.6.1</dnsjava.version>
<okhttp3.version>4.12.0</okhttp3.version>
<stax2.version>4.2.2</stax2.version>
<jakarta.inject.version>2.6.1</jakarta.inject.version>
@@ -1034,7 +1034,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-protobuf_3_7</artifactId>
<artifactId>hadoop-shaded-protobuf_3_25</artifactId>
<version>${hadoop-thirdparty.version}</version>
</dependency>
<dependency>