Merged

Changes from 4 commits
2 changes: 1 addition & 1 deletion hadoop-hdds/interface-client/pom.xml
@@ -40,7 +40,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-protobuf_3_7</artifactId>
<artifactId>hadoop-shaded-protobuf_3_25</artifactId>
</dependency>
<dependency>
<groupId>org.apache.ratis</groupId>
@@ -51,7 +51,8 @@
* Copied from Hadoop and migrated to AssertJ.
*/
public final class MetricsAsserts {

// Workaround for HADOOP-19301: the static MutableQuantiles.quantiles field is
// no longer available, so keep an instance and read getQuantiles() from it.
private static final MutableQuantiles QUANTILES = new MutableQuantiles();
private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class);
private static final Offset<Double> EPSILON = Offset.offset(0.00001);
private static final Offset<Float> EPSILON_FLOAT = Offset.offset(0.00001f);
@@ -411,7 +412,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.quantiles) {
for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthPercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
@@ -432,7 +433,7 @@ public static void assertQuantileGauges(String prefix,
public static void assertInverseQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.quantiles) {
for (Quantile q : QUANTILES.getQuantiles()) {
String nameTemplate = prefix + "%dthInversePercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
4 changes: 0 additions & 4 deletions hadoop-ozone/dist/pom.xml
@@ -261,10 +261,6 @@
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-filesystem-hadoop3</artifactId>
</dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-filesystem-hadoop3-client</artifactId>
</dependency>
</dependencies>
</profile>
<profile>
10 changes: 3 additions & 7 deletions hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh
@@ -42,14 +42,10 @@ export OZONE_DIR=/opt/ozone
# shellcheck source=/dev/null
source "$COMPOSE_DIR/../testlib.sh"

for HADOOP_VERSION in ${hadoop2.version} 3.1.2 ${hadoop.version}; do
export HADOOP_VERSION
for test_version in $HADOOP_TEST_VERSIONS; do
export HADOOP_IMAGE="${test_version%%:*}"
export HADOOP_VERSION="${test_version##*:}"
export HADOOP_MAJOR_VERSION=${HADOOP_VERSION%%.*}
if [[ "${HADOOP_VERSION}" == "${hadoop2.version}" ]] || [[ "${HADOOP_VERSION}" == "${hadoop.version}" ]]; then
export HADOOP_IMAGE=apache/hadoop
else
export HADOOP_IMAGE=flokkr/hadoop
fi

docker-compose --ansi never --profile hadoop up -d nm rm

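As a sketch of how the new loop splits each entry, assuming a hypothetical HADOOP_TEST_VERSIONS value (the real list is supplied by the build as image:version pairs):

#!/usr/bin/env bash
# Hypothetical input; the actual list is injected by the build.
HADOOP_TEST_VERSIONS="flokkr/hadoop:3.1.2 apache/hadoop:3.3.6"

for test_version in $HADOOP_TEST_VERSIONS; do
  HADOOP_IMAGE="${test_version%%:*}"           # text before the first ':' -> image name
  HADOOP_VERSION="${test_version##*:}"         # text after the last ':'   -> version
  HADOOP_MAJOR_VERSION="${HADOOP_VERSION%%.*}" # first dotted component    -> major version
  echo "$HADOOP_IMAGE $HADOOP_VERSION $HADOOP_MAJOR_VERSION"
done
# Prints:
#   flokkr/hadoop 3.1.2 3
#   apache/hadoop 3.3.6 3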
5 changes: 4 additions & 1 deletion hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -93,10 +93,13 @@ execute_s3a_tests() {
EOF

# Some tests are skipped due to known issues.
# - ITestS3AContractBulkDelete: HDDS-11661
# - ITestS3AContractCreate: HDDS-11663
# - ITestS3AContractDistCp: HDDS-10616
# - ITestS3AContractMkdirWithCreatePerf: HDDS-11662
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
-Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \
-Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \
clean test

local target="${RESULT_DIR}/junit/${bucket}/target"
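For readers unfamiliar with the -Dtest filter above: Maven Surefire patterns use '*' as a glob over class names, a leading '!' to exclude a pattern, '#' to select individual methods, and '[*]' to match any parameterized variant. A minimal sketch, using only names that appear in this script:

# Run all S3A contract tests except the rename suite and the
# parameterized overwrite tests of ITestS3AContractCreate.
mvn -B test -Dtest='ITestS3AContract*, !ITestS3AContractRename, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*]'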
5 changes: 3 additions & 2 deletions hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -63,7 +63,7 @@ share/ozone/lib/hadoop-common.jar
share/ozone/lib/hadoop-hdfs-client.jar
share/ozone/lib/hadoop-hdfs.jar
share/ozone/lib/hadoop-shaded-guava.jar
share/ozone/lib/hadoop-shaded-protobuf_3_7.jar
share/ozone/lib/hadoop-shaded-protobuf_3_25.jar
share/ozone/lib/hdds-annotation-processing.jar
share/ozone/lib/hdds-client.jar
share/ozone/lib/hdds-common.jar
@@ -134,6 +134,7 @@ share/ozone/lib/jersey-hk2.jar
share/ozone/lib/jersey-media-jaxb.jar
share/ozone/lib/jersey-media-json-jackson.jar
share/ozone/lib/jersey-server.jar
share/ozone/lib/jettison.jar
share/ozone/lib/jetty-client.jar
share/ozone/lib/jetty-http.jar
share/ozone/lib/jetty-io.jar
@@ -202,6 +203,7 @@ share/ozone/lib/netty-tcnative-classes.Final.jar
share/ozone/lib/netty-transport.Final.jar
share/ozone/lib/netty-transport-classes-epoll.Final.jar
share/ozone/lib/netty-transport-native-epoll.Final-linux-x86_64.jar
share/ozone/lib/netty-transport-native-epoll.Final.jar
share/ozone/lib/netty-transport-native-unix-common.Final.jar
share/ozone/lib/nimbus-jose-jwt.jar
share/ozone/lib/okhttp.jar
@@ -222,7 +224,6 @@ share/ozone/lib/ozone-filesystem-hadoop2.jar
share/ozone/lib/ozone-filesystem-hadoop3.jar
share/ozone/lib/ozone-filesystem.jar
share/ozone/lib/ozone-httpfsgateway.jar
share/ozone/lib/ozone-filesystem-hadoop3-client.jar
share/ozone/lib/ozone-insight.jar
share/ozone/lib/ozone-interface-client.jar
share/ozone/lib/ozone-interface-storage.jar
9 changes: 1 addition & 8 deletions hadoop-ozone/dist/src/shell/conf/log4j.properties
@@ -20,7 +20,7 @@ hadoop.log.dir=.
hadoop.log.file=hadoop.log

# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
log4j.rootLogger=${hadoop.root.logger}

# Logging Threshold
log4j.threshold=ALL
@@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN

#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter


# Log levels of third-party libraries
log4j.logger.org.apache.commons.beanutils=WARN

5 changes: 5 additions & 0 deletions hadoop-ozone/integration-test/pom.xml
@@ -251,6 +251,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.slf4j</groupId>
<artifactId>jul-to-slf4j</artifactId>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>${assertj.version}</version>
</dependency>
</dependencies>

<build>
@@ -154,6 +154,14 @@ static void runContainerStateMachineMetrics(
assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
assertCounter("WriteChunkMsNumOps", 1L, metric);

applyTransactionLatency = getDoubleGauge(
"ApplyTransactionNsAvgTime", metric);
assertThat(applyTransactionLatency).isGreaterThan(0.0);
writeStateMachineLatency = getDoubleGauge(
"WriteStateMachineDataNsAvgTime", metric);
assertThat(writeStateMachineLatency).isGreaterThan(0.0);


//Read Chunk
ContainerProtos.ContainerCommandRequestProto readChunkRequest =
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
@@ -166,12 +174,6 @@ static void runContainerStateMachineMetrics(
RaftGroupId.valueOf(pipeline.getId().getId()));
assertCounter("NumQueryStateMachineOps", 1L, metric);
assertCounter("NumApplyTransactionOps", 1L, metric);
applyTransactionLatency = getDoubleGauge(
"ApplyTransactionNsAvgTime", metric);
assertThat(applyTransactionLatency).isGreaterThan(0.0);
writeStateMachineLatency = getDoubleGauge(
"WriteStateMachineDataNsAvgTime", metric);
assertThat(writeStateMachineLatency).isGreaterThan(0.0);

} finally {
if (client != null) {
@@ -29,9 +29,11 @@
import static org.assertj.core.api.Assertions.assertThat;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -52,9 +54,10 @@
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.SimpleCopyListing;
import org.apache.hadoop.tools.mapred.CopyMapper;
import org.apache.hadoop.tools.util.DistCpTestUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.functional.RemoteIterators;

import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -534,8 +537,7 @@ public void testLargeFilesFromRemote() throws Exception {
public void testSetJobId() throws Exception {
describe("check jobId is set in the conf");
remoteFS.create(new Path(remoteDir, "file1")).close();
DistCpTestUtils
.assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(),
localDir.toString(), getDefaultCLIOptionsOrNull(), conf);
assertThat(conf.get(CONF_LABEL_DISTCP_JOB_ID))
.withFailMessage("DistCp job id isn't set")
@@ -719,7 +721,7 @@ public void testDistCpWithIterator() throws Exception {
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);

String options = "-useiterator -update -delete" + getDefaultCLIOptions();
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), options, conf);

// Check the target listing was also done using iterator.
@@ -864,7 +866,7 @@ public void testDistCpWithFile() throws Exception {
verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", localDir);

DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), getDefaultCLIOptionsOrNull(), conf);

assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -889,7 +891,7 @@ public void testDistCpWithUpdateExistFile() throws Exception {

verifyPathExists(remoteFS, "", source);
verifyPathExists(localFS, "", dest);
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
dest.toString(), "-delete -update" + getDefaultCLIOptions(), conf);

assertThat(RemoteIterators.toList(localFS.listFiles(dest, true)))
@@ -1015,4 +1017,37 @@ private void verifySkipAndCopyCounter(Job job,
.withFailMessage("Mismatch in SKIP counter value")
.isEqualTo(skipExpectedValue);
}

/**
* Runs distcp from src to dst with the given options and asserts the
* expected exit code.
*
* @param exitCode expected exit code
* @param src distcp src path
* @param dst distcp destination
* @param options distcp command line options
* @param conf Configuration to use
* @throws Exception if there is any error
*/
public static void assertRunDistCp(int exitCode, String src, String dst,
String options, Configuration conf)
throws Exception {
assertRunDistCp(exitCode, src, dst,
options == null ? new String[0] : options.trim().split(" "), conf);
}

private static void assertRunDistCp(int exitCode, String src, String dst,
String[] options, Configuration conf)
throws Exception {
DistCp distCp = new DistCp(conf, null);
String[] optsArr = new String[options.length + 2];
System.arraycopy(options, 0, optsArr, 0, options.length);
optsArr[optsArr.length - 2] = src;
optsArr[optsArr.length - 1] = dst;

Assertions.assertThat(ToolRunner.run(conf, distCp, optsArr))
.describedAs("Exit code of distcp %s",
Arrays.stream(optsArr).collect(Collectors.joining(" ")))
.isEqualTo(exitCode);
}
}
2 changes: 1 addition & 1 deletion hadoop-ozone/interface-client/pom.xml
@@ -48,7 +48,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">

<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-protobuf_3_7</artifactId>
<artifactId>hadoop-shaded-protobuf_3_25</artifactId>
</dependency>

<dependency>
43 changes: 43 additions & 0 deletions hadoop-ozone/ozonefs-hadoop3/pom.xml
@@ -95,6 +95,49 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<skip>${maven.shade.skip}</skip>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resources>
<resource>META-INF/BC1024KE.DSA</resource>
<resource>META-INF/BC2048KE.DSA</resource>
<resource>META-INF/BC1024KE.SF</resource>
<resource>META-INF/BC2048KE.SF</resource>
</resources>
</transformer>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
<transformer
implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
<resource>ozone-default-generated.xml</resource>
</transformer>
</transformers>
<relocations>
<relocation>
<pattern>com.google.protobuf</pattern>
<shadedPattern>
org.apache.hadoop.shaded.com.google.protobuf
</shadedPattern>
<includes>
<include>com.google.protobuf.*</include>
</includes>
</relocation>
</relocations>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
6 changes: 6 additions & 0 deletions hadoop-ozone/ozonefs-shaded/pom.xml
@@ -78,6 +78,12 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>2.5.0</version>
<scope>compile</scope>
Comment on lines +84 to +85 (Contributor):

Version and scope are inherited / defaulted.

Suggested change (drop these two lines):
<version>2.5.0</version>
<scope>compile</scope>

• Previously we could assume protobuf 2.5.0 is available in the Hadoop environment.
• Then we had to introduce ozone-filesystem-hadoop3-client for environments where Hadoop's protobuf is relocated to org.apache.hadoop.shaded.com.google.protobuf, but still did not include Protobuf in the fat jar.
• Now Hadoop 3.4 no longer has either of these, so we need to include Protobuf in the fat jars.

With that, I don't think we still need ozone-filesystem-hadoop3-client, do we?

Also, can/should we relocate it to org.apache.hadoop.ozone.shaded.com.google.protobuf?

Contributor Author: agreed.

Contributor Author: we should also use this opportunity to drop protobuf 2.5 from Ozone. Will do that in a separate jira.
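A minimal sketch of the relocation being asked about, following the maven-shade-plugin configuration already used for ozonefs-hadoop3 above; the org.apache.hadoop.ozone.shaded prefix is the hypothetical target from the question, not a settled choice:

<!-- Hypothetical: relocate protobuf under an Ozone-specific shaded prefix. -->
<relocation>
  <pattern>com.google.protobuf</pattern>
  <shadedPattern>org.apache.hadoop.ozone.shaded.com.google.protobuf</shadedPattern>
</relocation>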

</dependency>
</dependencies>
<build>
<plugins>
6 changes: 0 additions & 6 deletions hadoop-ozone/pom.xml
@@ -128,11 +128,6 @@
<artifactId>ozone-filesystem-hadoop3</artifactId>
<version>${ozone.version}</version>
</dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-filesystem-hadoop3-client</artifactId>
<version>${ozone.version}</version>
</dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-filesystem-hadoop2</artifactId>
@@ -386,7 +381,6 @@
<module>ozonefs-shaded</module>
<module>ozonefs-hadoop2</module>
<module>ozonefs-hadoop3</module>
<module>ozonefs-hadoop3-client</module>
Contributor Author: revert HDDS-6926.

</modules>
</profile>
<profile>