-
Notifications
You must be signed in to change notification settings - Fork 588
HDDS-11617. Update hadoop to 3.4.1 #7376
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
8914981
a5f91e7
ec588ee
451929e
e0547e1
0f54314
ea21702
735c1de
57aa7a8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -20,7 +20,7 @@ hadoop.log.dir=. | |
| hadoop.log.file=hadoop.log | ||
|
|
||
| # Define the root logger to the system property "hadoop.root.logger". | ||
| log4j.rootLogger=${hadoop.root.logger}, EventCounter | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
| log4j.rootLogger=${hadoop.root.logger} | ||
|
|
||
| # Logging Threshold | ||
| log4j.threshold=ALL | ||
|
|
@@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd | |
| log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR | ||
| #log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN | ||
|
|
||
| # | ||
| # Event Counter Appender | ||
| # Sends counts of logging messages at different severity levels to Hadoop Metrics. | ||
| # | ||
| log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter | ||
|
|
||
|
|
||
| # Log levels of third-party libraries | ||
| log4j.logger.org.apache.commons.beanutils=WARN | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -153,6 +153,14 @@ static void runContainerStateMachineMetrics( | |
| assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric); | ||
| assertCounter("WriteChunkMsNumOps", 1L, metric); | ||
|
|
||
| applyTransactionLatency = getDoubleGauge( | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Behavior change due to https://issues.apache.org/jira/browse/HADOOP-18502 |
||
| "ApplyTransactionNsAvgTime", metric); | ||
| assertThat(applyTransactionLatency).isGreaterThan(0.0); | ||
| writeStateMachineLatency = getDoubleGauge( | ||
| "WriteStateMachineDataNsAvgTime", metric); | ||
| assertThat(writeStateMachineLatency).isGreaterThan(0.0); | ||
|
|
||
|
|
||
| //Read Chunk | ||
| ContainerProtos.ContainerCommandRequestProto readChunkRequest = | ||
| ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest | ||
|
|
@@ -165,12 +173,6 @@ static void runContainerStateMachineMetrics( | |
| RaftGroupId.valueOf(pipeline.getId().getId())); | ||
| assertCounter("NumQueryStateMachineOps", 1L, metric); | ||
| assertCounter("NumApplyTransactionOps", 1L, metric); | ||
| applyTransactionLatency = getDoubleGauge( | ||
| "ApplyTransactionNsAvgTime", metric); | ||
| assertThat(applyTransactionLatency).isGreaterThan(0.0); | ||
| writeStateMachineLatency = getDoubleGauge( | ||
| "WriteStateMachineDataNsAvgTime", metric); | ||
| assertThat(writeStateMachineLatency).isGreaterThan(0.0); | ||
|
|
||
| } finally { | ||
| if (client != null) { | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -78,6 +78,12 @@ | |||||
| </exclusion> | ||||||
| </exclusions> | ||||||
| </dependency> | ||||||
| <dependency> | ||||||
| <groupId>com.google.protobuf</groupId> | ||||||
| <artifactId>protobuf-java</artifactId> | ||||||
| <version>2.5.0</version> | ||||||
| <scope>compile</scope> | ||||||
|
Comment on lines
+84
to
+85
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Version and scope are inherited / defaulted.
Suggested change
With that, I don't think we still need it. Also, can/should we relocate it? [NOTE: the original inline code references appear to have been lost in extraction — confirm against the PR thread.]
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. agreed.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We should also use this opportunity to drop protobuf 2.5 from Ozone. Will do that in a separate JIRA. |
||||||
| </dependency> | ||||||
| </dependencies> | ||||||
| <build> | ||||||
| <plugins> | ||||||
|
|
||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Workaround for https://issues.apache.org/jira/browse/HADOOP-19301