diff --git a/LICENSE-binary b/LICENSE-binary index 7a712a5ac98df..0e93a3aba9f4e 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -208,6 +208,7 @@ License Version 2.0: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java +ch.qos.reload4j:reload4j:1.2.18.3 com.aliyun:aliyun-java-sdk-core:3.4.0 com.aliyun:aliyun-java-sdk-ecs:4.2.0 com.aliyun:aliyun-java-sdk-ram:3.0.0 @@ -273,7 +274,6 @@ io.reactivex:rxjava-string:1.1.1 io.reactivex:rxnetty:0.4.20 io.swagger:swagger-annotations:1.5.4 javax.inject:javax.inject:1 -log4j:log4j:1.2.17 net.java.dev.jna:jna:5.2.0 net.minidev:accessors-smart:2.4.7 net.minidev:json-smart:2.4.7 @@ -436,9 +436,10 @@ org.codehaus.mojo:animal-sniffer-annotations:1.17 org.jruby.jcodings:jcodings:1.0.13 org.jruby.joni:joni:2.1.2 org.ojalgo:ojalgo:43.0 -org.slf4j:jul-to-slf4j:1.7.30 -org.slf4j:slf4j-api:1.7.30 -org.slf4j:slf4j-log4j12:1.7.30 +org.slf4j:jcl-over-slf4j:1.7.35 +org.slf4j:jul-to-slf4j:1.7.35 +org.slf4j:slf4j-api:1.7.35 +org.slf4j:slf4j-reload4j:1.7.35 CDDL 1.1 + GPLv2 with classpath exception diff --git a/dev-support/git-jira-validation/README.md b/dev-support/git-jira-validation/README.md new file mode 100644 index 0000000000000..308c54228d17c --- /dev/null +++ b/dev-support/git-jira-validation/README.md @@ -0,0 +1,134 @@ + + +Apache Hadoop Git/Jira FixVersion validation +============================================================ + +Git commit messages in Apache Hadoop contain a Jira number of the +format HADOOP-XXXX, HDFS-XXXX, YARN-XXXX or MAPREDUCE-XXXX. +While creating a release candidate, we also include a changelist, +which is identified from Fixed/Closed Jiras that carry the correct +fix versions. However, we sometimes find inconsistencies between +the fixed Jiras and the Git commit messages. + +The git_jira_fix_version_check.py script identifies all git commits +whose commit messages have any of these issues: + +1. the commit is reverted as per its commit message +2. the commit message does not contain a Jira number in the expected format +3. the Jira does not have the expected fixVersion +4. the Jira has the expected fixVersion, but it is not yet resolved + +The script also finds any resolved Jira that has the expected +fixVersion but no corresponding commit. + +This should be useful as part of RC preparation. + +git_jira_fix_version_check supports Python 3 and requires the +jira package to be installed: + +``` +$ python3 --version +Python 3.9.7 + +$ python3 -m venv ./venv + +$ ./venv/bin/pip install -r dev-support/git-jira-validation/requirements.txt + +$ ./venv/bin/python dev-support/git-jira-validation/git_jira_fix_version_check.py + +``` + +The script also requires the following inputs: +``` +1. First commit hash to start excluding commits from history: + Usually we can provide the latest commit hash from the last tagged release + so that the script will only loop through commits in the git history + before this commit hash. e.g. for the 3.3.2 release, we can provide + git hash: fa4915fdbbbec434ab41786cb17b82938a613f16 + because this commit bumps up hadoop pom versions to 3.3.2: + https://github.com/apache/hadoop/commit/fa4915fdbbbec434ab41786cb17b82938a613f16 + +2. Fix Version: + The exact fixVersion that we would like to compare all Jiras' fixVersions + with. e.g. for the 3.3.2 release, it should be 3.3.2. + +3. JIRA Project Name: + The exact, case-sensitive name of the project, e.g. HADOOP / OZONE + +4.
Path of project's working dir with release branch checked-in: + Path of project from where we want to compare git hashes from. Local fork + of the project should be up-to date with upstream and expected release + branch should be checked-in. + +5. Jira server url (default url: https://issues.apache.org/jira): + Default value of server points to ASF Jiras but this script can be + used outside of ASF Jira too. +``` + + +Example of script execution: +``` +JIRA Project Name (e.g HADOOP / OZONE etc): HADOOP +First commit hash to start excluding commits from history: fa4915fdbbbec434ab41786cb17b82938a613f16 +Fix Version: 3.3.2 +Jira server url (default: https://issues.apache.org/jira): +Path of project's working dir with release branch checked-in: /Users/vjasani/Documents/src/hadoop-3.3/hadoop + +Check git status output and verify expected branch + +On branch branch-3.3.2 +Your branch is up to date with 'origin/branch-3.3.2'. + +nothing to commit, working tree clean + + +Jira/Git commit message diff starting: ############################################## +Jira not present with version: 3.3.2. Commit: 8cd8e435fb43a251467ca74fadcb14f21a3e8163 HADOOP-17198. Support S3 Access Points (#3260) (branch-3.3.2) (#3955) +WARN: Jira not found. Commit: 8af28b7cca5c6020de94e739e5373afc69f399e5 Updated the index as per 3.3.2 release +WARN: Jira not found. Commit: e42e483d0085aa46543ebcb1196dd155ddb447d0 Make upstream aware of 3.3.1 release +Commit seems reverted. Commit: 6db1165380cd308fb74c9d17a35c1e57174d1e09 Revert "HDFS-14099. Unknown frame descriptor when decompressing multiple frames (#3836)" +Commit seems reverted. Commit: 1e3f94fa3c3d4a951d4f7438bc13e6f008f228f4 Revert "HDFS-16333. fix balancer bug when transfer an EC block (#3679)" +Jira not present with version: 3.3.2. Commit: ce0bc7b473a62a580c1227a4de6b10b64b045d3a HDFS-16344. Improve DirectoryScanner.Stats#toString (#3695) +Jira not present with version: 3.3.2. Commit: 30f0629d6e6f735c9f4808022f1a1827c5531f75 HDFS-16339. Show the threshold when mover threads quota is exceeded (#3689) +Jira not present with version: 3.3.2. Commit: e449daccf486219e3050254d667b74f92e8fc476 YARN-11007. Correct words in YARN documents (#3680) +Commit seems reverted. Commit: 5c189797828e60a3329fd920ecfb99bcbccfd82d Revert "HDFS-16336. Addendum: De-flake TestRollingUpgrade#testRollback (#3686)" +Jira not present with version: 3.3.2. Commit: 544dffd179ed756bc163e4899e899a05b93d9234 HDFS-16171. De-flake testDecommissionStatus (#3280) +Jira not present with version: 3.3.2. Commit: c6914b1cb6e4cab8263cd3ae5cc00bc7a8de25de HDFS-16350. Datanode start time should be set after RPC server starts successfully (#3711) +Jira not present with version: 3.3.2. Commit: 328d3b84dfda9399021ccd1e3b7afd707e98912d HDFS-16336. Addendum: De-flake TestRollingUpgrade#testRollback (#3686) +Jira not present with version: 3.3.2. Commit: 3ae8d4ccb911c9ababd871824a2fafbb0272c016 HDFS-16336. De-flake TestRollingUpgrade#testRollback (#3686) +Jira not present with version: 3.3.2. Commit: 15d3448e25c797b7d0d401afdec54683055d4bb5 HADOOP-17975. Fallback to simple auth does not work for a secondary DistributedFileSystem instance. (#3579) +Jira not present with version: 3.3.2. Commit: dd50261219de71eaa0a1ad28529953e12dfb92e0 YARN-10991. Fix to ignore the grouping "[]" for resourcesStr in parseResourcesString method (#3592) +Jira not present with version: 3.3.2. Commit: ef462b21bf03b10361d2f9ea7b47d0f7360e517f HDFS-16332. Handle invalid token exception in sasl handshake (#3677) +WARN: Jira not found. 
Commit: b55edde7071419410ea5bea4ce6462b980e48f5b Also update hadoop.version to 3.3.2 +... +... +... +Found first commit hash after which git history is redundant. commit: fa4915fdbbbec434ab41786cb17b82938a613f16 +Exiting successfully +Jira/Git commit message diff completed: ############################################## + +Any resolved Jira with fixVersion 3.3.2 but corresponding commit not present +Starting diff: ############################################## +HADOOP-18066 is marked resolved with fixVersion 3.3.2 but no corresponding commit found +HADOOP-17936 is marked resolved with fixVersion 3.3.2 but no corresponding commit found +Completed diff: ############################################## + + +``` + diff --git a/dev-support/git-jira-validation/git_jira_fix_version_check.py b/dev-support/git-jira-validation/git_jira_fix_version_check.py new file mode 100644 index 0000000000000..c2e12a13aae22 --- /dev/null +++ b/dev-support/git-jira-validation/git_jira_fix_version_check.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +############################################################################ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +############################################################################ +"""An application to assist Release Managers with ensuring that histories in +Git and fixVersions in JIRA are in agreement. See README.md for a detailed +explanation. +""" + + +import os +import re +import subprocess + +from jira import JIRA + +jira_project_name = input("JIRA Project Name (e.g HADOOP / OZONE etc): ") \ + or "HADOOP" +# Define project_jira_keys with - appended. 
e.g for HADOOP Jiras, +# project_jira_keys should include HADOOP-, HDFS-, YARN-, MAPREDUCE- +project_jira_keys = [jira_project_name + '-'] +if jira_project_name == 'HADOOP': + project_jira_keys.append('HDFS-') + project_jira_keys.append('YARN-') + project_jira_keys.append('MAPREDUCE-') + +first_exclude_commit_hash = input("First commit hash to start excluding commits from history: ") +fix_version = input("Fix Version: ") + +jira_server_url = input( + "Jira server url (default: https://issues.apache.org/jira): ") \ + or "https://issues.apache.org/jira" + +jira = JIRA(server=jira_server_url) + +local_project_dir = input("Path of project's working dir with release branch checked-in: ") +os.chdir(local_project_dir) + +GIT_STATUS_MSG = subprocess.check_output(['git', 'status']).decode("utf-8") +print('\nCheck git status output and verify expected branch\n') +print(GIT_STATUS_MSG) + +print('\nJira/Git commit message diff starting: ##############################################') + +issue_set_from_commit_msg = set() + +for commit in subprocess.check_output(['git', 'log', '--pretty=oneline']).decode( + "utf-8").splitlines(): + if commit.startswith(first_exclude_commit_hash): + print("Found first commit hash after which git history is redundant. commit: " + + first_exclude_commit_hash) + print("Exiting successfully") + break + if re.search('revert', commit, re.IGNORECASE): + print("Commit seems reverted. \t\t\t Commit: " + commit) + continue + ACTUAL_PROJECT_JIRA = None + for project_jira in project_jira_keys: + if project_jira in commit: + ACTUAL_PROJECT_JIRA = project_jira + break + if not ACTUAL_PROJECT_JIRA: + print("WARN: Jira not found. \t\t\t Commit: " + commit) + continue + JIRA_NUM = '' + for c in commit.split(ACTUAL_PROJECT_JIRA)[1]: + if c.isdigit(): + JIRA_NUM = JIRA_NUM + c + else: + break + issue = jira.issue(ACTUAL_PROJECT_JIRA + JIRA_NUM) + EXPECTED_FIX_VERSION = False + for version in issue.fields.fixVersions: + if version.name == fix_version: + EXPECTED_FIX_VERSION = True + break + if not EXPECTED_FIX_VERSION: + print("Jira not present with version: " + fix_version + ". \t Commit: " + commit) + continue + if issue.fields.status is None or issue.fields.status.name not in ('Resolved', 'Closed'): + print("Jira is not resolved yet? \t\t Commit: " + commit) + else: + # This means Jira corresponding to current commit message is resolved with expected + # fixVersion. + # This is no-op by default, if needed, convert to print statement. 
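+ # Record this Jira key; the reverse diff below uses this set to flag Jiras resolved with the expected fixVersion but missing a matching commit.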
+ issue_set_from_commit_msg.add(ACTUAL_PROJECT_JIRA + JIRA_NUM) + +print('Jira/Git commit message diff completed: ##############################################') + +print('\nAny resolved Jira with fixVersion ' + fix_version + + ' but corresponding commit not present') +print('Starting diff: ##############################################') +all_issues_with_fix_version = jira.search_issues( + 'project=' + jira_project_name + ' and status in (Resolved,Closed) and fixVersion=' + + fix_version) + +for issue in all_issues_with_fix_version: + if issue.key not in issue_set_from_commit_msg: + print(issue.key + ' is marked resolved with fixVersion ' + fix_version + + ' but no corresponding commit found') + +print('Completed diff: ##############################################') diff --git a/dev-support/git-jira-validation/requirements.txt b/dev-support/git-jira-validation/requirements.txt new file mode 100644 index 0000000000000..ae7535a119fa9 --- /dev/null +++ b/dev-support/git-jira-validation/requirements.txt @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +jira==3.1.1 diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dynamometer.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dynamometer.xml index 448035262e12d..b2ce562231c5a 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dynamometer.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dynamometer.xml @@ -66,7 +66,7 @@ org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml index 0edfdeb7b0d52..af5d89d7efe48 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml @@ -40,7 +40,7 @@ org.apache.hadoop:hadoop-hdfs org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml index d698a3005d429..bec2f94b95ea1 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml @@ -69,7 +69,7 @@ org.apache.hadoop:hadoop-hdfs org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml index ff6f99080cafd..e5e6834b04206 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml @@ -69,7 +69,7 @@ org.apache.hadoop:hadoop-hdfs org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml index 06a55d6d06a72..28d5ebe9f605d 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml @@ -179,7 +179,7 @@ org.apache.hadoop:hadoop-hdfs org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb jdiff:jdiff:jar diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml index cb3d9cdf24978..59000c071131c 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml @@ -40,7 +40,7 @@ org.apache.hadoop:hadoop-hdfs org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml index 054d8c0ace2bd..1b9140f419b4b 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml @@ -214,7 +214,7 @@ org.apache.hadoop:hadoop-pipes org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml index 
4da4ac5acb98b..cd86ce4e41766 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml @@ -309,7 +309,7 @@ org.apache.hadoop:* org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 + org.slf4j:slf4j-reload4j org.hsqldb:hsqldb diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index f99ac01aed38e..3781e8929d866 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -84,8 +84,8 @@ org.slf4j:slf4j-api commons-logging:commons-logging - - log4j:log4j + + ch.qos.reload4j:reload4j com.google.code.findbugs:jsr305 diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index 3f3564f9fac49..6627983d57cd9 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -88,8 +88,8 @@ org.slf4j:slf4j-api commons-logging:commons-logging - - log4j:log4j + + ch.qos.reload4j:reload4j junit:junit diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml index 21f8c99bfe452..3d6c776b588f7 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml +++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml @@ -33,8 +33,8 @@ - log4j - log4j + ch.qos.reload4j + reload4j test @@ -42,11 +42,6 @@ slf4j-api test - - org.slf4j - slf4j-log4j12 - test - junit junit diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 680c2504d8ece..f912c6e5a0806 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -193,8 +193,12 @@ slf4j-log4j12 - log4j - log4j + org.slf4j + slf4j-reload4j + + + ch.qos.reload4j + reload4j com.fasterxml.jackson.core @@ -682,7 +686,7 @@ commons-logging:commons-logging junit:junit com.google.code.findbugs:jsr305 - log4j:log4j + ch.qos.reload4j:reload4j org.eclipse.jetty.websocket:websocket-common org.eclipse.jetty.websocket:websocket-api diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index e2b7e1f671d12..f2e7abcb5a5d8 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -103,8 +103,8 @@ * one of the three custom log4j appenders we have --> - log4j - log4j + ch.qos.reload4j + reload4j runtime true @@ -150,8 +150,8 @@ org.slf4j:slf4j-api commons-logging:commons-logging - - log4j:log4j + + ch.qos.reload4j:reload4j com.google.code.findbugs:jsr305 diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml index 7674812016d7e..44c4bdca11be9 100644 --- a/hadoop-client-modules/hadoop-client/pom.xml +++ b/hadoop-client-modules/hadoop-client/pom.xml @@ -206,8 +206,8 @@ commons-cli - log4j - log4j + ch.qos.reload4j + reload4j com.sun.jersey @@ -282,11 +282,6 @@ io.netty netty - - - org.slf4j - slf4j-log4j12 - @@ -315,11 +310,6 @@ io.netty netty - - - org.slf4j - slf4j-log4j12 - diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index f4af8183c0f7d..22096fcebbfc3 
100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -47,13 +47,13 @@ compile - log4j - log4j + ch.qos.reload4j + reload4j runtime org.slf4j - slf4j-log4j12 + slf4j-reload4j runtime diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 7b814b0d6a17e..8b16652dc768b 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -82,13 +82,13 @@ compile - log4j - log4j + ch.qos.reload4j + reload4j runtime org.slf4j - slf4j-log4j12 + slf4j-reload4j runtime @@ -176,6 +176,12 @@ apacheds-server-integ ${apacheds.version} test + + + log4j + log4j + + org.apache.directory.server diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index b3d52b9fa7cd3..b5b3ad2532312 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -159,8 +159,8 @@ compile - log4j - log4j + ch.qos.reload4j + reload4j compile @@ -205,7 +205,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j compile diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 5e2d6c5badb2e..13c9d8573796e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -38,6 +38,7 @@ import java.nio.file.AccessDeniedException; import java.nio.file.FileSystems; import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; @@ -970,6 +971,14 @@ private static void unpackEntries(TarArchiveInputStream tis, + " would create entry outside of " + outputDir); } + if (entry.isSymbolicLink() || entry.isLink()) { + String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir); + if (!canonicalTargetPath.startsWith(targetDirPath)) { + throw new IOException( + "expanding " + entry.getName() + " would create entry outside of " + outputDir); + } + } + if (entry.isDirectory()) { File subDir = new File(outputDir, entry.getName()); if (!subDir.mkdirs() && !subDir.isDirectory()) { @@ -985,10 +994,12 @@ private static void unpackEntries(TarArchiveInputStream tis, } if (entry.isSymbolicLink()) { - // Create symbolic link relative to tar parent dir - Files.createSymbolicLink(FileSystems.getDefault() - .getPath(outputDir.getPath(), entry.getName()), - FileSystems.getDefault().getPath(entry.getLinkName())); + // Create symlink with canonical target path to ensure that we don't extract + // outside targetDirPath + String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir); + Files.createSymbolicLink( + FileSystems.getDefault().getPath(outputDir.getPath(), entry.getName()), + FileSystems.getDefault().getPath(canonicalTargetPath)); return; } @@ -1000,7 +1011,8 @@ private static void unpackEntries(TarArchiveInputStream tis, } if (entry.isLink()) { - File src = new File(outputDir, entry.getLinkName()); + String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir); + File src = new File(canonicalTargetPath); HardLink.createHardLink(src, outputFile); return; } @@ -1008,6 +1020,20 @@ private static void unpackEntries(TarArchiveInputStream tis, org.apache.commons.io.FileUtils.copyToFile(tis, outputFile); } + /** + * Gets the canonical path 
for the given path. + * + * @param path The path for which the canonical path needs to be computed. + * @param parentDir The parent directory to use if the path is a relative path. + * @return The canonical path of the given path. + */ + private static String getCanonicalPath(String path, File parentDir) throws IOException { + java.nio.file.Path targetPath = Paths.get(path); + return (targetPath.isAbsolute() ? + new File(path) : + new File(parentDir, path)).getCanonicalPath(); + } + /** * Class for creating hardlinks. * Supports Unix, WindXP. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 7503edd45f440..8f333d1506bfa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1579,11 +1579,6 @@ public boolean mkdirs(Path dir, FsPermission permission) throw readOnlyMountTable("mkdirs", dir); } - @Override - public boolean mkdirs(Path dir) throws IOException { - return mkdirs(dir, null); - } - @Override public FSDataInputStream open(Path f, int bufferSize) throws AccessControlException, FileNotFoundException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java index 249930ebe3f22..2ebe94b0385ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java @@ -81,7 +81,7 @@ public RawErasureDecoder(ErasureCoderOptions coderOptions) { * @param outputs output buffers to put decoded data into according to * erasedIndexes, ready for read after the call */ - public void decode(ByteBuffer[] inputs, int[] erasedIndexes, + public synchronized void decode(ByteBuffer[] inputs, int[] erasedIndexes, ByteBuffer[] outputs) throws IOException { ByteBufferDecodingState decodingState = new ByteBufferDecodingState(this, inputs, erasedIndexes, outputs); @@ -130,7 +130,7 @@ protected abstract void doDecode(ByteBufferDecodingState decodingState) * erasedIndexes, ready for read after the call * @throws IOException if the decoder is closed. 
*/ - public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) + public synchronized void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) throws IOException { ByteArrayDecodingState decodingState = new ByteArrayDecodingState(this, inputs, erasedIndexes, outputs); @@ -163,7 +163,7 @@ protected abstract void doDecode(ByteArrayDecodingState decodingState) * erasedIndexes, ready for read after the call * @throws IOException if the decoder is closed */ - public void decode(ECChunk[] inputs, int[] erasedIndexes, + public synchronized void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) throws IOException { ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs); ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java index 0aba34845a676..334e370214ef4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java @@ -85,7 +85,7 @@ public static boolean isLog4jLogger(Class clazz) { } Logger log = LoggerFactory.getLogger(clazz); try { - Class log4jClass = Class.forName("org.slf4j.impl.Log4jLoggerAdapter"); + Class log4jClass = Class.forName("org.slf4j.impl.Reload4jLoggerAdapter"); return log4jClass.isInstance(log); } catch (ClassNotFoundException e) { return false; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index e84d23c058a9b..03b9d22b98daa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -42,13 +42,14 @@ import java.net.URL; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystems; import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.jar.Attributes; import java.util.jar.JarFile; import java.util.jar.Manifest; @@ -60,9 +61,12 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.tools.tar.TarEntry; import org.apache.tools.tar.TarOutputStream; + +import org.assertj.core.api.Assertions; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -158,13 +162,12 @@ public void setup() throws IOException { FileUtils.forceMkdir(dir1); FileUtils.forceMkdir(dir2); - new File(del, FILE).createNewFile(); - File tmpFile = new File(tmp, FILE); - tmpFile.createNewFile(); + Verify.createNewFile(new File(del, FILE)); + File tmpFile = Verify.createNewFile(new File(tmp, FILE)); // create files - new File(dir1, FILE).createNewFile(); - new File(dir2, FILE).createNewFile(); + Verify.createNewFile(new File(dir1, FILE)); + Verify.createNewFile(new File(dir2, FILE)); // create a symlink to file File link = new File(del, LINK); @@ -173,7 +176,7 @@ public void setup() throws IOException { // create a symlink to 
dir File linkDir = new File(del, "tmpDir"); FileUtil.symLink(tmp.toString(), linkDir.toString()); - Assert.assertEquals(5, del.listFiles().length); + Assert.assertEquals(5, Objects.requireNonNull(del.listFiles()).length); // create files in partitioned directories createFile(partitioned, "part-r-00000", "foo"); @@ -200,13 +203,9 @@ public void tearDown() throws IOException { private File createFile(File directory, String name, String contents) throws IOException { File newFile = new File(directory, name); - PrintWriter pw = new PrintWriter(newFile); - try { + try (PrintWriter pw = new PrintWriter(newFile)) { pw.println(contents); } - finally { - pw.close(); - } return newFile; } @@ -218,11 +217,11 @@ public void testListFiles() throws IOException { //Test existing directory with no files case File newDir = new File(tmp.getPath(),"test"); - newDir.mkdir(); + Verify.mkdir(newDir); Assert.assertTrue("Failed to create test dir", newDir.exists()); files = FileUtil.listFiles(newDir); Assert.assertEquals(0, files.length); - newDir.delete(); + assertTrue(newDir.delete()); Assert.assertFalse("Failed to delete test dir", newDir.exists()); //Test non-existing directory case, this throws @@ -244,11 +243,11 @@ public void testListAPI() throws IOException { //Test existing directory with no files case File newDir = new File(tmp.getPath(),"test"); - newDir.mkdir(); + Verify.mkdir(newDir); Assert.assertTrue("Failed to create test dir", newDir.exists()); files = FileUtil.list(newDir); Assert.assertEquals("New directory unexpectedly contains files", 0, files.length); - newDir.delete(); + assertTrue(newDir.delete()); Assert.assertFalse("Failed to delete test dir", newDir.exists()); //Test non-existing directory case, this throws @@ -266,7 +265,7 @@ public void testListAPI() throws IOException { public void testFullyDelete() throws IOException { boolean ret = FileUtil.fullyDelete(del); Assert.assertTrue(ret); - Assert.assertFalse(del.exists()); + Verify.notExists(del); validateTmpDir(); } @@ -279,13 +278,13 @@ public void testFullyDelete() throws IOException { @Test (timeout = 30000) public void testFullyDeleteSymlinks() throws IOException { File link = new File(del, LINK); - Assert.assertEquals(5, del.list().length); + assertDelListLength(5); // Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not // delete contents of tmp. See setupDirs for details. boolean ret = FileUtil.fullyDelete(link); Assert.assertTrue(ret); - Assert.assertFalse(link.exists()); - Assert.assertEquals(4, del.list().length); + Verify.notExists(link); + assertDelListLength(4); validateTmpDir(); File linkDir = new File(del, "tmpDir"); @@ -293,8 +292,8 @@ public void testFullyDeleteSymlinks() throws IOException { // delete contents of tmp. See setupDirs for details. ret = FileUtil.fullyDelete(linkDir); Assert.assertTrue(ret); - Assert.assertFalse(linkDir.exists()); - Assert.assertEquals(3, del.list().length); + Verify.notExists(linkDir); + assertDelListLength(3); validateTmpDir(); } @@ -310,16 +309,16 @@ public void testFullyDeleteDanglingSymlinks() throws IOException { // to make y as a dangling link to file tmp/x boolean ret = FileUtil.fullyDelete(tmp); Assert.assertTrue(ret); - Assert.assertFalse(tmp.exists()); + Verify.notExists(tmp); // dangling symlink to file File link = new File(del, LINK); - Assert.assertEquals(5, del.list().length); + assertDelListLength(5); // Even though 'y' is dangling symlink to file tmp/x, fullyDelete(y) // should delete 'y' properly. 
ret = FileUtil.fullyDelete(link); Assert.assertTrue(ret); - Assert.assertEquals(4, del.list().length); + assertDelListLength(4); // dangling symlink to directory File linkDir = new File(del, "tmpDir"); @@ -327,22 +326,22 @@ public void testFullyDeleteDanglingSymlinks() throws IOException { // delete tmpDir properly. ret = FileUtil.fullyDelete(linkDir); Assert.assertTrue(ret); - Assert.assertEquals(3, del.list().length); + assertDelListLength(3); } @Test (timeout = 30000) public void testFullyDeleteContents() throws IOException { boolean ret = FileUtil.fullyDeleteContents(del); Assert.assertTrue(ret); - Assert.assertTrue(del.exists()); - Assert.assertEquals(0, del.listFiles().length); + Verify.exists(del); + Assert.assertEquals(0, Objects.requireNonNull(del.listFiles()).length); validateTmpDir(); } private void validateTmpDir() { - Assert.assertTrue(tmp.exists()); - Assert.assertEquals(1, tmp.listFiles().length); - Assert.assertTrue(new File(tmp, FILE).exists()); + Verify.exists(tmp); + Assert.assertEquals(1, Objects.requireNonNull(tmp.listFiles()).length); + Verify.exists(new File(tmp, FILE)); } /** @@ -366,15 +365,15 @@ private void validateTmpDir() { * @throws IOException */ private void setupDirsAndNonWritablePermissions() throws IOException { - new MyFile(del, FILE_1_NAME).createNewFile(); + Verify.createNewFile(new MyFile(del, FILE_1_NAME)); // "file1" is non-deletable by default, see MyFile.delete(). - xSubDir.mkdirs(); - file2.createNewFile(); + Verify.mkdirs(xSubDir); + Verify.createNewFile(file2); - xSubSubDir.mkdirs(); - file22.createNewFile(); + Verify.mkdirs(xSubSubDir); + Verify.createNewFile(file22); revokePermissions(file22); revokePermissions(xSubSubDir); @@ -382,8 +381,8 @@ private void setupDirsAndNonWritablePermissions() throws IOException { revokePermissions(file2); revokePermissions(xSubDir); - ySubDir.mkdirs(); - file3.createNewFile(); + Verify.mkdirs(ySubDir); + Verify.createNewFile(file3); File tmpFile = new File(tmp, FILE); tmpFile.createNewFile(); @@ -448,6 +447,88 @@ public void testFailFullyDeleteGrantPermissions() throws IOException { validateAndSetWritablePermissions(false, ret); } + /** + * Asserts if the {@link TestFileUtil#del} meets the given expected length. + * + * @param expectedLength The expected length of the {@link TestFileUtil#del}. + */ + private void assertDelListLength(int expectedLength) { + Assertions.assertThat(del.list()).describedAs("del list").isNotNull().hasSize(expectedLength); + } + + /** + * Helper class to perform {@link File} operation and also verify them. + */ + public static class Verify { + /** + * Invokes {@link File#createNewFile()} on the given {@link File} instance. + * + * @param file The file to call {@link File#createNewFile()} on. + * @return The result of {@link File#createNewFile()}. + * @throws IOException As per {@link File#createNewFile()}. + */ + public static File createNewFile(File file) throws IOException { + assertTrue("Unable to create new file " + file, file.createNewFile()); + return file; + } + + /** + * Invokes {@link File#mkdir()} on the given {@link File} instance. + * + * @param file The file to call {@link File#mkdir()} on. + * @return The result of {@link File#mkdir()}. + */ + public static File mkdir(File file) { + assertTrue("Unable to mkdir for " + file, file.mkdir()); + return file; + } + + /** + * Invokes {@link File#mkdirs()} on the given {@link File} instance. + * + * @param file The file to call {@link File#mkdirs()} on. + * @return The result of {@link File#mkdirs()}. 
+ */ + public static File mkdirs(File file) { + assertTrue("Unable to mkdirs for " + file, file.mkdirs()); + return file; + } + + /** + * Invokes {@link File#delete()} on the given {@link File} instance. + * + * @param file The file to call {@link File#delete()} on. + * @return The result of {@link File#delete()}. + */ + public static File delete(File file) { + assertTrue("Unable to delete " + file, file.delete()); + return file; + } + + /** + * Invokes {@link File#exists()} on the given {@link File} instance. + * + * @param file The file to call {@link File#exists()} on. + * @return The result of {@link File#exists()}. + */ + public static File exists(File file) { + assertTrue("Expected file " + file + " doesn't exist", file.exists()); + return file; + } + + /** + * Invokes {@link File#exists()} on the given {@link File} instance to check if the + * {@link File} doesn't exists. + * + * @param file The file to call {@link File#exists()} on. + * @return The negation of the result of {@link File#exists()}. + */ + public static File notExists(File file) { + assertFalse("Expected file " + file + " must not exist", file.exists()); + return file; + } + } + /** * Extend {@link File}. Same as {@link File} except for two things: (1) This * treats file1Name as a very special file which is not delete-able @@ -580,14 +661,13 @@ public void testGetDU() throws Exception { FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/); } } - + @Test (timeout = 30000) - public void testUnTar() throws IOException { + public void testUnTar() throws Exception { // make a simple tar: final File simpleTar = new File(del, FILE); - OutputStream os = new FileOutputStream(simpleTar); - TarOutputStream tos = new TarOutputStream(os); - try { + OutputStream os = new FileOutputStream(simpleTar); + try (TarOutputStream tos = new TarOutputStream(os)) { TarEntry te = new TarEntry("/bar/foo"); byte[] data = "some-content".getBytes("UTF-8"); te.setSize(data.length); @@ -596,55 +676,42 @@ public void testUnTar() throws IOException { tos.closeEntry(); tos.flush(); tos.finish(); - } finally { - tos.close(); } // successfully untar it into an existing dir: FileUtil.unTar(simpleTar, tmp); // check result: - assertTrue(new File(tmp, "/bar/foo").exists()); + Verify.exists(new File(tmp, "/bar/foo")); assertEquals(12, new File(tmp, "/bar/foo").length()); - - final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog"); - regularFile.createNewFile(); - assertTrue(regularFile.exists()); - try { - FileUtil.unTar(simpleTar, regularFile); - assertTrue("An IOException expected.", false); - } catch (IOException ioe) { - // okay - } + + final File regularFile = + Verify.createNewFile(new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog")); + LambdaTestUtils.intercept(IOException.class, () -> FileUtil.unTar(simpleTar, regularFile)); } @Test (timeout = 30000) public void testReplaceFile() throws IOException { - final File srcFile = new File(tmp, "src"); - // src exists, and target does not exist: - srcFile.createNewFile(); - assertTrue(srcFile.exists()); + final File srcFile = Verify.createNewFile(new File(tmp, "src")); final File targetFile = new File(tmp, "target"); - assertTrue(!targetFile.exists()); + Verify.notExists(targetFile); FileUtil.replaceFile(srcFile, targetFile); - assertTrue(!srcFile.exists()); - assertTrue(targetFile.exists()); + Verify.notExists(srcFile); + Verify.exists(targetFile); // src exists and target is a regular file: - srcFile.createNewFile(); - assertTrue(srcFile.exists()); + 
Verify.createNewFile(srcFile); + Verify.exists(srcFile); FileUtil.replaceFile(srcFile, targetFile); - assertTrue(!srcFile.exists()); - assertTrue(targetFile.exists()); + Verify.notExists(srcFile); + Verify.exists(targetFile); // src exists, and target is a non-empty directory: - srcFile.createNewFile(); - assertTrue(srcFile.exists()); - targetFile.delete(); - targetFile.mkdirs(); - File obstacle = new File(targetFile, "obstacle"); - obstacle.createNewFile(); - assertTrue(obstacle.exists()); + Verify.createNewFile(srcFile); + Verify.exists(srcFile); + Verify.delete(targetFile); + Verify.mkdirs(targetFile); + File obstacle = Verify.createNewFile(new File(targetFile, "obstacle")); assertTrue(targetFile.exists() && targetFile.isDirectory()); try { FileUtil.replaceFile(srcFile, targetFile); @@ -653,9 +720,9 @@ public void testReplaceFile() throws IOException { // okay } // check up the post-condition: nothing is deleted: - assertTrue(srcFile.exists()); + Verify.exists(srcFile); assertTrue(targetFile.exists() && targetFile.isDirectory()); - assertTrue(obstacle.exists()); + Verify.exists(obstacle); } @Test (timeout = 30000) @@ -668,13 +735,13 @@ public void testCreateLocalTempFile() throws IOException { assertTrue(tmp1.exists() && tmp2.exists()); assertTrue(tmp1.canWrite() && tmp2.canWrite()); assertTrue(tmp1.canRead() && tmp2.canRead()); - tmp1.delete(); - tmp2.delete(); + Verify.delete(tmp1); + Verify.delete(tmp2); assertTrue(!tmp1.exists() && !tmp2.exists()); } @Test (timeout = 30000) - public void testUnZip() throws IOException { + public void testUnZip() throws Exception { // make sa simple zip final File simpleZip = new File(del, FILE); OutputStream os = new FileOutputStream(simpleZip); @@ -695,18 +762,12 @@ public void testUnZip() throws IOException { // successfully unzip it into an existing dir: FileUtil.unZip(simpleZip, tmp); // check result: - assertTrue(new File(tmp, "foo").exists()); + Verify.exists(new File(tmp, "foo")); assertEquals(12, new File(tmp, "foo").length()); - - final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog"); - regularFile.createNewFile(); - assertTrue(regularFile.exists()); - try { - FileUtil.unZip(simpleZip, regularFile); - assertTrue("An IOException expected.", false); - } catch (IOException ioe) { - // okay - } + + final File regularFile = + Verify.createNewFile(new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog")); + LambdaTestUtils.intercept(IOException.class, () -> FileUtil.unZip(simpleZip, regularFile)); } @Test (timeout = 30000) @@ -752,24 +813,24 @@ public void testCopy5() throws IOException { final File dest = new File(del, "dest"); boolean result = FileUtil.copy(fs, srcPath, dest, false, conf); assertTrue(result); - assertTrue(dest.exists()); + Verify.exists(dest); assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length, dest.length()); - assertTrue(srcFile.exists()); // should not be deleted + Verify.exists(srcFile); // should not be deleted // copy regular file, delete src: - dest.delete(); - assertTrue(!dest.exists()); + Verify.delete(dest); + Verify.notExists(dest); result = FileUtil.copy(fs, srcPath, dest, true, conf); assertTrue(result); - assertTrue(dest.exists()); + Verify.exists(dest); assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length, dest.length()); - assertTrue(!srcFile.exists()); // should be deleted + Verify.notExists(srcFile); // should be deleted // copy a dir: - dest.delete(); - assertTrue(!dest.exists()); + Verify.delete(dest); + 
Verify.notExists(dest); srcPath = new Path(partitioned.toURI()); result = FileUtil.copy(fs, srcPath, dest, true, conf); assertTrue(result); @@ -781,7 +842,7 @@ public void testCopy5() throws IOException { assertEquals(3 + System.getProperty("line.separator").getBytes().length, f.length()); } - assertTrue(!partitioned.exists()); // should be deleted + Verify.notExists(partitioned); // should be deleted } @Test (timeout = 30000) @@ -869,8 +930,8 @@ public void testSymlinkRenameTo() throws Exception { // create the symlink FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); - Assert.assertTrue(file.exists()); - Assert.assertTrue(link.exists()); + Verify.exists(file); + Verify.exists(link); File link2 = new File(del, "_link2"); @@ -880,10 +941,10 @@ public void testSymlinkRenameTo() throws Exception { // Make sure the file still exists // (NOTE: this would fail on Java6 on Windows if we didn't // copy the file in FileUtil#symlink) - Assert.assertTrue(file.exists()); + Verify.exists(file); - Assert.assertTrue(link2.exists()); - Assert.assertFalse(link.exists()); + Verify.exists(link2); + Verify.notExists(link); } /** @@ -898,13 +959,13 @@ public void testSymlinkDelete() throws Exception { // create the symlink FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); - Assert.assertTrue(file.exists()); - Assert.assertTrue(link.exists()); + Verify.exists(file); + Verify.exists(link); // make sure that deleting a symlink works properly - Assert.assertTrue(link.delete()); - Assert.assertFalse(link.exists()); - Assert.assertTrue(file.exists()); + Verify.delete(link); + Verify.notExists(link); + Verify.exists(file); } /** @@ -931,13 +992,13 @@ public void testSymlinkLength() throws Exception { Assert.assertEquals(data.length, file.length()); Assert.assertEquals(data.length, link.length()); - file.delete(); - Assert.assertFalse(file.exists()); + Verify.delete(file); + Verify.notExists(file); Assert.assertEquals(0, link.length()); - link.delete(); - Assert.assertFalse(link.exists()); + Verify.delete(link); + Verify.notExists(link); } /** @@ -1003,7 +1064,7 @@ public void testSymlinkFileAlreadyExists() throws IOException { public void testSymlinkSameFile() throws IOException { File file = new File(del, FILE); - file.delete(); + Verify.delete(file); // Create a symbolic link // The operation should succeed @@ -1076,21 +1137,21 @@ private void doUntarAndVerify(File tarFile, File untarDir) String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name"; File testFile = new File(parentDir + Path.SEPARATOR + "version"); - Assert.assertTrue(testFile.exists()); + Verify.exists(testFile); Assert.assertTrue(testFile.length() == 0); String imageDir = parentDir + Path.SEPARATOR + "image"; testFile = new File(imageDir + Path.SEPARATOR + "fsimage"); - Assert.assertTrue(testFile.exists()); + Verify.exists(testFile); Assert.assertTrue(testFile.length() == 157); String currentDir = parentDir + Path.SEPARATOR + "current"; testFile = new File(currentDir + Path.SEPARATOR + "fsimage"); - Assert.assertTrue(testFile.exists()); + Verify.exists(testFile); Assert.assertTrue(testFile.length() == 4331); testFile = new File(currentDir + Path.SEPARATOR + "edits"); - Assert.assertTrue(testFile.exists()); + Verify.exists(testFile); Assert.assertTrue(testFile.length() == 1033); testFile = new File(currentDir + Path.SEPARATOR + "fstime"); - Assert.assertTrue(testFile.exists()); + Verify.exists(testFile); Assert.assertTrue(testFile.length() == 8); } @@ -1151,9 +1212,9 @@ public void 
testCreateJarWithClassPath() throws Exception { } // create non-jar files, which we expect to not be included in the classpath - Assert.assertTrue(new File(tmp, "text.txt").createNewFile()); - Assert.assertTrue(new File(tmp, "executable.exe").createNewFile()); - Assert.assertTrue(new File(tmp, "README").createNewFile()); + Verify.createNewFile(new File(tmp, "text.txt")); + Verify.createNewFile(new File(tmp, "executable.exe")); + Verify.createNewFile(new File(tmp, "README")); // create classpath jar String wildcardPath = tmp.getCanonicalPath() + File.separator + "*"; @@ -1239,9 +1300,9 @@ public void testGetJarsInDirectory() throws Exception { } // create non-jar files, which we expect to not be included in the result - assertTrue(new File(tmp, "text.txt").createNewFile()); - assertTrue(new File(tmp, "executable.exe").createNewFile()); - assertTrue(new File(tmp, "README").createNewFile()); + Verify.createNewFile(new File(tmp, "text.txt")); + Verify.createNewFile(new File(tmp, "executable.exe")); + Verify.createNewFile(new File(tmp, "README")); // pass in the directory String directory = tmp.getCanonicalPath(); @@ -1275,7 +1336,7 @@ public void setupCompareFs() { uri4 = new URI(uris4); uri5 = new URI(uris5); uri6 = new URI(uris6); - } catch (URISyntaxException use) { + } catch (URISyntaxException ignored) { } // Set up InetAddress inet1 = mock(InetAddress.class); @@ -1298,7 +1359,7 @@ public void setupCompareFs() { when(InetAddress.getByName(uris3)).thenReturn(inet3); when(InetAddress.getByName(uris4)).thenReturn(inet4); when(InetAddress.getByName(uris5)).thenReturn(inet5); - } catch (UnknownHostException ue) { + } catch (UnknownHostException ignored) { } fs1 = mock(FileSystem.class); @@ -1318,62 +1379,87 @@ public void setupCompareFs() { @Test public void testCompareFsNull() throws Exception { setupCompareFs(); - assertEquals(FileUtil.compareFs(null,fs1),false); - assertEquals(FileUtil.compareFs(fs1,null),false); + assertFalse(FileUtil.compareFs(null, fs1)); + assertFalse(FileUtil.compareFs(fs1, null)); } @Test public void testCompareFsDirectories() throws Exception { setupCompareFs(); - assertEquals(FileUtil.compareFs(fs1,fs1),true); - assertEquals(FileUtil.compareFs(fs1,fs2),false); - assertEquals(FileUtil.compareFs(fs1,fs5),false); - assertEquals(FileUtil.compareFs(fs3,fs4),true); - assertEquals(FileUtil.compareFs(fs1,fs6),false); + assertTrue(FileUtil.compareFs(fs1, fs1)); + assertFalse(FileUtil.compareFs(fs1, fs2)); + assertFalse(FileUtil.compareFs(fs1, fs5)); + assertTrue(FileUtil.compareFs(fs3, fs4)); + assertFalse(FileUtil.compareFs(fs1, fs6)); } @Test(timeout = 8000) public void testCreateSymbolicLinkUsingJava() throws IOException { final File simpleTar = new File(del, FILE); OutputStream os = new FileOutputStream(simpleTar); - TarArchiveOutputStream tos = new TarArchiveOutputStream(os); - File untarFile = null; - try { + try (TarArchiveOutputStream tos = new TarArchiveOutputStream(os)) { // Files to tar final String tmpDir = "tmp/test"; File tmpDir1 = new File(tmpDir, "dir1/"); File tmpDir2 = new File(tmpDir, "dir2/"); - // Delete the directories if they already exist - tmpDir1.mkdirs(); - tmpDir2.mkdirs(); + Verify.mkdirs(tmpDir1); + Verify.mkdirs(tmpDir2); - java.nio.file.Path symLink = FileSystems - .getDefault().getPath(tmpDir1.getPath() + "/sl"); + java.nio.file.Path symLink = Paths.get(tmpDir1.getPath(), "sl"); // Create Symbolic Link - Files.createSymbolicLink(symLink, - FileSystems.getDefault().getPath(tmpDir2.getPath())).toString(); + Files.createSymbolicLink(symLink, 
Paths.get(tmpDir2.getPath())); assertTrue(Files.isSymbolicLink(symLink.toAbsolutePath())); - // put entries in tar file + // Put entries in tar file putEntriesInTar(tos, tmpDir1.getParentFile()); tos.close(); - untarFile = new File(tmpDir, "2"); - // Untar using java + File untarFile = new File(tmpDir, "2"); + // Untar using Java FileUtil.unTarUsingJava(simpleTar, untarFile, false); // Check symbolic link and other directories are there in untar file assertTrue(Files.exists(untarFile.toPath())); - assertTrue(Files.exists(FileSystems.getDefault().getPath(untarFile - .getPath(), tmpDir))); - assertTrue(Files.isSymbolicLink(FileSystems.getDefault().getPath(untarFile - .getPath().toString(), symLink.toString()))); - + assertTrue(Files.exists(Paths.get(untarFile.getPath(), tmpDir))); + assertTrue(Files.isSymbolicLink(Paths.get(untarFile.getPath(), symLink.toString()))); } finally { FileUtils.deleteDirectory(new File("tmp")); - tos.close(); } + } + + @Test(expected = IOException.class) + public void testCreateArbitrarySymlinkUsingJava() throws IOException { + final File simpleTar = new File(del, FILE); + OutputStream os = new FileOutputStream(simpleTar); + File rootDir = new File("tmp"); + try (TarArchiveOutputStream tos = new TarArchiveOutputStream(os)) { + tos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); + + // Create arbitrary dir + File arbitraryDir = new File(rootDir, "arbitrary-dir/"); + Verify.mkdirs(arbitraryDir); + + // We will tar from the tar-root lineage + File tarRoot = new File(rootDir, "tar-root/"); + File symlinkRoot = new File(tarRoot, "dir1/"); + Verify.mkdirs(symlinkRoot); + + // Create Symbolic Link to an arbitrary dir + java.nio.file.Path symLink = Paths.get(symlinkRoot.getPath(), "sl"); + Files.createSymbolicLink(symLink, arbitraryDir.toPath().toAbsolutePath()); + + // Put entries in tar file + putEntriesInTar(tos, tarRoot); + putEntriesInTar(tos, new File(symLink.toFile(), "dir-outside-tar-root/")); + tos.close(); + + // Untar using Java + File untarFile = new File(rootDir, "extracted"); + FileUtil.unTarUsingJava(simpleTar, untarFile, false); + } finally { + FileUtils.deleteDirectory(rootDir); + } } private void putEntriesInTar(TarArchiveOutputStream tos, File f) @@ -1450,7 +1536,7 @@ public void testReadSymlinkWithAFileAsInput() throws IOException { String result = FileUtil.readLink(file); Assert.assertEquals("", result); - file.delete(); + Verify.delete(file); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java index 98e182236c94c..04337929abd9f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java @@ -35,6 +35,6 @@ public void testFindContainingJar() { Assert.assertTrue("Containing jar does not exist on file system ", jarFile.exists()); Assert.assertTrue("Incorrect jar file " + containingJar, - jarFile.getName().matches("log4j.*[.]jar")); + jarFile.getName().matches("reload4j.*[.]jar")); } } diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index b73b8b688c06f..e62de2a10be75 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -134,8 +134,8 @@ test-jar - log4j - log4j + ch.qos.reload4j + reload4j compile @@ -145,7 +145,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j 
runtime diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index 98296a6bbbe1d..5e6b1502e39ce 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -40,7 +40,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j compile diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index c74e7cd638afa..36bc6cb6cf526 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -79,13 +79,13 @@ compile - log4j - log4j + ch.qos.reload4j + reload4j runtime org.slf4j - slf4j-log4j12 + slf4j-reload4j runtime diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 7172f276ce184..ff4b83b03387f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -48,8 +48,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - log4j - log4j + ch.qos.reload4j + reload4j diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 192674e384313..ecff47a5a0d24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -179,8 +179,8 @@ test-jar - log4j - log4j + ch.qos.reload4j + reload4j compile @@ -190,7 +190,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j runtime diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 77e8cab19cdca..6232adf16f875 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -134,8 +134,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> compile - log4j - log4j + ch.qos.reload4j + reload4j compile @@ -160,7 +160,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j - slf4j-log4j12 + slf4j-reload4j provided diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 7d0a77d670a7e..b9cf724851f62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -54,8 +54,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - log4j - log4j + ch.qos.reload4j + reload4j @@ -71,7 +71,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j - slf4j-log4j12 + slf4j-reload4j provided diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index e38875c69f9c6..d61b520475670 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -118,8 +118,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> compile - log4j - log4j + ch.qos.reload4j + reload4j compile @@ -162,7 +162,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j - slf4j-log4j12 + slf4j-reload4j provided diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index 0367b4a7aa3c8..2c666a3831747 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -293,7 +293,7 @@ public void handle(ExtendedBlock block, IOException e) { volume, block); return; } - LOG.warn("Reporting bad 
{} on {}", block, volume); + LOG.warn("Reporting bad {} on {}", block, volume, e); scanner.datanode.handleBadBlock(block, e, true); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 2ab4b83a3d270..d263d7dfd35db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2353,6 +2353,7 @@ public void shutdown() { if (mbeanName != null) { MBeans.unregister(mbeanName); + mbeanName = null; } if (asyncDiskService != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java index c60acaa003150..ee0bf8a5fb165 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java @@ -80,8 +80,12 @@ private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src, // Assume dstParent existence check done by callers. INode dstParent = dst.getINode(-2); // Use the destination parent's storage policy for quota delta verify. + final boolean isSrcSetSp = src.getLastINode().isSetStoragePolicy(); + final byte storagePolicyID = isSrcSetSp ? + src.getLastINode().getLocalStoragePolicyID() : + dstParent.getStoragePolicyID(); final QuotaCounts delta = src.getLastINode() - .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false, + .computeQuotaUsage(bsps, storagePolicyID, false, Snapshot.CURRENT_STATE_ID); // Reduce the required quota by dst that is being removed diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 7b902d5ff1b3d..fc17eaebf7f19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1363,9 +1363,13 @@ public INodesInPath addLastINode(INodesInPath existing, INode inode, // always verify inode name verifyINodeName(inode.getLocalNameBytes()); + final boolean isSrcSetSp = inode.isSetStoragePolicy(); + final byte storagePolicyID = isSrcSetSp ? 
+ inode.getLocalStoragePolicyID() : + parent.getStoragePolicyID(); final QuotaCounts counts = inode .computeQuotaUsage(getBlockStoragePolicySuite(), - parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID); + storagePolicyID, false, Snapshot.CURRENT_STATE_ID); updateCount(existing, pos, counts, checkQuota); boolean isRename = (inode.getParent() != null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 8b34dfea95462..c3e31bcba692c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -1512,11 +1512,12 @@ public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) { if (!isOpenForWrite()) { return; } - - assert curSegmentTxId == HdfsServerConstants.INVALID_TXID || // on format this is no-op - minTxIdToKeep <= curSegmentTxId : - "cannot purge logs older than txid " + minTxIdToKeep + - " when current segment starts at " + curSegmentTxId; + + Preconditions.checkArgument( + curSegmentTxId == HdfsServerConstants.INVALID_TXID || // on format this is no-op + minTxIdToKeep <= curSegmentTxId, + "cannot purge logs older than txid " + minTxIdToKeep + + " when current segment starts at " + curSegmentTxId); if (minTxIdToKeep == 0) { return; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 03f01eb32eef1..8e417fe43aa5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -340,6 +340,16 @@ public boolean isFile() { return false; } + /** + * Check if this inode itself has a storage policy set. + */ + public boolean isSetStoragePolicy() { + if (isSymlink()) { + return false; + } + return getLocalStoragePolicyID() != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + } + /** Cast this inode to an {@link INodeFile}. 
*/ public INodeFile asFile() { throw new IllegalStateException("Current inode is not a file: " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java index 9ad4b090649b2..203bcc132844f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java @@ -1761,6 +1761,10 @@ private void processXml() throws Exception { XMLEvent ev = expectTag("[section header]", true); if (ev.getEventType() == XMLStreamConstants.END_ELEMENT) { if (ev.asEndElement().getName().getLocalPart().equals("fsimage")) { + if(unprocessedSections.size() == 1 && unprocessedSections.contains + (SnapshotDiffSectionProcessor.NAME)){ + break; + } throw new IOException("FSImage XML ended prematurely, without " + "including section(s) " + StringUtils.join(", ", unprocessedSections)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 80c481886d712..78af86b0a3c8d 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1591,7 +1591,7 @@ dfs.block.scanner.volume.bytes.per.second 1048576 - If this is 0, the DataNode's block scanner will be disabled. If this + If this is configured less than or equal to zero, the DataNode's block scanner will be disabled. If this is positive, this is the number of bytes per second that the DataNode's block scanner will try to scan from each volume. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index fcb52577d9991..fdc746464f4e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -479,4 +479,23 @@ public Object run() throws IOException { assertEquals("The owner did not match ", owner, userUgi.getShortUserName()); otherfs.delete(user1Path, false); } + + @Test + public void testInternalDirectoryPermissions() throws IOException { + LOG.info("Starting testInternalDirectoryPermissions!"); + Configuration localConf = new Configuration(conf); + ConfigUtil.addLinkFallback( + localConf, new Path(targetTestRoot, "fallbackDir").toUri()); + FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf); + // check that the default permissions on a sub-folder of an internal + // directory are the same as those created on non-internal directories. 
+ Path subDirOfInternalDir = new Path("/internalDir/dir1"); + fs.mkdirs(subDirOfInternalDir); + + Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1"); + fs.mkdirs(subDirOfRealDir); + + assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(), + fs.getFileStatus(subDirOfRealDir).getPermission()); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 79088d3be8555..e14ea4dc26579 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.impl.LeaseRenewer; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -958,6 +959,44 @@ public void testQuotaByStorageType() throws Exception { 6 * fileSpace); } + @Test + public void testRenameInodeWithStorageType() throws IOException { + final int size = 64; + final short repl = 1; + final Path foo = new Path("/foo"); + final Path bs1 = new Path(foo, "bs1"); + final Path wow = new Path(bs1, "wow"); + final Path bs2 = new Path(foo, "bs2"); + final Path wow2 = new Path(bs2, "wow2"); + final Path wow3 = new Path(bs2, "wow3"); + + dfs.mkdirs(bs1, FsPermission.getDirDefault()); + dfs.mkdirs(bs2, FsPermission.getDirDefault()); + dfs.setQuota(bs1, 1000, 434217728); + dfs.setQuota(bs2, 1000, 434217728); + // file wow3 without storage policy + DFSTestUtil.createFile(dfs, wow3, size, repl, 0); + + dfs.setStoragePolicy(bs2, HdfsConstants.ONESSD_STORAGE_POLICY_NAME); + + DFSTestUtil.createFile(dfs, wow, size, repl, 0); + DFSTestUtil.createFile(dfs, wow2, size, repl, 0); + assertTrue("Without storage policy, typeConsumed should be 0.", + dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0); + assertTrue("With storage policy, typeConsumed should not be 0.", + dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) != 0); + // wow3 without storage policy , rename will not change typeConsumed + dfs.rename(wow3, bs1); + assertTrue("Rename src without storagePolicy, dst typeConsumed should not be changed.", + dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) == 0); + + long srcTypeQuota = dfs.getQuotaUsage(bs2).getTypeQuota(StorageType.SSD); + dfs.rename(bs2, bs1); + long dstTypeQuota = dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD); + assertTrue("Rename with storage policy, typeConsumed should not be 0.", + dstTypeQuota != srcTypeQuota); + } + private static void checkContentSummary(final ContentSummary expected, final ContentSummary computed) { assertEquals(expected.toString(), computed.toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 113da585c9e9b..417ad3ce74c62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -1367,7 +1367,10 @@ void registerMBean(final String storageId) { @Override 
public void shutdown() { - if (mbeanName != null) MBeans.unregister(mbeanName); + if (mbeanName != null) { + MBeans.unregister(mbeanName); + mbeanName = null; + } } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java index c74785923a7c1..2086e15348e0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java @@ -282,11 +282,17 @@ public void testVolumeIteratorWithCaching() throws Exception { public void testDisableVolumeScanner() throws Exception { Configuration conf = new Configuration(); disableBlockScanner(conf); - TestContext ctx = new TestContext(conf, 1); - try { - Assert.assertFalse(ctx.datanode.getBlockScanner().isEnabled()); - } finally { - ctx.close(); + try(TestContext ctx = new TestContext(conf, 1)) { + assertFalse(ctx.datanode.getBlockScanner().isEnabled()); + } + } + + @Test(timeout=60000) + public void testDisableVolumeScanner2() throws Exception { + Configuration conf = new Configuration(); + conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, -1L); + try(TestContext ctx = new TestContext(conf, 1)) { + assertFalse(ctx.datanode.getBlockScanner().isEnabled()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 7bf3bfc1f8e84..8980e18b68e86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -1122,17 +1122,17 @@ public void testReverseXmlRoundTrip() throws Throwable { LOG.info("Creating reverseImage.xml=" + reverseImageXml.getAbsolutePath() + ", reverseImage=" + reverseImage.getAbsolutePath() + ", reverseImage2Xml=" + reverseImage2Xml.getAbsolutePath()); - if (OfflineImageViewerPB.run(new String[] { "-p", "XML", + if (OfflineImageViewerPB.run(new String[] {"-p", "XML", "-i", originalFsimage.getAbsolutePath(), "-o", reverseImageXml.getAbsolutePath() }) != 0) { throw new IOException("oiv returned failure creating first XML file."); } - if (OfflineImageViewerPB.run(new String[] { "-p", "ReverseXML", + if (OfflineImageViewerPB.run(new String[] {"-p", "ReverseXML", "-i", reverseImageXml.getAbsolutePath(), "-o", reverseImage.getAbsolutePath() }) != 0) { throw new IOException("oiv returned failure recreating fsimage file."); } - if (OfflineImageViewerPB.run(new String[] { "-p", "XML", + if (OfflineImageViewerPB.run(new String[] {"-p", "XML", "-i", reverseImage.getAbsolutePath(), "-o", reverseImage2Xml.getAbsolutePath() }) != 0) { throw new IOException("oiv returned failure creating second " + @@ -1141,7 +1141,7 @@ public void testReverseXmlRoundTrip() throws Throwable { // The XML file we wrote based on the re-created fsimage should be the // same as the one we dumped from the original fsimage. 
Assert.assertEquals("", - GenericTestUtils.getFilesDiff(reverseImageXml, reverseImage2Xml)); + GenericTestUtils.getFilesDiff(reverseImageXml, reverseImage2Xml)); } /** @@ -1176,6 +1176,40 @@ public void testReverseXmlWrongLayoutVersion() throws Throwable { } } + /** + * Tests that the ReverseXML processor doesn't accept XML files without the SnapshotDiffSection. + */ + @Test + public void testReverseXmlWithoutSnapshotDiffSection() throws Throwable { + File imageWSDS = new File(tempDir, "imageWithoutSnapshotDiffSection.xml"); + try(PrintWriter writer = new PrintWriter(imageWSDS, "UTF-8")) { + writer.println(""); + writer.println(""); + writer.println(""); + writer.println("-66"); + writer.println("1"); + writer.println("545bbef596c06af1c3c8dca1ce29096a64608478"); + writer.println(""); + writer.println(""); + writer.println(""); + writer.println("914880" + + ""); + writer.println("9035" + + "00" + + ""); + writer.println(""); + writer.println("00" + + ""); + writer.println("326384987"); + writer.println("10" + + "0"); + writer.println(""); + writer.println(""); + } + OfflineImageReconstructor.run(imageWSDS.getAbsolutePath(), + imageWSDS.getAbsolutePath() + ".out"); + } + @Test public void testFileDistributionCalculatorForException() throws Exception { File fsimageFile = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt index ae3b9c6029e57..4c32838afb0b4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt @@ -27,6 +27,7 @@ set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../hadoop-common-project/hadoop-c # Add extra compiler and linker flags. # -Wno-sign-compare hadoop_add_compiler_flags("-DNDEBUG -DSIMPLE_MEMCPY -fno-strict-aliasing -fsigned-char") +set(CMAKE_CXX_STANDARD 11) # Source location. 
set(SRC main/native) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 5d85ef701606b..faf2bd2084bff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -86,7 +86,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j org.apache.hadoop diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 1163ee307a05f..2d41ad562c35a 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -88,7 +88,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j org.apache.hadoop diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index aa54020df2f15..c3817e00d1274 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -81,8 +81,8 @@ 4.4.13 - 1.7.30 - 1.2.17 + 1.7.36 + 1.2.18.3 1.1 @@ -298,12 +298,28 @@ org.apache.hadoop hadoop-common ${hadoop.version} + + + org.slf4j + slf4j-reload4j + + org.apache.hadoop hadoop-common ${hadoop.version} test-jar + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + org.apache.hadoop @@ -374,12 +390,24 @@ org.apache.hadoop hadoop-mapreduce-client-core ${hadoop.version} + + + org.slf4j + slf4j-reload4j + + org.apache.hadoop hadoop-mapreduce-client-jobclient ${hadoop.version} + + + org.slf4j + slf4j-reload4j + + @@ -953,9 +981,9 @@ ${commons-logging-api.version} - log4j - log4j - ${log4j.version} + ch.qos.reload4j + reload4j + ${reload4j.version} com.sun.jdmk @@ -1099,7 +1127,7 @@ org.slf4j - slf4j-log4j12 + slf4j-reload4j ${slf4j.version} @@ -1305,6 +1333,10 @@ org.apache.kerby kerby-config + + log4j + log4j + org.slf4j slf4j-api @@ -1313,6 +1345,10 @@ org.slf4j slf4j-log4j12 + + org.slf4j + slf4j-reload4j + @@ -1341,6 +1377,14 @@ io.netty netty-transport-native-epoll + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + @@ -1480,6 +1524,10 @@ org.slf4j slf4j-api + + log4j + log4j + @@ -1594,6 +1642,10 @@ jdk.tools jdk.tools + + log4j + log4j + @@ -1602,6 +1654,16 @@ ${hbase.version} test tests + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + org.apache.hbase @@ -1619,6 +1681,28 @@ org.apache.hbase hbase-server ${hbase.version} + + + log4j + log4j + + + + + org.apache.hbase + hbase-server + ${hbase.version} + test + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + org.apache.hbase @@ -1626,6 +1710,16 @@ ${hbase.version} test tests + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + org.apache.hbase @@ -1650,6 +1744,14 @@ jdk.tools jdk.tools + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + @@ -2160,6 +2262,9 @@ com.sun.jersey.jersey-test-framework:* com.google.inject:guice org.ow2.asm:asm + + org.slf4j:slf4j-log4j12 + log4j:log4j diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 546d28d5e8e62..9256dda2f4bff 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -245,8 +245,8 @@ - log4j - log4j + ch.qos.reload4j + reload4j test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index df482c1859872..c1bb6aa68d29b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2672,6 +2672,20 
@@ public static boolean isAclEnabled(Configuration conf) { public static final String DEFAULT_RM_APPLICATION_HTTPS_POLICY = "NONE"; + + // If the proxy connection time enabled. + public static final String RM_PROXY_TIMEOUT_ENABLED = + RM_PREFIX + "proxy.timeout.enabled"; + + public static final boolean DEFALUT_RM_PROXY_TIMEOUT_ENABLED = + true; + + public static final String RM_PROXY_CONNECTION_TIMEOUT = + RM_PREFIX + "proxy.connection.timeout"; + + public static final int DEFAULT_RM_PROXY_CONNECTION_TIMEOUT = + 60000; + /** * Interval of time the linux container executor should try cleaning up * cgroups entry when cleaning up a container. This is required due to what diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index 10d466c27e91e..3befe5ddfb7e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -46,8 +46,8 @@ - log4j - log4j + ch.qos.reload4j + reload4j org.apache.hadoop.thirdparty diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml index 922fd295ee167..ac0681c0279fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -118,8 +118,8 @@ - log4j - log4j + ch.qos.reload4j + reload4j runtime diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index 64f252a1ebece..9670e2b1f0f0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -47,8 +47,8 @@ commons-cli - log4j - log4j + ch.qos.reload4j + reload4j org.eclipse.jetty.websocket diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 419348e868a0e..033250385f2d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -164,8 +164,8 @@ jersey-guice - log4j - log4j + ch.qos.reload4j + reload4j com.fasterxml.jackson.core diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index ff3a817913293..4be357b78a658 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2601,6 +2601,18 @@ + + Enable the web proxy connection timeout, default is enabled. + yarn.resourcemanager.proxy.timeout.enabled + true + + + + The web proxy connection timeout. 
+ yarn.resourcemanager.proxy.connection.timeout + 60000 + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index 8ab1e71103884..f68936a06da2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -160,8 +160,8 @@ hadoop-shaded-guava - log4j - log4j + ch.qos.reload4j + reload4j org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 69e775f84e557..d0d95c388a6d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -2056,6 +2056,22 @@ private void refreshLabelToNodeCache(Set updateLabels) { } } + /** + * Add node to nodeTracker. Used when validating CS configuration by instantiating a new + * CS instance. + * @param nodesToAdd node to be added + */ + public void addNodes(List nodesToAdd) { + writeLock.lock(); + try { + for (FiCaSchedulerNode node : nodesToAdd) { + nodeTracker.addNode(node); + } + } finally { + writeLock.unlock(); + } + } + private void addNode(RMNode nodeManager) { writeLock.lock(); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java index c3b4df4efdf46..d180ffb64ba23 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java @@ -42,6 +42,7 @@ private CapacitySchedulerConfigValidator() { public static boolean validateCSConfiguration( final Configuration oldConf, final Configuration newConf, final RMContext rmContext) throws IOException { + CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler(); CapacityScheduler newCs = new CapacityScheduler(); try { //TODO: extract all the validation steps and replace reinitialize with @@ -49,6 +50,7 @@ public static boolean validateCSConfiguration( newCs.setConf(oldConf); newCs.setRMContext(rmContext); newCs.init(oldConf); + newCs.addNodes(liveScheduler.getAllNodes()); newCs.reinitialize(newConf, rmContext, true); return true; } finally { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java index 4480bc34dcc48..425d63f6a66eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java @@ -53,6 +53,7 @@ public abstract class YarnConfigurationStore { * audit logging and recovery. */ public static class LogMutation implements Serializable { + private static final long serialVersionUID = 7754046036718906356L; private Map updates; private String user; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerConfigValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerConfigValidator.java index 04f4349db1dad..ad114d901cf9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerConfigValidator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerConfigValidator.java @@ -19,13 +19,23 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.LocalConfigurationProvider; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.impl.LightWeightResource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.util.YarnVersionInfo; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -34,9 +44,71 @@ import java.util.HashMap; import java.util.Map; +import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; import static org.junit.Assert.fail; public class 
TestCapacitySchedulerConfigValidator { + public static final int NODE_MEMORY = 16; + public static final int NODE1_VCORES = 8; + public static final int NODE2_VCORES = 10; + public static final int NODE3_VCORES = 12; + public static final Map NODE_GPU = ImmutableMap.of(GPU_URI, 2L); + public static final int GB = 1024; + + private static final String PARENT_A = "parentA"; + private static final String PARENT_B = "parentB"; + private static final String LEAF_A = "leafA"; + private static final String LEAF_B = "leafB"; + + private static final String PARENT_A_FULL_PATH = CapacitySchedulerConfiguration.ROOT + + "." + PARENT_A; + private static final String LEAF_A_FULL_PATH = PARENT_A_FULL_PATH + + "." + LEAF_A; + private static final String PARENT_B_FULL_PATH = CapacitySchedulerConfiguration.ROOT + + "." + PARENT_B; + private static final String LEAF_B_FULL_PATH = PARENT_B_FULL_PATH + + "." + LEAF_B; + + private final Resource A_MINRES = Resource.newInstance(16 * GB, 10); + private final Resource B_MINRES = Resource.newInstance(32 * GB, 5); + private final Resource FULL_MAXRES = Resource.newInstance(48 * GB, 30); + private final Resource PARTIAL_MAXRES = Resource.newInstance(16 * GB, 10); + private final Resource VCORE_EXCEEDED_MAXRES = Resource.newInstance(16 * GB, 50); + private Resource A_MINRES_GPU; + private Resource B_MINRES_GPU; + private Resource FULL_MAXRES_GPU; + private Resource PARTIAL_MAXRES_GPU; + private Resource GPU_EXCEEDED_MAXRES_GPU; + + protected MockRM mockRM = null; + protected MockNM nm1 = null; + protected MockNM nm2 = null; + protected MockNM nm3 = null; + protected CapacityScheduler cs; + + public static void setupResources(boolean useGpu) { + Map riMap = new HashMap<>(); + + ResourceInformation memory = ResourceInformation.newInstance( + ResourceInformation.MEMORY_MB.getName(), + ResourceInformation.MEMORY_MB.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); + ResourceInformation vcores = ResourceInformation.newInstance( + ResourceInformation.VCORES.getName(), + ResourceInformation.VCORES.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + riMap.put(ResourceInformation.MEMORY_URI, memory); + riMap.put(ResourceInformation.VCORES_URI, vcores); + if (useGpu) { + riMap.put(ResourceInformation.GPU_URI, + ResourceInformation.newInstance(ResourceInformation.GPU_URI, "", 0, + ResourceTypes.COUNTABLE, 0, 10L)); + } + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + } /** * Test for the case when the scheduler.minimum-allocation-mb == 0. 
@@ -69,7 +141,6 @@ public void testValidateMemoryAllocationHIgherMinThanMaxMem() { } - @Test public void testValidateMemoryAllocation() { Map configs = new HashMap(); @@ -115,7 +186,6 @@ public void testValidateVCoresHigherMinThanMaxVCore() { } - @Test public void testValidateVCores() { Map configs = new HashMap(); @@ -147,6 +217,106 @@ public void testValidateCSConfigInvalidCapacity() { } } + @Test + public void testValidateCSConfigDefaultRCAbsoluteModeParentMaxMemoryExceeded() + throws Exception { + setUpMockRM(false); + RMContext rmContext = mockRM.getRMContext(); + CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); + CapacitySchedulerConfiguration newConfiguration = + new CapacitySchedulerConfiguration(cs.getConfiguration()); + newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, FULL_MAXRES); + try { + CapacitySchedulerConfigValidator + .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); + fail("Parent maximum capacity exceeded"); + } catch (IOException e) { + Assert.assertTrue(e.getCause().getMessage() + .startsWith("Max resource configuration")); + } finally { + mockRM.stop(); + } + } + + @Test + public void testValidateCSConfigDefaultRCAbsoluteModeParentMaxVcoreExceeded() throws Exception { + setUpMockRM(false); + RMContext rmContext = mockRM.getRMContext(); + CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); + CapacitySchedulerConfiguration newConfiguration = + new CapacitySchedulerConfiguration(cs.getConfiguration()); + newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, VCORE_EXCEEDED_MAXRES); + try { + CapacitySchedulerConfigValidator + .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); + } catch (IOException e) { + fail("In DefaultResourceCalculator vcore limits are not enforced"); + } finally { + mockRM.stop(); + } + } + + @Test + public void testValidateCSConfigDominantRCAbsoluteModeParentMaxMemoryExceeded() + throws Exception { + setUpMockRM(true); + RMContext rmContext = mockRM.getRMContext(); + CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); + CapacitySchedulerConfiguration newConfiguration = + new CapacitySchedulerConfiguration(cs.getConfiguration()); + newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, FULL_MAXRES); + try { + CapacitySchedulerConfigValidator + .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); + fail("Parent maximum capacity exceeded"); + } catch (IOException e) { + Assert.assertTrue(e.getCause().getMessage() + .startsWith("Max resource configuration")); + } finally { + mockRM.stop(); + } + } + + @Test + public void testValidateCSConfigDominantRCAbsoluteModeParentMaxVcoreExceeded() throws Exception { + setUpMockRM(true); + RMContext rmContext = mockRM.getRMContext(); + CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); + CapacitySchedulerConfiguration newConfiguration = + new CapacitySchedulerConfiguration(cs.getConfiguration()); + newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, VCORE_EXCEEDED_MAXRES); + try { + CapacitySchedulerConfigValidator + .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); + fail("Parent maximum capacity exceeded"); + } catch (IOException e) { + Assert.assertTrue(e.getCause().getMessage() + .startsWith("Max resource configuration")); + } finally { + mockRM.stop(); + } + } + + @Test + public void testValidateCSConfigDominantRCAbsoluteModeParentMaxGPUExceeded() throws Exception { + 
setUpMockRM(true); + RMContext rmContext = mockRM.getRMContext(); + CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); + CapacitySchedulerConfiguration newConfiguration = + new CapacitySchedulerConfiguration(cs.getConfiguration()); + newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, GPU_EXCEEDED_MAXRES_GPU); + try { + CapacitySchedulerConfigValidator + .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); + fail("Parent maximum capacity exceeded"); + } catch (IOException e) { + Assert.assertTrue(e.getCause().getMessage() + .startsWith("Max resource configuration")); + } finally { + mockRM.stop(); + } + } + @Test public void testValidateCSConfigStopALeafQueue() throws IOException { Configuration oldConfig = CapacitySchedulerConfigGeneratorForTest @@ -155,7 +325,7 @@ public void testValidateCSConfigStopALeafQueue() throws IOException { newConfig .set("yarn.scheduler.capacity.root.test1.state", "STOPPED"); RMContext rmContext = prepareRMContext(); - Boolean isValidConfig = CapacitySchedulerConfigValidator + boolean isValidConfig = CapacitySchedulerConfigValidator .validateCSConfiguration(oldConfig, newConfig, rmContext); Assert.assertTrue(isValidConfig); } @@ -340,9 +510,11 @@ public void testAddQueueToALeafQueue() throws IOException { Assert.assertTrue(isValidConfig); } - public static RMContext prepareRMContext() { + setupResources(false); RMContext rmContext = Mockito.mock(RMContext.class); + CapacityScheduler mockCs = Mockito.mock(CapacityScheduler.class); + Mockito.when(rmContext.getScheduler()).thenReturn(mockCs); LocalConfigurationProvider configProvider = Mockito .mock(LocalConfigurationProvider.class); Mockito.when(rmContext.getConfigurationProvider()) @@ -361,4 +533,94 @@ public static RMContext prepareRMContext() { .thenReturn(queuePlacementManager); return rmContext; } + + private void setUpMockRM(boolean useDominantRC) throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + setupResources(useDominantRC); + CapacitySchedulerConfiguration csConf = setupCSConfiguration(conf, useDominantRC); + + mockRM = new MockRM(csConf); + + cs = (CapacityScheduler) mockRM.getResourceScheduler(); + mockRM.start(); + cs.start(); + + setupNodes(mockRM); + } + + private void setupNodes(MockRM newMockRM) throws Exception { + nm1 = new MockNM("h1:1234", + Resource.newInstance(NODE_MEMORY * GB, NODE1_VCORES, NODE_GPU), + newMockRM.getResourceTrackerService(), + YarnVersionInfo.getVersion()); + + nm1.registerNode(); + + nm2 = new MockNM("h2:1234", + Resource.newInstance(NODE_MEMORY * GB, NODE2_VCORES, NODE_GPU), + newMockRM.getResourceTrackerService(), + YarnVersionInfo.getVersion()); + nm2.registerNode(); + + nm3 = new MockNM("h3:1234", + Resource.newInstance(NODE_MEMORY * GB, NODE3_VCORES, NODE_GPU), + newMockRM.getResourceTrackerService(), + YarnVersionInfo.getVersion()); + nm3.registerNode(); + } + + private void setupGpuResourceValues() { + A_MINRES_GPU = Resource.newInstance(A_MINRES.getMemorySize(), A_MINRES.getVirtualCores(), + ImmutableMap.of(GPU_URI, 2L)); + B_MINRES_GPU = Resource.newInstance(B_MINRES.getMemorySize(), B_MINRES.getVirtualCores(), + ImmutableMap.of(GPU_URI, 2L)); + FULL_MAXRES_GPU = Resource.newInstance(FULL_MAXRES.getMemorySize(), + FULL_MAXRES.getVirtualCores(), ImmutableMap.of(GPU_URI, 6L)); + PARTIAL_MAXRES_GPU = Resource.newInstance(PARTIAL_MAXRES.getMemorySize(), + PARTIAL_MAXRES.getVirtualCores(), 
ImmutableMap.of(GPU_URI, 4L)); + GPU_EXCEEDED_MAXRES_GPU = Resource.newInstance(PARTIAL_MAXRES.getMemorySize(), + PARTIAL_MAXRES.getVirtualCores(), ImmutableMap.of(GPU_URI, 50L)); + } + + private CapacitySchedulerConfiguration setupCSConfiguration(YarnConfiguration configuration, + boolean useDominantRC) { + CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(configuration); + if (useDominantRC) { + csConf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class.getName()); + csConf.set(YarnConfiguration.RESOURCE_TYPES, ResourceInformation.GPU_URI); + } + + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[]{PARENT_A, PARENT_B}); + csConf.setQueues(PARENT_A_FULL_PATH, new String[]{LEAF_A}); + csConf.setQueues(PARENT_B_FULL_PATH, new String[]{LEAF_B}); + + if (useDominantRC) { + setupGpuResourceValues(); + csConf.setMinimumResourceRequirement("", PARENT_A_FULL_PATH, A_MINRES_GPU); + csConf.setMinimumResourceRequirement("", PARENT_B_FULL_PATH, B_MINRES_GPU); + csConf.setMinimumResourceRequirement("", LEAF_A_FULL_PATH, A_MINRES_GPU); + csConf.setMinimumResourceRequirement("", LEAF_B_FULL_PATH, B_MINRES_GPU); + + csConf.setMaximumResourceRequirement("", PARENT_A_FULL_PATH, PARTIAL_MAXRES_GPU); + csConf.setMaximumResourceRequirement("", PARENT_B_FULL_PATH, FULL_MAXRES_GPU); + csConf.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, PARTIAL_MAXRES_GPU); + csConf.setMaximumResourceRequirement("", LEAF_B_FULL_PATH, FULL_MAXRES_GPU); + } else { + csConf.setMinimumResourceRequirement("", PARENT_A_FULL_PATH, A_MINRES); + csConf.setMinimumResourceRequirement("", PARENT_B_FULL_PATH, B_MINRES); + csConf.setMinimumResourceRequirement("", LEAF_A_FULL_PATH, A_MINRES); + csConf.setMinimumResourceRequirement("", LEAF_B_FULL_PATH, B_MINRES); + + csConf.setMaximumResourceRequirement("", PARENT_A_FULL_PATH, PARTIAL_MAXRES); + csConf.setMaximumResourceRequirement("", PARENT_B_FULL_PATH, FULL_MAXRES); + csConf.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, PARTIAL_MAXRES); + csConf.setMaximumResourceRequirement("", LEAF_B_FULL_PATH, FULL_MAXRES); + } + + return csConf; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java index 0b6bb65d8db34..03b7077bc16d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java @@ -122,6 +122,9 @@ public HTML html() { } } + protected void setConf(YarnConfiguration conf){ + this.conf = conf; + } /** * Default constructor */ @@ -230,6 +233,14 @@ private void proxyLink(final HttpServletRequest req, String httpsPolicy = conf.get(YarnConfiguration.RM_APPLICATION_HTTPS_POLICY, YarnConfiguration.DEFAULT_RM_APPLICATION_HTTPS_POLICY); + + boolean connectionTimeoutEnabled = + conf.getBoolean(YarnConfiguration.RM_PROXY_TIMEOUT_ENABLED, + YarnConfiguration.DEFALUT_RM_PROXY_TIMEOUT_ENABLED); + int connectionTimeout = + conf.getInt(YarnConfiguration.RM_PROXY_CONNECTION_TIMEOUT, + YarnConfiguration.DEFAULT_RM_PROXY_CONNECTION_TIMEOUT); + if 
(httpsPolicy.equals("LENIENT") || httpsPolicy.equals("STRICT")) { ProxyCA proxyCA = getProxyCA(); // ProxyCA could be null when the Proxy is run outside the RM @@ -250,10 +261,18 @@ private void proxyLink(final HttpServletRequest req, InetAddress localAddress = InetAddress.getByName(proxyHost); LOG.debug("local InetAddress for proxy host: {}", localAddress); httpClientBuilder.setDefaultRequestConfig( - RequestConfig.custom() - .setCircularRedirectsAllowed(true) - .setLocalAddress(localAddress) - .build()); + connectionTimeoutEnabled ? + RequestConfig.custom() + .setCircularRedirectsAllowed(true) + .setLocalAddress(localAddress) + .setConnectionRequestTimeout(connectionTimeout) + .setSocketTimeout(connectionTimeout) + .setConnectTimeout(connectionTimeout) + .build() : + RequestConfig.custom() + .setCircularRedirectsAllowed(true) + .setLocalAddress(localAddress) + .build()); HttpRequestBase base = null; if (method.equals(HTTP.GET)) { @@ -621,7 +640,6 @@ private FetchedAppReport getFetchedAppReport(ApplicationId id) * again... If this method returns true, there was a redirect, and * it was handled by redirecting the current request to an error page. * - * @param path the part of the request path after the app id * @param id the app id * @param req the request object * @param resp the response object diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java index f05e05a2d6374..6c8993f6e80b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java @@ -23,6 +23,8 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -35,10 +37,14 @@ import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; +import java.net.SocketTimeoutException; +import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Map; +import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; @@ -98,6 +104,7 @@ public static void start() throws Exception { context.setContextPath("/foo"); server.setHandler(context); context.addServlet(new ServletHolder(TestServlet.class), "/bar"); + context.addServlet(new ServletHolder(TimeOutTestServlet.class), "/timeout"); ((ServerConnector)server.getConnectors()[0]).setHost("localhost"); server.start(); originalPort = ((ServerConnector)server.getConnectors()[0]).getLocalPort(); @@ -145,6 +152,29 @@ protected void doPost(HttpServletRequest req, HttpServletResponse resp) } } + @SuppressWarnings("serial") + public static class TimeOutTestServlet extends HttpServlet { + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + try { + Thread.sleep(10 * 1000); 
+ } catch (InterruptedException e) { + LOG.warn("doGet() interrupted", e); + resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); + return; + } + resp.setStatus(HttpServletResponse.SC_OK); + } + + @Override + protected void doPost(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + resp.setStatus(HttpServletResponse.SC_OK); + } + } + @Test(timeout=5000) public void testWebAppProxyServlet() throws Exception { configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090"); @@ -256,6 +286,45 @@ public void testWebAppProxyServlet() throws Exception { } } + @Test(expected = SocketTimeoutException.class) + public void testWebAppProxyConnectionTimeout() + throws IOException, ServletException{ + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getMethod()).thenReturn("GET"); + when(request.getRemoteUser()).thenReturn("dr.who"); + when(request.getPathInfo()).thenReturn("/application_00_0"); + when(request.getHeaderNames()).thenReturn(Collections.emptyEnumeration()); + + HttpServletResponse response = mock(HttpServletResponse.class); + when(response.getOutputStream()).thenReturn(null); + + WebAppProxyServlet servlet = new WebAppProxyServlet(); + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.RM_PROXY_TIMEOUT_ENABLED, + true); + conf.setInt(YarnConfiguration.RM_PROXY_CONNECTION_TIMEOUT, + 1000); + + servlet.setConf(conf); + + ServletConfig config = mock(ServletConfig.class); + ServletContext context = mock(ServletContext.class); + when(config.getServletContext()).thenReturn(context); + + AppReportFetcherForTest appReportFetcher = + new AppReportFetcherForTest(new YarnConfiguration()); + + when(config.getServletContext() + .getAttribute(WebAppProxy.FETCHER_ATTRIBUTE)) + .thenReturn(appReportFetcher); + + appReportFetcher.answer = 7; + + servlet.init(config); + servlet.doGet(request, response); + + } + @Test(timeout=5000) public void testAppReportForEmptyTrackingUrl() throws Exception { configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090"); @@ -391,9 +460,9 @@ public void testWebAppProxyServerMainMethod() throws Exception { @Test(timeout=5000) public void testCheckHttpsStrictAndNotProvided() throws Exception { - HttpServletResponse resp = Mockito.mock(HttpServletResponse.class); + HttpServletResponse resp = mock(HttpServletResponse.class); StringWriter sw = new StringWriter(); - Mockito.when(resp.getWriter()).thenReturn(new PrintWriter(sw)); + when(resp.getWriter()).thenReturn(new PrintWriter(sw)); YarnConfiguration conf = new YarnConfiguration(); final URI httpLink = new URI("http://foo.com"); final URI httpsLink = new URI("https://foo.com"); @@ -566,6 +635,12 @@ public FetchedAppReport getApplicationReport(ApplicationId appId) return result; } else if (answer == 6) { return getDefaultApplicationReport(appId, false); + } else if (answer == 7) { + // test connection timeout + FetchedAppReport result = getDefaultApplicationReport(appId); + result.getApplicationReport().setOriginalTrackingUrl("localhost:" + + originalPort + "/foo/timeout?a=b#main"); + return result; } return null; }
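
For reference, the web-proxy timeout behaviour exercised by the tests above is controlled by the two configuration keys this patch adds to `yarn-default.xml`: `yarn.resourcemanager.proxy.timeout.enabled` (default `true`) and `yarn.resourcemanager.proxy.connection.timeout` (default `60000` ms). A minimal sketch of an operator-side override in `yarn-site.xml` could look like the following; the property names are taken from the patch, while the `30000` ms value is purely illustrative and not part of this change.

```xml
<!-- Hypothetical yarn-site.xml override; property names come from the
     yarn-default.xml entries added in this patch, the 30000 ms value is
     only an example. -->
<property>
  <name>yarn.resourcemanager.proxy.timeout.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.proxy.connection.timeout</name>
  <value>30000</value>
</property>
```

When the flag is enabled, `WebAppProxyServlet` applies the configured value to the connection-request, socket, and connect timeouts of the proxied HTTP request via `RequestConfig`, as the servlet change earlier in this patch shows; disabling the flag falls back to the previous behaviour with no explicit timeouts.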