From 214ac3e931023384ec9b6e981886357a85bbd364 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Tue, 23 Jun 2015 07:09:37 -0700 Subject: [PATCH 001/130] HADOOP-12111. Add CHANGES-HADOOP-12111.txt. --- hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt diff --git a/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt b/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt new file mode 100644 index 0000000000000..0ec7a0f75d340 --- /dev/null +++ b/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt @@ -0,0 +1 @@ + Breakdown of HADOOP-12111 sub-tasks: From 09a2e360f2603f54cddf22e2bec6a0f1d77d596b Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 26 Jun 2015 23:30:37 -0700 Subject: [PATCH 002/130] HADOOP-12113. update test-patch branch to latest code (aw) --- dev-support/smart-apply-patch.sh | 4 +- dev-support/test-patch.d/checkstyle.sh | 211 +- dev-support/test-patch.d/shellcheck.sh | 25 +- dev-support/test-patch.d/whitespace.sh | 35 +- dev-support/test-patch.sh | 2722 ++++++++++------- .../hadoop-common/CHANGES-HADOOP-12111.txt | 2 + 6 files changed, 1837 insertions(+), 1162 deletions(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index ebcb660c511b8..ddfd940064fe0 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -43,7 +43,9 @@ PATCH=${PATCH:-patch} # allow overriding patch binary # Cleanup handler for temporary files TOCLEAN="" cleanup() { - rm $TOCLEAN + if [[ -n ${TOCLEAN} ]]; then + rm $TOCLEAN + fi exit $1 } trap "cleanup 1" HUP INT QUIT TERM diff --git a/dev-support/test-patch.d/checkstyle.sh b/dev-support/test-patch.d/checkstyle.sh index 63115842b39d1..1fbe88e920ee7 100755 --- a/dev-support/test-patch.d/checkstyle.sh +++ b/dev-support/test-patch.d/checkstyle.sh @@ -18,79 +18,126 @@ add_plugin checkstyle CHECKSTYLE_TIMER=0 -# if it ends in an explicit .sh, then this is shell code. 
-# if it doesn't have an extension, we assume it is shell code too function checkstyle_filefilter { local filename=$1 - if [[ ${filename} =~ \.java$ ]]; then - add_test checkstyle + if [[ ${BUILDTOOL} == maven + || ${BUILDTOOL} == ant ]]; then + if [[ ${filename} =~ \.java$ ]]; then + add_test checkstyle + fi fi } -function checkstyle_mvnrunner +function checkstyle_runner { - local logfile=$1 - local output=$2 + local repostatus=$1 local tmp=${PATCH_DIR}/$$.${RANDOM} local j + local i=0 + local fn + local savestart=${TIMER} + local savestop + local output + local logfile + local repo + local modulesuffix + local cmd + + modules_reset + + if [[ ${repostatus} == branch ]]; then + repo=${PATCH_BRANCH} + else + repo="the patch" + fi + + #shellcheck disable=SC2153 + until [[ $i -eq ${#MODULE[@]} ]]; do + start_clock + fn=$(module_file_fragment "${MODULE[${i}]}") + modulesuffix=$(basename "${MODULE[${i}]}") + output="${PATCH_DIR}/${repostatus}-checkstyle-${fn}.txt" + logfile="${PATCH_DIR}/maven-${repostatus}-checkstyle-${fn}.txt" + pushd "${BASEDIR}/${MODULE[${i}]}" >/dev/null + + case ${BUILDTOOL} in + maven) + cmd="${MVN} ${MAVEN_ARGS[*]} clean test \ + checkstyle:checkstyle \ + -Dcheckstyle.consoleOutput=true \ + ${MODULEEXTRAPARAM[${i}]//@@@MODULEFN@@@/${fn}} -Ptest-patch" + ;; + ant) + cmd="${ANT} \ + -Dcheckstyle.consoleOutput=true \ + ${MODULEEXTRAPARAM[${i}]//@@@MODULEFN@@@/${fn}} \ + ${ANT_ARGS[*]} checkstyle" + ;; + esac + + #shellcheck disable=SC2086 + echo ${cmd} "> ${logfile}" + #shellcheck disable=SC2086 + ${cmd} 2>&1 \ + | tee "${logfile}" \ + | ${GREP} ^/ \ + | ${SED} -e "s,${BASEDIR},.,g" \ + > "${tmp}" + + if [[ $? == 0 ]] ; then + module_status ${i} +1 "${logfile}" "${modulesuffix} in ${repo} passed checkstyle" + else + module_status ${i} -1 "${logfile}" "${modulesuffix} in ${repo} failed checkstyle" + ((result = result + 1)) + fi + savestop=$(stop_clock) + #shellcheck disable=SC2034 + MODULE_STATUS_TIMER[${i}]=${savestop} + + for j in ${CHANGED_FILES}; do + ${GREP} "${j}" "${tmp}" >> "${output}" + done - "${MVN}" clean test checkstyle:checkstyle -DskipTests \ - -Dcheckstyle.consoleOutput=true \ - "-D${PROJECT_NAME}PatchProcess" 2>&1 \ - | tee "${logfile}" \ - | ${GREP} ^/ \ - | ${SED} -e "s,${BASEDIR},.,g" \ - > "${tmp}" - - # the checkstyle output files are massive, so - # let's reduce the work by filtering out files - # that weren't changed. Some modules are - # MASSIVE and this can cut the output down to - # by orders of magnitude!! - for j in ${CHANGED_FILES}; do - ${GREP} "${j}" "${tmp}" >> "${output}" + rm "${tmp}" 2>/dev/null + # shellcheck disable=SC2086 + popd >/dev/null + ((i=i+1)) done - rm "${tmp}" 2>/dev/null + TIMER=${savestart} + + if [[ ${result} -gt 0 ]]; then + return 1 + fi + return 0 } function checkstyle_preapply { - local module_suffix - local modules=${CHANGED_MODULES} - local module + local result - verify_needed_test checkstyle + big_console_header "${PATCH_BRANCH} checkstyle" + + start_clock + verify_needed_test checkstyle if [[ $? == 0 ]]; then + echo "Patch does not need checkstyle testing." return 0 fi - big_console_header "checkstyle plugin: prepatch" - - start_clock - - for module in ${modules} - do - pushd "${module}" >/dev/null - echo " Running checkstyle in ${module}" - module_suffix=$(basename "${module}") - - checkstyle_mvnrunner \ - "${PATCH_DIR}/maven-${PATCH_BRANCH}checkstyle-${module_suffix}.txt" \ - "${PATCH_DIR}/${PATCH_BRANCH}checkstyle${module_suffix}.txt" - - if [[ $? 
!= 0 ]] ; then - echo "Pre-patch ${PATCH_BRANCH} checkstyle compilation is broken?" - add_jira_table -1 checkstyle "Pre-patch ${PATCH_BRANCH} ${module} checkstyle compilation may be broken." - fi - popd >/dev/null - done + personality_modules branch checkstyle + checkstyle_runner branch + result=$? + modules_messages branch checkstyle true # keep track of how much as elapsed for us already CHECKSTYLE_TIMER=$(stop_clock) + if [[ ${result} != 0 ]]; then + return 1 + fi return 0 } @@ -135,71 +182,71 @@ function checkstyle_calcdiffs function checkstyle_postapply { - local rc=0 + local result local module - local modules=${CHANGED_MODULES} - local module_suffix + local fn + local i=0 local numprepatch=0 local numpostpatch=0 local diffpostpatch=0 - verify_needed_test checkstyle + big_console_header "Patch checkstyle plugin" + start_clock + + verify_needed_test checkstyle if [[ $? == 0 ]]; then + echo "Patch does not need checkstyle testing." return 0 fi - big_console_header "checkstyle plugin: postpatch" + personality_modules patch checkstyle + checkstyle_runner patch + result=$? - start_clock # add our previous elapsed to our new timer # by setting the clock back offset_clock "${CHECKSTYLE_TIMER}" - for module in ${modules} - do - pushd "${module}" >/dev/null - echo " Running checkstyle in ${module}" - module_suffix=$(basename "${module}") - - checkstyle_mvnrunner \ - "${PATCH_DIR}/maven-patchcheckstyle-${module_suffix}.txt" \ - "${PATCH_DIR}/patchcheckstyle${module_suffix}.txt" - - if [[ $? != 0 ]] ; then - ((rc = rc +1)) - echo "Post-patch checkstyle compilation is broken." - add_jira_table -1 checkstyle "Post-patch checkstyle ${module} compilation is broken." + until [[ $i -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) continue fi + module=${MODULE[$i]} + fn=$(module_file_fragment "${module}") + + if [[ ! -f "${PATCH_DIR}/branch-checkstyle-${fn}.txt" ]]; then + touch "${PATCH_DIR}/branch-checkstyle-${fn}.txt" + fi #shellcheck disable=SC2016 diffpostpatch=$(checkstyle_calcdiffs \ - "${PATCH_DIR}/${PATCH_BRANCH}checkstyle${module_suffix}.txt" \ - "${PATCH_DIR}/patchcheckstyle${module_suffix}.txt" \ - "${PATCH_DIR}/diffcheckstyle${module_suffix}.txt" ) + "${PATCH_DIR}/branch-checkstyle-${fn}.txt" \ + "${PATCH_DIR}/patch-checkstyle-${fn}.txt" \ + "${PATCH_DIR}/diff-checkstyle-${fn}.txt" ) if [[ ${diffpostpatch} -gt 0 ]] ; then - ((rc = rc + 1)) + ((result = result + 1)) # shellcheck disable=SC2016 - numprepatch=$(wc -l "${PATCH_DIR}/${PATCH_BRANCH}checkstyle${module_suffix}.txt" | ${AWK} '{print $1}') + numprepatch=$(wc -l "${PATCH_DIR}/branch-checkstyle-${fn}.txt" | ${AWK} '{print $1}') # shellcheck disable=SC2016 - numpostpatch=$(wc -l "${PATCH_DIR}/patchcheckstyle${module_suffix}.txt" | ${AWK} '{print $1}') + numpostpatch=$(wc -l "${PATCH_DIR}/patch-checkstyle-${fn}.txt" | ${AWK} '{print $1}') - add_jira_table -1 checkstyle "The applied patch generated "\ - "${diffpostpatch} new checkstyle issues (total was ${numprepatch}, now ${numpostpatch})." - footer="${footer} @@BASE@@/diffcheckstyle${module_suffix}.txt" + module_status ${i} -1 "diff-checkstyle-${fn}.txt" "Patch generated "\ + "${diffpostpatch} new checkstyle issues in "\ + "${module} (total was ${numprepatch}, now ${numpostpatch})." 
fi - - popd >/dev/null + ((i=i+1)) done - if [[ ${rc} -gt 0 ]] ; then - add_jira_footer checkstyle "${footer}" + modules_messages patch checkstyle true + + if [[ ${result} != 0 ]]; then return 1 fi - add_jira_table +1 checkstyle "There were no new checkstyle issues." return 0 -} \ No newline at end of file +} diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 9277ea50aa475..1c06a5d5e11a0 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -56,6 +56,7 @@ function shellcheck_private_findbash function shellcheck_preapply { local i + local msg verify_needed_test shellcheck if [[ $? == 0 ]]; then @@ -65,7 +66,7 @@ function shellcheck_preapply big_console_header "shellcheck plugin: prepatch" if [[ ! -x "${SHELLCHECK}" ]]; then - hadoop_error "shellcheck is not available." + yetus_error "shellcheck is not available." return 0 fi @@ -73,6 +74,13 @@ function shellcheck_preapply # shellcheck disable=SC2016 SHELLCHECK_VERSION=$(${SHELLCHECK} --version | ${GREP} version: | ${AWK} '{print $NF}') + msg="v${SHELLCHECK_VERSION}" + + if [[ ${SHELLCHECK_VERSION} =~ 0.[0-3].[0-5] ]]; then + msg="${msg} (This is an old version that has serious bugs. Consider upgrading.)" + fi + + add_footer_table shellcheck "${msg}" echo "Running shellcheck against all identifiable shell scripts" pushd "${BASEDIR}" >/dev/null @@ -138,8 +146,8 @@ function shellcheck_postapply big_console_header "shellcheck plugin: postpatch" if [[ ! -x "${SHELLCHECK}" ]]; then - hadoop_error "shellcheck is not available." - add_jira_table 0 shellcheck "Shellcheck was not available." + yetus_error "shellcheck is not available." + add_vote_table 0 shellcheck "Shellcheck was not available." return 0 fi @@ -155,8 +163,13 @@ function shellcheck_postapply ${SHELLCHECK} -f gcc "${i}" >> "${PATCH_DIR}/patchshellcheck-result.txt" done + if [[ ! -f "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" ]]; then + touch "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" + fi + # shellcheck disable=SC2016 numPrepatch=$(wc -l "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" | ${AWK} '{print $1}') + # shellcheck disable=SC2016 numPostpatch=$(wc -l "${PATCH_DIR}/patchshellcheck-result.txt" | ${AWK} '{print $1}') @@ -167,12 +180,12 @@ function shellcheck_postapply ) if [[ ${diffPostpatch} -gt 0 ]] ; then - add_jira_table -1 shellcheck "The applied patch generated "\ + add_vote_table -1 shellcheck "The applied patch generated "\ "${diffPostpatch} new shellcheck (v${SHELLCHECK_VERSION}) issues (total was ${numPrepatch}, now ${numPostpatch})." - add_jira_footer shellcheck "@@BASE@@/diffpatchshellcheck.txt" + add_footer_table shellcheck "@@BASE@@/diffpatchshellcheck.txt" return 1 fi - add_jira_table +1 shellcheck "There were no new shellcheck (v${SHELLCHECK_VERSION}) issues." + add_vote_table +1 shellcheck "There were no new shellcheck issues." 
return 0 } diff --git a/dev-support/test-patch.d/whitespace.sh b/dev-support/test-patch.d/whitespace.sh index 324481ca74d51..6fc033b3f31f9 100755 --- a/dev-support/test-patch.d/whitespace.sh +++ b/dev-support/test-patch.d/whitespace.sh @@ -19,28 +19,47 @@ add_plugin whitespace function whitespace_postapply { local count - local j + local result=0 big_console_header "Checking for whitespace at the end of lines" start_clock pushd "${BASEDIR}" >/dev/null - for j in ${CHANGED_FILES}; do - ${GREP} -nHE '[[:blank:]]$' "./${j}" | ${GREP} -f "${GITDIFFLINES}" >> "${PATCH_DIR}/whitespace.txt" - done + # shellcheck disable=SC2016 + ${AWK} '/\t/ {print $0}' \ + "${GITDIFFCONTENT}" \ + | ${GREP} -v Makefile: >> "${PATCH_DIR}/whitespace-tabs.txt" + + ${GREP} -E '[[:blank:]]$' \ + "${GITDIFFCONTENT}" \ + >> "${PATCH_DIR}/whitespace-eol.txt" # shellcheck disable=SC2016 - count=$(wc -l "${PATCH_DIR}/whitespace.txt" | ${AWK} '{print $1}') + count=$(wc -l "${PATCH_DIR}/whitespace-eol.txt" | ${AWK} '{print $1}') if [[ ${count} -gt 0 ]]; then - add_jira_table -1 whitespace "The patch has ${count}"\ + add_vote_table -1 whitespace "The patch has ${count}"\ " line(s) that end in whitespace. Use git apply --whitespace=fix." - add_jira_footer whitespace "@@BASE@@/whitespace.txt" + add_footer_table whitespace "@@BASE@@/whitespace-eol.txt" + ((result=result+1)) + fi + + # shellcheck disable=SC2016 + count=$(wc -l "${PATCH_DIR}/whitespace-tabs.txt" | ${AWK} '{print $1}') + + if [[ ${count} -gt 0 ]]; then + add_vote_table -1 whitespace "The patch has ${count}"\ + " line(s) with tabs." + add_footer_table whitespace "@@BASE@@/whitespace-tabs.txt" + ((result=result+1)) + fi + + if [[ ${result} -gt 0 ]]; then popd >/dev/null return 1 fi popd >/dev/null - add_jira_table +1 whitespace "The patch has no lines that end in whitespace." + add_vote_table +1 whitespace "Patch has no whitespace issues." return 0 } diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index cd91a5c8cd8d9..e1dadd282c833 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -14,14 +14,41 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Make sure that bash version meets the pre-requisite + +if [[ -z "${BASH_VERSINFO}" ]] \ + || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \ + || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then + echo "bash v3.2+ is required. Sorry." 
+ exit 1 +fi + ### BUILD_URL is set by Hudson if it is run by patch process this="${BASH_SOURCE-$0}" BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) -CWD=$(pwd) +STARTINGDIR=$(pwd) USER_PARAMS=("$@") GLOBALTIMER=$(date +"%s") +# global arrays +declare -a MAVEN_ARGS=("--batch-mode") +declare -a ANT_ARGS=("-noinput") +declare -a TP_HEADER +declare -a TP_VOTE_TABLE +declare -a TP_TEST_TABLE +declare -a TP_FOOTER_TABLE +declare -a MODULE_STATUS +declare -a MODULE_STATUS_TIMER +declare -a MODULE_STATUS_MSG +declare -a MODULE_STATUS_LOG +declare -a MODULE + +TP_HEADER_COUNTER=0 +TP_VOTE_COUNTER=0 +TP_TEST_COUNTER=0 +TP_FOOTER_COUNTER=0 + ## @description Setup the default global variables ## @audience public ## @stability stable @@ -33,10 +60,15 @@ function setup_defaults else MVN=${MAVEN_HOME}/bin/mvn fi - # This parameter needs to be kept as an array - MAVEN_ARGS=() - PROJECT_NAME=hadoop + if [[ -z "${ANT_HOME:-}" ]]; then + ANT=ant + else + ANT=${ANT_HOME}/bin/ant + fi + + PROJECT_NAME=yetus + DOCKERFILE="${BINDIR}/test-patch-docker/Dockerfile-startstub" HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute" JENKINS=false BASEDIR=$(pwd) @@ -44,13 +76,15 @@ function setup_defaults USER_PLUGIN_DIR="" LOAD_SYSTEM_PLUGINS=true + ALLOWSUMMARIES=true - FINDBUGS_HOME=${FINDBUGS_HOME:-} - FINDBUGS_WARNINGS_FAIL_PRECHECK=false + DOCKERSUPPORT=false ECLIPSE_HOME=${ECLIPSE_HOME:-} BUILD_NATIVE=${BUILD_NATIVE:-true} PATCH_BRANCH="" - PATCH_BRANCH_DEFAULT="trunk" + PATCH_BRANCH_DEFAULT="master" + + #shellcheck disable=SC2034 CHANGED_MODULES="" USER_MODULE_LIST="" OFFLINE=false @@ -58,50 +92,40 @@ function setup_defaults REEXECED=false RESETREPO=false ISSUE="" - ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' + ISSUE_RE='^(YETUS)-[0-9]+$' TIMER=$(date +"%s") PATCHURL="" - OSTYPE=$(uname -s) + BUILDTOOL=maven + BUGSYSTEM=jira + JDK_TEST_LIST="javac javadoc unit" + GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" + GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" # Solaris needs POSIX, not SVID case ${OSTYPE} in SunOS) - PS=${PS:-ps} AWK=${AWK:-/usr/xpg4/bin/awk} SED=${SED:-/usr/xpg4/bin/sed} WGET=${WGET:-wget} GIT=${GIT:-git} - EGREP=${EGREP:-/usr/xpg4/bin/egrep} GREP=${GREP:-/usr/xpg4/bin/grep} - PATCH=${PATCH:-patch} + PATCH=${PATCH:-/usr/gnu/bin/patch} DIFF=${DIFF:-/usr/gnu/bin/diff} - JIRACLI=${JIRA:-jira} FILE=${FILE:-file} ;; *) - PS=${PS:-ps} AWK=${AWK:-awk} SED=${SED:-sed} WGET=${WGET:-wget} GIT=${GIT:-git} - EGREP=${EGREP:-egrep} GREP=${GREP:-grep} PATCH=${PATCH:-patch} DIFF=${DIFF:-diff} - JIRACLI=${JIRA:-jira} FILE=${FILE:-file} ;; esac - declare -a JIRA_COMMENT_TABLE - declare -a JIRA_FOOTER_TABLE - declare -a JIRA_HEADER - declare -a JIRA_TEST_TABLE - - JFC=0 - JTC=0 - JTT=0 RESULT=0 } @@ -110,7 +134,7 @@ function setup_defaults ## @stability stable ## @replaceable no ## @param string -function hadoop_error +function yetus_error { echo "$*" 1>&2 } @@ -120,20 +144,51 @@ function hadoop_error ## @stability stable ## @replaceable no ## @param string -function hadoop_debug +function yetus_debug { - if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then + if [[ -n "${TP_SHELL_SCRIPT_DEBUG}" ]]; then echo "[$(date) DEBUG]: $*" 1>&2 fi } +## @description Convert the given module name to a file fragment +## @audience public +## @stability stable +## @replaceable no +## @param module +function module_file_fragment +{ + local mod=$1 + if [[ ${mod} == . 
]]; then + echo root + else + echo "$1" | tr '/' '_' | tr '\\' '_' + fi +} + +## @description Convert time in seconds to m + s +## @audience public +## @stability stable +## @replaceable no +## @param seconds +function clock_display +{ + local -r elapsed=$1 + + if [[ ${elapsed} -lt 0 ]]; then + echo "N/A" + else + printf "%3sm %02ss" $((elapsed/60)) $((elapsed%60)) + fi +} + ## @description Activate the local timer ## @audience public ## @stability stable ## @replaceable no function start_clock { - hadoop_debug "Start clock" + yetus_debug "Start clock" TIMER=$(date +"%s") } @@ -145,7 +200,7 @@ function stop_clock { local -r stoptime=$(date +"%s") local -r elapsed=$((stoptime-TIMER)) - hadoop_debug "Stop clock" + yetus_debug "Stop clock" echo ${elapsed} } @@ -158,7 +213,7 @@ function stop_global_clock { local -r stoptime=$(date +"%s") local -r elapsed=$((stoptime-GLOBALTIMER)) - hadoop_debug "Stop global clock" + yetus_debug "Stop global clock" echo ${elapsed} } @@ -178,10 +233,10 @@ function offset_clock ## @stability stable ## @replaceable no ## @param string -function add_jira_header +function add_header_line { - JIRA_HEADER[${JHC}]="| $* |" - JHC=$(( JHC+1 )) + TP_HEADER[${TP_HEADER_COUNTER}]="$*" + ((TP_HEADER_COUNTER=TP_HEADER_COUNTER+1 )) } ## @description Add to the output table. If the first parameter is a number @@ -199,88 +254,152 @@ function add_jira_header ## @param subsystem ## @param string ## @return Elapsed time display -function add_jira_table +function add_vote_table { local value=$1 local subsystem=$2 shift 2 - local color - local calctime=0 - + local calctime local -r elapsed=$(stop_clock) - if [[ ${elapsed} -lt 0 ]]; then - calctime="N/A" + yetus_debug "add_vote_table ${value} ${subsystem} ${*}" + + calctime=$(clock_display "${elapsed}") + + if [[ ${value} == "1" ]]; then + value="+1" + fi + + if [[ -z ${value} ]]; then + TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | ${subsystem} | | ${*:-} |" else - printf -v calctime "%3sm %02ss" $((elapsed/60)) $((elapsed%60)) + TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| ${value} | ${subsystem} | ${calctime} | $* |" fi + ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1)) +} - echo "" - echo "Elapsed time: ${calctime}" - echo "" +## @description Report the JVM version of the given directory +## @stability stable +## @audience private +## @replaceable yes +## @params directory +## @returns version +function report_jvm_version +{ + #shellcheck disable=SC2016 + "${1}/bin/java" -version 2>&1 | head -1 | ${AWK} '{print $NF}' | tr -d \" +} - case ${value} in - 1|+1) - value="+1" - color="green" - ;; - -1) - color="red" - ;; - 0) - color="blue" - ;; - null) - ;; - esac +## @description Verify if a given test is multijdk +## @audience public +## @stability stable +## @replaceable yes +## @param test +## @return 1 = yes +## @return 0 = no +function verify_multijdk_test +{ + local i=$1 - if [[ -z ${color} ]]; then - JIRA_COMMENT_TABLE[${JTC}]="| | ${subsystem} | | ${*:-} |" - JTC=$(( JTC+1 )) - else - JIRA_COMMENT_TABLE[${JTC}]="| {color:${color}}${value}{color} | ${subsystem} | ${calctime} | $* |" - JTC=$(( JTC+1 )) + if [[ "${JDK_DIR_LIST}" == "${JAVA_HOME}" ]]; then + yetus_debug "MultiJDK not configured." + return 0 fi + + if [[ ${JDK_TEST_LIST} =~ $i ]]; then + yetus_debug "${i} is in ${JDK_TEST_LIST} and MultiJDK configured." + return 1 + fi + return 0 } -## @description Put the final environment information at the bottom +## @description Absolute path the JDK_DIR_LIST and JAVA_HOME. 
+## @description if JAVA_HOME is in JDK_DIR_LIST, it is positioned last +## @stability stable +## @audience private +## @replaceable yes +function fullyqualifyjdks +{ + local i + local jdkdir + local tmplist + + JAVA_HOME=$(cd -P -- "${JAVA_HOME}" >/dev/null && pwd -P) + + for i in ${JDK_DIR_LIST}; do + jdkdir=$(cd -P -- "${i}" >/dev/null && pwd -P) + if [[ ${jdkdir} != "${JAVA_HOME}" ]]; then + tmplist="${tmplist} ${jdkdir}" + fi + done + + JDK_DIR_LIST="${tmplist} ${JAVA_HOME}" + JDK_DIR_LIST=${JDK_DIR_LIST/ } +} + +## @description Put the opening environment information at the bottom ## @description of the footer table ## @stability stable ## @audience private ## @replaceable yes -function close_jira_footer +function prepopulate_footer { # shellcheck disable=SC2016 - local -r javaversion=$("${JAVA_HOME}/bin/java" -version 2>&1 | head -1 | ${AWK} '{print $NF}' | tr -d \") + local javaversion + local listofjdks local -r unamea=$(uname -a) + local i + + add_footer_table "uname" "${unamea}" + add_footer_table "Build tool" "${BUILDTOOL}" + + if [[ -n ${PERSONALITY} ]]; then + add_footer_table "Personality" "${PERSONALITY}" + fi - add_jira_footer "Java" "${javaversion}" - add_jira_footer "uname" "${unamea}" + javaversion=$(report_jvm_version "${JAVA_HOME}") + add_footer_table "Default Java" "${javaversion}" + if [[ -n ${JDK_DIR_LIST} + && ${JDK_DIR_LIST} != "${JAVA_HOME}" ]]; then + for i in ${JDK_DIR_LIST}; do + javaversion=$(report_jvm_version "${i}") + listofjdks="${listofjdks} ${i}:${javaversion}" + done + add_footer_table "Multi-JDK versions" "${listofjdks}" + fi +} + +## @description Put docker stats in various tables +## @stability stable +## @audience private +## @replaceable yes +function finish_docker_stats +{ + if [[ ${DOCKERMODE} == true ]]; then + # DOCKER_VERSION is set by our creator. + add_footer_table "Docker" "${DOCKER_VERSION}" + fi } ## @description Put the final elapsed time at the bottom of the table. ## @audience private ## @stability stable ## @replaceable no -function close_jira_table +function finish_vote_table { local -r elapsed=$(stop_global_clock) + local calctime - if [[ ${elapsed} -lt 0 ]]; then - calctime="N/A" - else - printf -v calctime "%3sm %02ss" $((elapsed/60)) $((elapsed%60)) - fi + calctime=$(clock_display "${elapsed}") echo "" echo "Total Elapsed time: ${calctime}" echo "" - - JIRA_COMMENT_TABLE[${JTC}]="| | | ${calctime} | |" - JTC=$(( JTC+1 )) + TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | | ${calctime} | |" + ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1 )) } ## @description Add to the footer of the display. 
@@BASE@@ will get replaced with the @@ -291,13 +410,13 @@ function close_jira_table ## @replaceable no ## @param subsystem ## @param string -function add_jira_footer +function add_footer_table { local subsystem=$1 shift 1 - JIRA_FOOTER_TABLE[${JFC}]="| ${subsystem} | $* |" - JFC=$(( JFC+1 )) + TP_FOOTER_TABLE[${TP_FOOTER_COUNTER}]="| ${subsystem} | $* |" + ((TP_FOOTER_COUNTER=TP_FOOTER_COUNTER+1 )) } ## @description Special table just for unit test failures @@ -306,13 +425,13 @@ function add_jira_footer ## @replaceable no ## @param failurereason ## @param testlist -function add_jira_test_table +function add_test_table { local failure=$1 shift 1 - JIRA_TEST_TABLE[${JTT}]="| ${failure} | $* |" - JTT=$(( JTT+1 )) + TP_TEST_TABLE[${TP_TEST_COUNTER}]="| ${failure} | $* |" + ((TP_TEST_COUNTER=TP_TEST_COUNTER+1 )) } ## @description Large display for the user console @@ -334,29 +453,6 @@ function big_console_header printf "\n\n" } -## @description Remove {color} tags from a string -## @audience public -## @stability stable -## @replaceable no -## @param string -## @return string -function colorstripper -{ - local string=$1 - shift 1 - - local green="" - local white="" - local red="" - local blue="" - - echo "${string}" | \ - ${SED} -e "s,{color:red},${red},g" \ - -e "s,{color:green},${green},g" \ - -e "s,{color:blue},${blue},g" \ - -e "s,{color},${white},g" -} - ## @description Find the largest size of a column of an array ## @audience private ## @stability evolving @@ -391,7 +487,7 @@ function find_java_home { start_clock if [[ -z ${JAVA_HOME:-} ]]; then - case $(uname -s) in + case ${OSTYPE} in Darwin) if [[ -z "${JAVA_HOME}" ]]; then if [[ -x /usr/libexec/java_home ]]; then @@ -409,7 +505,7 @@ function find_java_home if [[ -z ${JAVA_HOME:-} ]]; then echo "JAVA_HOME is not defined." - add_jira_table -1 pre-patch "JAVA_HOME is not defined." + add_vote_table -1 pre-patch "JAVA_HOME is not defined." return 1 fi return 0 @@ -420,26 +516,17 @@ function find_java_home ## @stability stable ## @audience public ## @returns ${JIRACLI} exit code -function write_to_jira +function write_comment { local -r commentfile=${1} shift - local retval + local retval=0 if [[ ${OFFLINE} == false && ${JENKINS} == true ]]; then - export USER=hudson - # shellcheck disable=SC2086 - ${JIRACLI} --comment "$(cat ${commentfile})" \ - -s https://issues.apache.org/jira \ - -a addcomment -u hadoopqa \ - -p "${JIRA_PASSWD}" \ - --issue "${ISSUE}" + ${BUGSYSTEM}_write_comment "${commentfile}" retval=$? - ${JIRACLI} -s https://issues.apache.org/jira \ - -a logout -u hadoopqa \ - -p "${JIRA_PASSWD}" fi return ${retval} } @@ -457,44 +544,46 @@ function verify_patchdir_still_exists local extra="" if [[ ! -d ${PATCH_DIR} ]]; then - rm "${commentfile}" 2>/dev/null - - echo "(!) The patch artifact directory has been removed! " > "${commentfile}" - echo "This is a fatal error for test-patch.sh. Aborting. " >> "${commentfile}" - echo - cat ${commentfile} - echo - if [[ ${JENKINS} == true ]]; then - if [[ -n ${NODE_NAME} ]]; then - extra=" (node ${NODE_NAME})" - fi - echo "Jenkins${extra} information at ${BUILD_URL} may provide some hints. " >> "${commentfile}" + rm "${commentfile}" 2>/dev/null - write_to_jira ${commentfile} + echo "(!) The patch artifact directory has been removed! " > "${commentfile}" + echo "This is a fatal error for test-patch.sh. Aborting. 
" >> "${commentfile}" + echo + cat ${commentfile} + echo + if [[ ${JENKINS} == true ]]; then + if [[ -n ${NODE_NAME} ]]; then + extra=" (node ${NODE_NAME})" fi + echo "Jenkins${extra} information at ${BUILD_URL} may provide some hints. " >> "${commentfile}" - rm "${commentfile}" - cleanup_and_exit ${RESULT} + write_comment ${commentfile} fi + + rm "${commentfile}" + cleanup_and_exit ${RESULT} + fi } -## @description generate a list of all files and line numbers that -## @description that were added/changed in the source repo +## @description generate a list of all files and line numbers in $GITDIFFLINES that +## @description that were added/changed in the source repo. $GITDIFFCONTENT +## @description is same file, but also includes the content of those lines ## @audience private ## @stability stable -## @params filename ## @replaceable no function compute_gitdiff { - local outfile=$1 local file local line local startline local counter local numlines local actual + local content + local outfile="${PATCH_DIR}/computegitdiff.${RANDOM}" pushd "${BASEDIR}" >/dev/null + ${GIT} add --all --intent-to-add while read line; do if [[ ${line} =~ ^\+\+\+ ]]; then file="./"$(echo "${line}" | cut -f2- -d/) @@ -511,11 +600,27 @@ function compute_gitdiff numlines=1 fi counter=0 - until [[ ${counter} -gt ${numlines} ]]; do + # it isn't obvious, but on MOST platforms under MOST use cases, + # this is faster than using sed, and definitely faster than using + # awk. + # http://unix.stackexchange.com/questions/47407/cat-line-x-to-line-y-on-a-huge-file + # has a good discussion w/benchmarks + # + # note that if tail is still sending data through the pipe, but head gets enough + # to do what was requested, head will exit, leaving tail with a broken pipe. + # we're going to send stderr to /dev/null and ignore the error since head's + # output is really what we're looking for + tail -n "+${startline}" "${file}" 2>/dev/null | head -n ${numlines} > "${outfile}" + oldifs=${IFS} + IFS='' + while read -r content; do ((actual=counter+startline)) - echo "${file}:${actual}:" >> "${outfile}" + echo "${file}:${actual}:" >> "${GITDIFFLINES}" + printf "%s:%s:%s\n" "${file}" "${actual}" "${content}" >> "${GITDIFFCONTENT}" ((counter=counter+1)) - done + done < "${outfile}" + rm "${outfile}" + IFS=${oldifs} fi done < <("${GIT}" diff --unified=0 --no-color) popd >/dev/null @@ -540,35 +645,39 @@ function echo_and_redirect verify_patchdir_still_exists find "${BASEDIR}" -type d -exec chmod +x {} \; + # to the screen echo "${*} > ${logfile} 2>&1" - "${@}" > "${logfile}" 2>&1 + # to the log + echo "${*}" > "${logfile}" + # the actual command + "${@}" >> "${logfile}" 2>&1 } -## @description is PATCH_DIR relative to BASEDIR? +## @description is a given directory relative to BASEDIR? 
## @audience public ## @stability stable ## @replaceable yes -## @returns 1 - no, PATCH_DIR -## @returns 0 - yes, PATCH_DIR - BASEDIR -function relative_patchdir +## @param path +## @returns 1 - no, path +## @returns 0 - yes, path - BASEDIR +function relative_dir { - local p=${PATCH_DIR#${BASEDIR}} + local p=${1#${BASEDIR}} - if [[ ${#p} -eq ${#PATCH_DIR} ]]; then - echo ${p} + if [[ ${#p} -eq ${#1} ]]; then + echo "${p}" return 1 fi p=${p#/} - echo ${p} + echo "${p}" return 0 } - ## @description Print the usage information ## @audience public ## @stability stable ## @replaceable no -function hadoop_usage +function testpatch_usage { local -r up=$(echo ${PROJECT_NAME} | tr '[:lower:]' '[:upper:]') @@ -582,25 +691,36 @@ function hadoop_usage echo "Options:" echo "--basedir= The directory to apply the patch to (default current directory)" echo "--branch= Forcibly set the branch" - echo "--branch-default= If the branch isn't forced and we don't detect one in the patch name, use this branch (default 'trunk')" + echo "--branch-default= If the branch isn't forced and we don't detect one in the patch name, use this branch (default 'master')" + #not quite working yet + #echo "--bugsystem= The bug system in use ('jira', the default, or 'github')" echo "--build-native= If true, then build native components (default 'true')" - echo "--contrib-guide= URL to point new users towards project conventions. (default Hadoop's wiki)" + echo "--build-tool= Pick which build tool to focus around (maven, ant)" + echo "--contrib-guide= URL to point new users towards project conventions. (default: ${HOW_TO_CONTRIBUTE} )" echo "--debug If set, then output some extra stuff to stderr" echo "--dirty-workspace Allow the local git workspace to have uncommitted changes" - echo "--findbugs-home= Findbugs home directory (default FINDBUGS_HOME environment variable)" - echo "--findbugs-strict-precheck If there are Findbugs warnings during precheck, fail" - echo "--issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default '^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$')" + echo "--docker Spawn a docker container" + echo "--dockerfile= Dockerfile fragment to use as the base" + echo "--issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${ISSUE_RE}\')" + echo "--java-home= Set JAVA_HOME (In Docker mode, this should be local to the image)" + echo "--multijdkdirs= Comma delimited lists of JDK paths to use for multi-JDK tests" + echo "--multijdktests= Comma delimited tests to use when multijdkdirs is used. (default: javac,javadoc,unit)" echo "--modulelist= Specify additional modules to test (comma delimited)" echo "--offline Avoid connecting to the Internet" - echo "--patch-dir= The directory for working and output files (default '/tmp/${PROJECT_NAME}-test-patch/pid')" + echo "--patch-dir= The directory for working and output files (default '/tmp/test-patch-${PROJECT_NAME}/pid')" + echo "--personality= The personality file to load" echo "--plugins= A directory of user provided plugins. 
see test-patch.d for examples (default empty)" - echo "--project= The short name for project currently using test-patch (default 'hadoop')" + echo "--project= The short name for project currently using test-patch (default 'yetus')" echo "--resetrepo Forcibly clean the repo" echo "--run-tests Run all relevant tests below the base directory" echo "--skip-system-plugins Do not load plugins from ${BINDIR}/test-patch.d" + echo "--summarize= Allow tests to summarize results" echo "--testlist= Specify which subsystem tests to use (comma delimited)" - + echo "--test-parallel= Run multiple tests in parallel (default false in developer mode, true in Jenkins mode)" + echo "--test-threads= Number of tests to run in parallel (default defined in ${PROJECT_NAME} build)" + echo "" echo "Shell binary overrides:" + echo "--ant-cmd= The 'ant' command to use (default \${ANT_HOME}/bin/ant, or 'ant')" echo "--awk-cmd= The 'awk' command to use (default 'awk')" echo "--diff-cmd= The GNU-compatible 'diff' command to use (default 'diff')" echo "--file-cmd= The 'file' command to use (default 'file')" @@ -608,17 +728,24 @@ function hadoop_usage echo "--grep-cmd= The 'grep' command to use (default 'grep')" echo "--mvn-cmd= The 'mvn' command to use (default \${MAVEN_HOME}/bin/mvn, or 'mvn')" echo "--patch-cmd= The 'patch' command to use (default 'patch')" - echo "--ps-cmd= The 'ps' command to use (default 'ps')" echo "--sed-cmd= The 'sed' command to use (default 'sed')" echo echo "Jenkins-only options:" echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)" + echo "--build-url Set the build location web page" echo "--eclipse-home= Eclipse home directory (default ECLIPSE_HOME environment variable)" - echo "--jira-cmd= The 'jira' command to use (default 'jira')" - echo "--jira-password= The password for the 'jira' command" echo "--mv-patch-dir Move the patch-dir into the basedir during cleanup." echo "--wget-cmd= The 'wget' command to use (default 'wget')" + + importplugins + + for plugin in ${PLUGINS} ${BUGSYSTEMS}; do + if declare -f ${plugin}_usage >/dev/null 2>&1; then + echo + "${plugin}_usage" + fi + done } ## @description Interpret the command line parameters @@ -634,6 +761,9 @@ function parse_args for i in "$@"; do case ${i} in + --ant-cmd=*) + ANT=${i#*=} + ;; --awk-cmd=*) AWK=${i#*=} ;; @@ -646,14 +776,23 @@ function parse_args --branch-default=*) PATCH_BRANCH_DEFAULT=${i#*=} ;; + --bugsystem=*) + BUGSYSTEM=${i#*=} + ;; --build-native=*) BUILD_NATIVE=${i#*=} ;; + --build-tool=*) + BUILDTOOL=${i#*=} + ;; + --build-url=*) + BUILD_URL=${i#*=} + ;; --contrib-guide=*) HOW_TO_CONTRIBUTE=${i#*=} ;; --debug) - HADOOP_SHELL_SCRIPT_DEBUG=true + TP_SHELL_SCRIPT_DEBUG=true ;; --diff-cmd=*) DIFF=${i#*=} @@ -661,18 +800,21 @@ function parse_args --dirty-workspace) DIRTY_WORKSPACE=true ;; + --docker) + DOCKERSUPPORT=true + ;; + --dockerfile=*) + DOCKERFILE=${i#*=} + ;; + --dockermode) + DOCKERMODE=true + ;; --eclipse-home=*) ECLIPSE_HOME=${i#*=} ;; --file-cmd=*) FILE=${i#*=} ;; - --findbugs-home=*) - FINDBUGS_HOME=${i#*=} - ;; - --findbugs-strict-precheck) - FINDBUGS_WARNINGS_FAIL_PRECHECK=true - ;; --git-cmd=*) GIT=${i#*=} ;; @@ -680,7 +822,7 @@ function parse_args GREP=${i#*=} ;; --help|-help|-h|help|--h|--\?|-\?|\?) 
- hadoop_usage + testpatch_usage exit 0 ;; --issue-re=*) @@ -691,17 +833,22 @@ function parse_args ;; --jenkins) JENKINS=true - ;; - --jira-cmd=*) - JIRACLI=${i#*=} - ;; - --jira-password=*) - JIRA_PASSWD=${i#*=} + TEST_PARALLEL=${TEST_PARALLEL:-true} ;; --modulelist=*) USER_MODULE_LIST=${i#*=} USER_MODULE_LIST=${USER_MODULE_LIST//,/ } - hadoop_debug "Manually forcing modules ${USER_MODULE_LIST}" + yetus_debug "Manually forcing modules ${USER_MODULE_LIST}" + ;; + --multijdkdirs=*) + JDK_DIR_LIST=${i#*=} + JDK_DIR_LIST=${JDK_DIR_LIST//,/ } + yetus_debug "Multi-JVM mode activated with ${JDK_DIR_LIST}" + ;; + --multijdktests=*) + JDK_TEST_LIST=${i#*=} + JDK_TEST_LIST=${JDK_TEST_LIST//,/ } + yetus_debug "Multi-JVM test list: ${JDK_TEST_LIST}" ;; --mvn-cmd=*) MVN=${i#*=} @@ -718,19 +865,17 @@ function parse_args --patch-dir=*) USER_PATCH_DIR=${i#*=} ;; + --personality=*) + PERSONALITY=${i#*=} + ;; --plugins=*) USER_PLUGIN_DIR=${i#*=} ;; --project=*) PROJECT_NAME=${i#*=} ;; - --ps-cmd=*) - PS=${i#*=} - ;; --reexec) REEXECED=true - start_clock - add_jira_table 0 reexec "dev-support patch detected." ;; --resetrepo) RESETREPO=true @@ -741,62 +886,81 @@ function parse_args --skip-system-plugins) LOAD_SYSTEM_PLUGINS=false ;; + --summarize=*) + ALLOWSUMMARIES=${i#*=} + ;; --testlist=*) testlist=${i#*=} testlist=${testlist//,/ } for j in ${testlist}; do - hadoop_debug "Manually adding patch test subsystem ${j}" + yetus_debug "Manually adding patch test subsystem ${j}" add_test "${j}" done ;; + --test-parallel=*) + TEST_PARALLEL=${i#*=} + ;; + --test-threads=*) + # shellcheck disable=SC2034 + TEST_THREADS=${i#*=} + ;; + --tpglobaltimer=*) + GLOBALTIMER=${i#*=} + ;; + --tpreexectimer=*) + REEXECLAUNCHTIMER=${i#*=} + ;; --wget-cmd=*) WGET=${i#*=} ;; + --*) + ## PATCH_OR_ISSUE can't be a --. So this is probably + ## a plugin thing. + continue + ;; *) PATCH_OR_ISSUE=${i} ;; esac done - # if we requested offline, pass that to mvn - if [[ ${OFFLINE} == "true" ]] ; then - MAVEN_ARGS=(${MAVEN_ARGS[@]} --offline) + if [[ -n ${REEXECLAUNCHTIMER} ]]; then + TIMER=${REEXECLAUNCHTIMER}; + else + start_clock fi - # we need absolute dir for ${BASEDIR} - cd "${CWD}" - BASEDIR=$(cd -P -- "${BASEDIR}" >/dev/null && pwd -P) + if [[ ${REEXECED} == true + && ${DOCKERMODE} == true ]]; then + add_vote_table 0 reexec "docker + precommit patch detected." + elif [[ ${REEXECED} == true ]]; then + add_vote_table 0 reexec "precommit patch detected." + elif [[ ${DOCKERMODE} == true ]]; then + add_vote_table 0 reexec "docker mode." 
+  fi
 
-  if [[ ${BUILD_NATIVE} == "true" ]] ; then
-    NATIVE_PROFILE=-Pnative
-    REQUIRE_TEST_LIB_HADOOP=-Drequire.test.libhadoop
+  # if we requested offline, pass that to mvn
+  if [[ ${OFFLINE} == "true" ]]; then
+    MAVEN_ARGS=(${MAVEN_ARGS[@]} --offline)
+    ANT_ARGS=(${ANT_ARGS[@]} -Doffline=)
   fi
+
   if [[ -z "${PATCH_OR_ISSUE}" ]]; then
-    hadoop_usage
+    testpatch_usage
     exit 1
   fi
-  if [[ ${JENKINS} == "true" ]] ; then
-    echo "Running in Jenkins mode"
-    ISSUE=${PATCH_OR_ISSUE}
-    RESETREPO=true
-    # shellcheck disable=SC2034
-    ECLIPSE_PROPERTY="-Declipse.home=${ECLIPSE_HOME}"
-  else
-    if [[ ${RESETREPO} == "true" ]] ; then
-      echo "Running in destructive (--resetrepo) developer mode"
-    else
-      echo "Running in developer mode"
-    fi
-    JENKINS=false
-  fi
+
+  # we need absolute dir for ${BASEDIR}
+  cd "${STARTINGDIR}"
+  BASEDIR=$(cd -P -- "${BASEDIR}" >/dev/null && pwd -P)
 
   if [[ -n ${USER_PATCH_DIR} ]]; then
     PATCH_DIR="${USER_PATCH_DIR}"
   else
-    PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
+    PATCH_DIR=/tmp/test-patch-${PROJECT_NAME}/$$
   fi
 
-  cd "${CWD}"
+  cd "${STARTINGDIR}"
   if [[ ! -d ${PATCH_DIR} ]]; then
     mkdir -p "${PATCH_DIR}"
     if [[ $? == 0 ]] ; then
@@ -810,26 +974,76 @@ function parse_args
   # we need absolute dir for PATCH_DIR
   PATCH_DIR=$(cd -P -- "${PATCH_DIR}" >/dev/null && pwd -P)
 
-  GITDIFFLINES=${PATCH_DIR}/gitdifflines.txt
+  if [[ ${JENKINS} == "true" ]]; then
+    echo "Running in Jenkins mode"
+    ISSUE=${PATCH_OR_ISSUE}
+    RESETREPO=true
+    # shellcheck disable=SC2034
+    ECLIPSE_PROPERTY="-Declipse.home=${ECLIPSE_HOME}"
+  else
+    if [[ ${RESETREPO} == "true" ]] ; then
+      echo "Running in destructive (--resetrepo) developer mode"
+    else
+      echo "Running in developer mode"
+    fi
+    JENKINS=false
+  fi
+
+  if [[ -n "${USER_PLUGIN_DIR}" ]]; then
+    USER_PLUGIN_DIR=$(cd -P -- "${USER_PLUGIN_DIR}" >/dev/null && pwd -P)
+  fi
+
+  GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt"
+  GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt"
 }
 
 ## @description Locate the pom.xml file for a given directory
 ## @audience private
 ## @stability stable
 ## @replaceable no
-## @return directory containing the pom.xml
-function find_pom_dir
+## @return directory containing the pom.xml. Nothing returned if not found.
+function find_pomxml_dir
 {
   local dir
 
   dir=$(dirname "$1")
 
-  hadoop_debug "Find pom dir for: ${dir}"
+  yetus_debug "Find pom.xml dir for: ${dir}"
 
   while builtin true; do
     if [[ -f "${dir}/pom.xml" ]];then
       echo "${dir}"
-      hadoop_debug "Found: ${dir}"
+      yetus_debug "Found: ${dir}"
+      return
+    elif [[ ${dir} == "." ]]; then
+      yetus_error "ERROR: pom.xml is not found. Make sure the target is a Maven-based project."
+      return
+    else
+      dir=$(dirname "${dir}")
+    fi
+  done
+}
+
+## @description Locate the build.xml file for a given directory
+## @audience private
+## @stability stable
+## @replaceable no
+## @return directory containing the build.xml. Nothing returned if not found.
+function find_buildxml_dir
+{
+  local dir
+
+  dir=$(dirname "$1")
+
+  yetus_debug "Find build.xml dir for: ${dir}"
+
+  while builtin true; do
+    if [[ -f "${dir}/build.xml" ]];then
+      echo "${dir}"
+      yetus_debug "Found: ${dir}"
+      return
+    elif [[ ${dir} == "." ]]; then
+      yetus_error "ERROR: build.xml is not found. Make sure the target is an Ant-based project."
return else dir=$(dirname "${dir}") @@ -858,33 +1072,65 @@ function find_changed_files | sort -u) } -## @description Find the modules of the maven build that ${PATCH_DIR}/patch modifies +## @description Find the modules of the build that ${PATCH_DIR}/patch modifies ## @audience private ## @stability stable ## @replaceable no -## @return None; sets ${CHANGED_MODULES} +## @return None; sets ${CHANGED_MODULES} and ${CHANGED_UNFILTERED_MODULES} function find_changed_modules { # Come up with a list of changed files into ${TMP} local pomdirs + local pomdir local module local pommods # Now find all the modules that were changed for file in ${CHANGED_FILES}; do - #shellcheck disable=SC2086 - pomdirs="${pomdirs} $(find_pom_dir ${file})" + case ${BUILDTOOL} in + maven) + #shellcheck disable=SC2086 + pomdir=$(find_pomxml_dir ${file}) + if [[ -z ${pomdir} ]]; then + output_to_console 1 + output_to_bugsystem 1 + cleanup_and_exit 1 + fi + pomdirs="${pomdirs} ${pomdir}" + ;; + ant) + #shellcheck disable=SC2086 + pomdir=$(find_buildxml_dir ${file}) + if [[ -z ${pomdir} ]]; then + output_to_console 1 + output_to_bugsystem 1 + cleanup_and_exit 1 + fi + pomdirs="${pomdirs} ${pomdir}" + ;; + *) + yetus_error "ERROR: Unsupported build tool." + output_to_console 1 + output_to_bugsystem 1 + cleanup_and_exit 1 + ;; + esac done - # Filter out modules without code - for module in ${pomdirs}; do - ${GREP} "pom" "${module}/pom.xml" > /dev/null - if [[ "$?" != 0 ]]; then - pommods="${pommods} ${module}" - fi - done + #shellcheck disable=SC2086,SC2034 + CHANGED_UNFILTERED_MODULES=$(echo ${pomdirs} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) + + if [[ ${BUILDTOOL} == maven ]]; then + # Filter out modules without code + for module in ${pomdirs}; do + ${GREP} "pom" "${module}/pom.xml" > /dev/null + if [[ "$?" != 0 ]]; then + pommods="${pommods} ${module}" + fi + done + fi - #shellcheck disable=SC2086 + #shellcheck disable=SC2086,SC2034 CHANGED_MODULES=$(echo ${pommods} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) } @@ -904,38 +1150,38 @@ function git_checkout cd "${BASEDIR}" if [[ ! -d .git ]]; then - hadoop_error "ERROR: ${BASEDIR} is not a git repo." + yetus_error "ERROR: ${BASEDIR} is not a git repo." cleanup_and_exit 1 fi if [[ ${RESETREPO} == "true" ]] ; then ${GIT} reset --hard if [[ $? != 0 ]]; then - hadoop_error "ERROR: git reset is failing" + yetus_error "ERROR: git reset is failing" cleanup_and_exit 1 fi # if PATCH_DIR is in BASEDIR, then we don't want # git wiping it out. - exemptdir=$(relative_patchdir) + exemptdir=$(relative_dir "${PATCH_DIR}") if [[ $? == 1 ]]; then ${GIT} clean -xdf else # we do, however, want it emptied of all _files_. # we need to leave _directories_ in case we are in # re-exec mode (which places a directory full of stuff in it) - hadoop_debug "Exempting ${exemptdir} from clean" + yetus_debug "Exempting ${exemptdir} from clean" rm "${PATCH_DIR}/*" 2>/dev/null ${GIT} clean -xdf -e "${exemptdir}" fi if [[ $? != 0 ]]; then - hadoop_error "ERROR: git clean is failing" + yetus_error "ERROR: git clean is failing" cleanup_and_exit 1 fi ${GIT} checkout --force "${PATCH_BRANCH_DEFAULT}" if [[ $? != 0 ]]; then - hadoop_error "ERROR: git checkout --force ${PATCH_BRANCH_DEFAULT} is failing" + yetus_error "ERROR: git checkout --force ${PATCH_BRANCH_DEFAULT} is failing" cleanup_and_exit 1 fi @@ -949,14 +1195,14 @@ function git_checkout if [[ ${OFFLINE} == false ]]; then ${GIT} pull --rebase if [[ $? 
!= 0 ]]; then - hadoop_error "ERROR: git pull is failing" + yetus_error "ERROR: git pull is failing" cleanup_and_exit 1 fi fi # forcibly checkout this branch or git ref ${GIT} checkout --force "${PATCH_BRANCH}" if [[ $? != 0 ]]; then - hadoop_error "ERROR: git checkout ${PATCH_BRANCH} is failing" + yetus_error "ERROR: git checkout ${PATCH_BRANCH} is failing" cleanup_and_exit 1 fi @@ -965,7 +1211,7 @@ function git_checkout if [[ ${OFFLINE} == false ]]; then ${GIT} pull --rebase if [[ $? != 0 ]]; then - hadoop_error "ERROR: git pull is failing" + yetus_error "ERROR: git pull is failing" cleanup_and_exit 1 fi fi @@ -974,9 +1220,9 @@ function git_checkout status=$(${GIT} status --porcelain) if [[ "${status}" != "" && -z ${DIRTY_WORKSPACE} ]] ; then - hadoop_error "ERROR: --dirty-workspace option not provided." - hadoop_error "ERROR: can't run in a workspace that contains the following modifications" - hadoop_error "${status}" + yetus_error "ERROR: --dirty-workspace option not provided." + yetus_error "ERROR: can't run in a workspace that contains the following modifications" + yetus_error "${status}" cleanup_and_exit 1 fi @@ -1005,114 +1251,42 @@ function git_checkout echo "Testing ${ISSUE} patch on ${PATCH_BRANCH}." fi - add_jira_footer "git revision" "${PATCH_BRANCH} / ${GIT_REVISION}" + add_footer_table "git revision" "${PATCH_BRANCH} / ${GIT_REVISION}" - if [[ ! -f ${BASEDIR}/pom.xml ]]; then - hadoop_error "ERROR: This verison of test-patch.sh only supports Maven-based builds. Aborting." - add_jira_table -1 pre-patch "Unsupported build system." - output_to_jira 1 - cleanup_and_exit 1 - fi return 0 } -## @description Confirm the source environment is compilable +## @description Confirm the given branch is a member of the list of space +## @description delimited branches or a git ref ## @audience private -## @stability stable +## @stability evolving ## @replaceable no +## @param branch +## @param branchlist ## @return 0 on success ## @return 1 on failure -function precheck_without_patch +function verify_valid_branch { - local -r mypwd=$(pwd) + local branches=$1 + local check=$2 + local i - big_console_header "Pre-patch ${PATCH_BRANCH} Java verification" + # shortcut some common + # non-resolvable names + if [[ -z ${check} ]]; then + return 1 + fi - start_clock + if [[ ${check} == patch ]]; then + return 1 + fi - verify_needed_test javac + if [[ ${check} =~ ^git ]]; then + ref=$(echo "${check}" | cut -f2 -dt) + count=$(echo "${ref}" | wc -c | tr -d ' ') - if [[ $? == 1 ]]; then - echo "Compiling ${mypwd}" - echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavacWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch - if [[ $? != 0 ]] ; then - echo "${PATCH_BRANCH} compilation is broken?" - add_jira_table -1 pre-patch "${PATCH_BRANCH} compilation may be broken." - return 1 - fi - else - echo "Patch does not appear to need javac tests." - fi - - verify_needed_test javadoc - - if [[ $? == 1 ]]; then - echo "Javadoc'ing ${mypwd}" - echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess - if [[ $? != 0 ]] ; then - echo "Pre-patch ${PATCH_BRANCH} javadoc compilation is broken?" - add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} JavaDoc compilation may be broken." - return 1 - fi - else - echo "Patch does not appear to need javadoc tests." - fi - - verify_needed_test site - - if [[ $? 
== 1 ]]; then - echo "site creation for ${mypwd}" - echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}SiteWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess - if [[ $? != 0 ]] ; then - echo "Pre-patch ${PATCH_BRANCH} site compilation is broken?" - add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} site compilation may be broken." - return 1 - fi - else - echo "Patch does not appear to need site tests." - fi - - precheck_findbugs - - if [[ $? != 0 ]] ; then - return 1 - fi - - add_jira_table 0 pre-patch "Pre-patch ${PATCH_BRANCH} compilation is healthy." - return 0 -} - -## @description Confirm the given branch is a member of the list of space -## @description delimited branches or a git ref -## @audience private -## @stability evolving -## @replaceable no -## @param branch -## @param branchlist -## @return 0 on success -## @return 1 on failure -function verify_valid_branch -{ - local branches=$1 - local check=$2 - local i - - # shortcut some common - # non-resolvable names - if [[ -z ${check} ]]; then - return 1 - fi - - if [[ ${check} == patch ]]; then - return 1 - fi - - if [[ ${check} =~ ^git ]]; then - ref=$(echo "${check}" | cut -f2 -dt) - count=$(echo "${ref}" | wc -c | tr -d ' ') - - if [[ ${count} == 8 || ${count} == 41 ]]; then - return 0 + if [[ ${count} == 8 || ${count} == 41 ]]; then + return 0 fi return 1 fi @@ -1136,7 +1310,7 @@ function determine_branch local allbranches local patchnamechunk - hadoop_debug "Determine branch" + yetus_debug "Determine branch" # something has already set this, so move on if [[ -n ${PATCH_BRANCH} ]]; then @@ -1155,12 +1329,12 @@ function determine_branch allbranches=$(${GIT} branch -r | tr -d ' ' | ${SED} -e s,origin/,,g) for j in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do - hadoop_debug "Determine branch: starting with ${j}" + yetus_debug "Determine branch: starting with ${j}" # shellcheck disable=SC2016 patchnamechunk=$(echo "${j}" | ${AWK} -F/ '{print $NF}') # ISSUE.branch.##.patch - hadoop_debug "Determine branch: ISSUE.branch.##.patch" + yetus_debug "Determine branch: ISSUE.branch.##.patch" PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2 -d. ) verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" if [[ $? == 0 ]]; then @@ -1168,7 +1342,7 @@ function determine_branch fi # ISSUE-branch-##.patch - hadoop_debug "Determine branch: ISSUE-branch-##.patch" + yetus_debug "Determine branch: ISSUE-branch-##.patch" PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" if [[ $? == 0 ]]; then @@ -1176,7 +1350,7 @@ function determine_branch fi # ISSUE-##.patch.branch - hadoop_debug "Determine branch: ISSUE-##.patch.branch" + yetus_debug "Determine branch: ISSUE-##.patch.branch" # shellcheck disable=SC2016 PATCH_BRANCH=$(echo "${patchnamechunk}" | ${AWK} -F. '{print $NF}') verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" @@ -1185,7 +1359,7 @@ function determine_branch fi # ISSUE-branch.##.patch - hadoop_debug "Determine branch: ISSUE-branch.##.patch" + yetus_debug "Determine branch: ISSUE-branch.##.patch" # shellcheck disable=SC2016 PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | ${AWK} -F. 
'{print $(NF-2)}' 2>/dev/null) verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" @@ -1210,7 +1384,7 @@ function determine_issue local patchnamechunk local maybeissue - hadoop_debug "Determine issue" + yetus_debug "Determine issue" # we can shortcut jenkins if [[ ${JENKINS} == true ]]; then @@ -1241,13 +1415,13 @@ function add_test { local testname=$1 - hadoop_debug "Testing against ${testname}" + yetus_debug "Testing against ${testname}" if [[ -z ${NEEDED_TESTS} ]]; then - hadoop_debug "Setting tests to ${testname}" + yetus_debug "Setting tests to ${testname}" NEEDED_TESTS=${testname} elif [[ ! ${NEEDED_TESTS} =~ ${testname} ]] ; then - hadoop_debug "Adding ${testname}" + yetus_debug "Adding ${testname}" NEEDED_TESTS="${NEEDED_TESTS} ${testname}" fi } @@ -1279,44 +1453,8 @@ function determine_needed_tests local i for i in ${CHANGED_FILES}; do - if [[ ${i} =~ src/main/webapp ]]; then - hadoop_debug "tests/webapp: ${i}" - elif [[ ${i} =~ \.sh - || ${i} =~ \.cmd - ]]; then - hadoop_debug "tests/shell: ${i}" - elif [[ ${i} =~ \.md$ - || ${i} =~ \.md\.vm$ - || ${i} =~ src/site - || ${i} =~ src/main/docs - ]]; then - hadoop_debug "tests/site: ${i}" - add_test site - elif [[ ${i} =~ \.c$ - || ${i} =~ \.cc$ - || ${i} =~ \.h$ - || ${i} =~ \.hh$ - || ${i} =~ \.proto$ - || ${i} =~ src/test - || ${i} =~ \.cmake$ - || ${i} =~ CMakeLists.txt - ]]; then - hadoop_debug "tests/units: ${i}" - add_test javac - add_test unit - elif [[ ${i} =~ pom.xml$ - || ${i} =~ \.java$ - || ${i} =~ src/main - ]]; then - hadoop_debug "tests/javadoc+units: ${i}" - add_test javadoc - add_test javac - add_test unit - fi - if [[ ${i} =~ \.java$ ]]; then - add_test findbugs - fi + personality_file_tests "${i}" for plugin in ${PLUGINS}; do if declare -f ${plugin}_filefilter >/dev/null 2>&1; then @@ -1325,7 +1463,7 @@ function determine_needed_tests done done - add_jira_footer "Optional Tests" "${NEEDED_TESTS}" + add_footer_table "Optional Tests" "${NEEDED_TESTS}" } ## @description Given ${PATCH_ISSUE}, determine what type of patch file is in use, and do the @@ -1338,7 +1476,7 @@ function determine_needed_tests function locate_patch { local notSureIfPatch=false - hadoop_debug "locate patch" + yetus_debug "locate patch" if [[ -f ${PATCH_OR_ISSUE} ]]; then PATCH_FILE="${PATCH_OR_ISSUE}" @@ -1349,36 +1487,64 @@ function locate_patch else ${WGET} -q -O "${PATCH_DIR}/jira" "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" - if [[ $? != 0 ]];then - hadoop_error "ERROR: Unable to determine what ${PATCH_OR_ISSUE} may reference." - cleanup_and_exit 1 - fi - - if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then - if [[ ${JENKINS} == true ]]; then - hadoop_error "ERROR: ${PATCH_OR_ISSUE} is not \"Patch Available\"." + case $? in + 0) + ;; + 2) + yetus_error "ERROR: .wgetrc/.netrc parsing error." cleanup_and_exit 1 - else - hadoop_error "WARNING: ${PATCH_OR_ISSUE} is not \"Patch Available\"." + ;; + 3) + yetus_error "ERROR: File IO error." + cleanup_and_exit 1 + ;; + 4) + yetus_error "ERROR: URL ${PATCH_OR_ISSUE} is unreachable." + cleanup_and_exit 1 + ;; + *) + # we want to try and do as much as we can in docker mode, + # but if the patch was passed as a file, then we may not + # be able to continue. + if [[ ${REEXECED} == true + && -f "${PATCH_DIR}/patch" ]]; then + PATCH_FILE="${PATCH_DIR}/patch" + else + yetus_error "ERROR: Unable to fetch ${PATCH_OR_ISSUE}." 
+            cleanup_and_exit 1
+          fi
+        ;;
+      esac
+
+      if [[ -z "${PATCH_FILE}" ]]; then
+        if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then
+          if [[ ${JENKINS} == true ]]; then
+            yetus_error "ERROR: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
+            cleanup_and_exit 1
+          else
+            yetus_error "WARNING: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
+          fi
         fi
-      fi
 
-    relativePatchURL=$(${GREP} -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${GREP} -o '/jira/secure/attachment/[0-9]*/[^"]*')
-    PATCHURL="http://issues.apache.org${relativePatchURL}"
-    if [[ ! ${PATCHURL} =~ \.patch$ ]]; then
-      notSureIfPatch=true
+        relativePatchURL=$(${GREP} -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${GREP} -o '/jira/secure/attachment/[0-9]*/[^"]*')
+        PATCHURL="http://issues.apache.org${relativePatchURL}"
+        if [[ ! ${PATCHURL} =~ \.patch$ ]]; then
+          notSureIfPatch=true
+        fi
+        patchNum=$(echo "${PATCHURL}" | ${GREP} -o '[0-9]*/' | ${GREP} -o '[0-9]*')
+        echo "${ISSUE} patch is being downloaded at $(date) from"
       fi
-    patchNum=$(echo "${PATCHURL}" | ${GREP} -o '[0-9]*/' | ${GREP} -o '[0-9]*')
-    echo "${ISSUE} patch is being downloaded at $(date) from"
   fi
-  echo "${PATCHURL}"
-  add_jira_footer "Patch URL" "${PATCHURL}"
-  ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}"
-  if [[ $? != 0 ]];then
-    hadoop_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded."
-    cleanup_and_exit 1
+
+  if [[ -z "${PATCH_FILE}" ]]; then
+    echo "${PATCHURL}"
+    add_footer_table "Patch URL" "${PATCHURL}"
+    ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}"
+    if [[ $? != 0 ]];then
+      yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded."
+      cleanup_and_exit 1
+    fi
+    PATCH_FILE="${PATCH_DIR}/patch"
   fi
-  PATCH_FILE="${PATCH_DIR}/patch"
 fi
 
 if [[ ! -f "${PATCH_DIR}/patch" ]]; then
@@ -1386,18 +1552,19 @@ function locate_patch
   if [[ $? == 0 ]] ; then
     echo "Patch file ${PATCH_FILE} copied to ${PATCH_DIR}"
   else
-    hadoop_error "ERROR: Could not copy ${PATCH_FILE} to ${PATCH_DIR}"
+    yetus_error "ERROR: Could not copy ${PATCH_FILE} to ${PATCH_DIR}"
     cleanup_and_exit 1
   fi
 fi
 
+
 if [[ ${notSureIfPatch} == "true" ]]; then
   guess_patch_file "${PATCH_DIR}/patch"
   if [[ $? != 0 ]]; then
-    hadoop_error "ERROR: ${PATCHURL} is not a patch file."
+    yetus_error "ERROR: ${PATCHURL} is not a patch file."
     cleanup_and_exit 1
   else
-    hadoop_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. proceeding, but issue/branch matching might go awry."
-    add_jira_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions."
+    yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. Proceeding, but issue/branch matching might go awry."
+    add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions."
   fi
 fi
}
@@ -1413,15 +1580,15 @@ function guess_patch_file
  local patch=$1
  local fileOutput
 
-  hadoop_debug "Trying to guess is ${patch} is a patch file."
+  yetus_debug "Trying to guess if ${patch} is a patch file."
  fileOutput=$("${FILE}" "${patch}")
  if [[ $fileOutput =~ \ diff\ ]]; then
-    hadoop_debug "file magic says it's a diff."
+    yetus_debug "file magic says it's a diff."
     return 0
   fi
 
-  fileOutput=$(head -n 1 "${patch}" | "${EGREP}" "^(From [a-z0-9]* Mon Sep 17 00:00:00 2001)|(diff .*)|(Index: .*)$")
+  fileOutput=$(head -n 1 "${patch}" | "${GREP}" -E "^(From [a-z0-9]* Mon Sep 17 00:00:00 2001)|(diff .*)|(Index: .*)$")
   if [[ $? == 0 ]]; then
-    hadoop_debug "first line looks like a patch file."
+    yetus_debug "first line looks like a patch file."
     return 0
   fi
   return 1
 }
 
@@ -1442,7 +1609,7 @@ function verify_patch_file
   "${BINDIR}/smart-apply-patch.sh" "${PATCH_DIR}/patch" dryrun
   if [[ $? != 0 ]] ; then
     echo "PATCH APPLICATION FAILED"
-    add_jira_table -1 patch "The patch command could not apply the patch during dryrun."
+    add_vote_table -1 patch "The patch command could not apply the patch during dryrun."
     return 1
   else
     return 0
@@ -1464,17 +1631,91 @@ function apply_patch_file
   if [[ $? != 0 ]] ; then
     echo "PATCH APPLICATION FAILED"
     ((RESULT = RESULT + 1))
-    add_jira_table -1 patch "The patch command could not apply the patch."
+    add_vote_table -1 patch "The patch command could not apply the patch."
     output_to_console 1
-    output_to_jira 1
+    output_to_bugsystem 1
     cleanup_and_exit 1
   fi
   return 0
 }
 
+## @description copy the test-patch binary bits to a new working dir,
+## @description setting USER_PLUGIN_DIR and PERSONALITY to the new
+## @description locations.
+## @description this is used for test-patch in docker and reexec mode
+## @audience private
+## @stability evolving
+## @replaceable no
+function copytpbits
+{
+  local dockerdir
+  local dockfile
+  local person
+  # we need to copy/consolidate all the bits that might have changed
+  # that are considered part of test-patch. This *might* break
+  # things that do off-path includes, but there isn't much we can
+  # do about that, I don't think.
+
+  # if we've already copied, then don't bother doing it again
+  if [[ ${STARTDIR} == ${PATCH_DIR}/precommit ]]; then
+    yetus_debug "Skipping copytpbits; already copied once"
+    return
+  fi
+
+  pushd "${STARTINGDIR}" >/dev/null
+  mkdir -p "${PATCH_DIR}/precommit/user-plugins"
+  mkdir -p "${PATCH_DIR}/precommit/personality"
+  mkdir -p "${PATCH_DIR}/precommit/test-patch-docker"
 
-## @description If this actually patches the files used for the QA process
-## @description under dev-support and its subdirectories, then
+  # copy our entire universe, preserving links, etc.
+  (cd "${BINDIR}"; tar cpf - . ) | (cd "${PATCH_DIR}/precommit"; tar xpf - )
+
+  if [[ -n "${USER_PLUGIN_DIR}"
+    && -d "${USER_PLUGIN_DIR}" ]]; then
+    cp -pr "${USER_PLUGIN_DIR}"/* \
+      "${PATCH_DIR}/precommit/user-plugins"
+  fi
+  # Set to be relative to ${PATCH_DIR}/precommit
+  USER_PLUGIN_DIR="${PATCH_DIR}/precommit/user-plugins"
+
+  if [[ -n ${PERSONALITY}
+    && -f ${PERSONALITY} ]]; then
+    cp -pr "${PERSONALITY}" "${PATCH_DIR}/precommit/personality"
+    person=$(basename "${PERSONALITY}")
+
+    # Set to be relative to ${PATCH_DIR}/precommit
+    PERSONALITY="${PATCH_DIR}/precommit/personality/${person}"
+  fi
+
+  if [[ -n ${DOCKERFILE}
+    && -f ${DOCKERFILE} ]]; then
+    dockerdir=$(dirname "${DOCKERFILE}")
+    dockfile=$(basename "${DOCKERFILE}")
+    pushd "${dockerdir}" >/dev/null
+    gitfilerev=$("${GIT}" log -n 1 --pretty=format:%h -- "${dockfile}" 2>/dev/null)
+    popd >/dev/null
+    if [[ -z ${gitfilerev} ]]; then
+      gitfilerev=$(date "+%F")
+      gitfilerev="date${gitfilerev}"
+    fi
+    (
+      echo "### TEST_PATCH_PRIVATE: dockerfile=${DOCKERFILE}"
+      echo "### TEST_PATCH_PRIVATE: gitrev=${gitfilerev}"
+      cat "${DOCKERFILE}"
+      # make sure we put some space between, just in case last
+      # line isn't an empty line or whatever
+      printf "\n\n"
+      cat "${BINDIR}/test-patch-docker/Dockerfile-endstub"
+
+      printf "\n\n"
+    ) > "${PATCH_DIR}/precommit/test-patch-docker/Dockerfile"
+    DOCKERFILE="${PATCH_DIR}/precommit/test-patch-docker/Dockerfile"
+  fi
+
+  popd >/dev/null
+}
+
+## @description If this patch actually patches test-patch.sh, then
 ## @description run with the patched version for the test.
 ## @audience private
 ## @stability evolving
@@ -1483,241 +1724,664 @@ function apply_patch_file
 function check_reexec
 {
   local commentfile=${PATCH_DIR}/tp.${RANDOM}
+  local tpdir
+  local copy=false
+  local testdir
+  local person
 
   if [[ ${REEXECED} == true ]]; then
     big_console_header "Re-exec mode detected. Continuing."
     return
   fi
 
-  if [[ ! ${CHANGED_FILES} =~ dev-support/test-patch
-     && ! ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
+  for testdir in "${BINDIR}" \
+      "${PERSONALITY}" \
+      "${USER_PLUGIN_DIR}" \
+      "${DOCKERFILE}"; do
+    tpdir=$(relative_dir "${testdir}")
+    if [[ $? == 0
+       && ${CHANGED_FILES} =~ ${tpdir} ]]; then
+      copy=true
+    fi
+  done
+
+  if [[ ${copy} == true ]]; then
+    big_console_header "precommit patch detected"
+
+    if [[ ${RESETREPO} == false ]]; then
+      ((RESULT = RESULT + 1))
+      yetus_debug "can't destructively change the working directory. run with '--resetrepo' please. :("
+      add_vote_table -1 precommit "Couldn't test precommit changes because we aren't configured to destructively change the working directory."
+    else
+
+      apply_patch_file
+
+      if [[ ${JENKINS} == true ]]; then
+        rm "${commentfile}" 2>/dev/null
+        echo "(!) A patch to the testing environment has been detected. " > "${commentfile}"
+        echo "Re-executing against the patched versions to perform further tests. " >> "${commentfile}"
+        echo "The console is at ${BUILD_URL}console in case of problems." >> "${commentfile}"
+        write_comment "${commentfile}"
+        rm "${commentfile}"
+      fi
+    fi
+  fi
+
+  if [[ ${DOCKERSUPPORT} == false
+    && ${copy} == false ]]; then
     return
   fi
 
-  big_console_header "dev-support patch detected"
+  if [[ ${DOCKERSUPPORT} == true
+    && ${copy} == false ]]; then
+    big_console_header "Re-execing under Docker"
 
-  if [[ ${RESETREPO} == false ]]; then
-    ((RESULT = RESULT + 1))
-    hadoop_debug "can't destructively change the working directory. run with '--resetrepo' please. 
:(" - add_jira_table -1 dev-support "Couldn't test dev-support changes because we aren't configured to destructively change the working directory." - return fi - printf "\n\nRe-executing against patched versions to test.\n\n" + # copy our universe + copytpbits - apply_patch_file + if [[ ${DOCKERSUPPORT} == true ]]; then + # if we are doing docker, then we re-exec, but underneath the + # container - if [[ ${JENKINS} == true ]]; then + client=$(docker version | grep 'Client version' | cut -f2 -d: | tr -d ' ') + server=$(docker version | grep 'Server version' | cut -f2 -d: | tr -d ' ') - rm "${commentfile}" 2>/dev/null + dockerversion="Client=${client} Server=${server}" - echo "(!) A patch to the files used for the QA process has been detected. " > "${commentfile}" - echo "Re-executing against the patched versions to perform further tests. " >> "${commentfile}" - echo "The console is at ${BUILD_URL}console in case of problems." >> "${commentfile}" + TESTPATCHMODE="${USER_PARAMS[*]}" + if [[ -n "${BUILD_URL}" ]]; then + TESTPATCHMODE="--build-url=${BUILD_URL} ${TESTPATCHMODE}" + fi + TESTPATCHMODE="--tpglobaltimer=${GLOBALTIMER} ${TESTPATCHMODE}" + TESTPATCHMODE="--tpreexectimer=${TIMER} ${TESTPATCHMODE}" + TESTPATCHMODE="--personality=\'${PERSONALITY}\' ${TESTPATCHMODE}" + TESTPATCHMODE="--plugins=\'${USER_PLUGIN_DIR}\' ${TESTPATCHMODE}" + TESTPATCHMODE=" ${TESTPATCHMODE}" + export TESTPATCHMODE + + patchdir=$(relative_dir "${PATCH_DIR}") + + cd "${BASEDIR}" + #shellcheck disable=SC2093 + exec bash "${PATCH_DIR}/precommit/test-patch-docker/test-patch-docker.sh" \ + --dockerversion="${dockerversion}" \ + --java-home="${JAVA_HOME}" \ + --patch-dir="${patchdir}" \ + --project="${PROJECT_NAME}" - write_to_jira "${commentfile}" - rm "${commentfile}" + else + + # if we aren't doing docker, then just call ourselves + # but from the new path with the new flags + #shellcheck disable=SC2093 + cd "${PATCH_DIR}/precommit/" + exec "${PATCH_DIR}/precommit/test-patch.sh" \ + "${USER_PARAMS[@]}" \ + --reexec \ + --basedir="${BASEDIR}" \ + --branch="${PATCH_BRANCH}" \ + --patch-dir="${PATCH_DIR}" \ + --tpglobaltimer="${GLOBALTIMER}" \ + --tpreexectimer="${TIMER}" \ + --personality="${PERSONALITY}" \ + --plugins="${USER_PLUGIN_DIR}" + fi +} + +## @description Reset the test results +## @audience public +## @stability evolving +## @replaceable no +function modules_reset +{ + MODULE_STATUS=() + MODULE_STATUS_TIMER=() + MODULE_STATUS_MSG=() + MODULE_STATUS_LOG=() +} + +## @description Utility to print standard module errors +## @audience public +## @stability evolving +## @replaceable no +## @param repostatus +## @param testtype +## @param mvncmdline +function modules_messages +{ + local repostatus=$1 + local testtype=$2 + local summarymode=$3 + shift 2 + local modindex=0 + local repo + local goodtime=0 + local failure=false + local oldtimer + local statusjdk + local multijdkmode=false + + if [[ ${repostatus} == branch ]]; then + repo=${PATCH_BRANCH} + else + repo="the patch" + fi + + verify_multijdk_test "${testtype}" + if [[ $? 
== 1 ]]; then + multijdkmode=true + fi + + oldtimer=${TIMER} + + if [[ ${summarymode} == true + && ${ALLOWSUMMARIES} == true ]]; then + + until [[ ${modindex} -eq ${#MODULE[@]} ]]; do + + if [[ ${multijdkmode} == true ]]; then + statusjdk=${MODULE_STATUS_JDK[${modindex}]} + fi + + if [[ "${MODULE_STATUS[${modindex}]}" == '+1' ]]; then + ((goodtime=goodtime + ${MODULE_STATUS_TIMER[${modindex}]})) + else + failure=true + start_clock + echo "" + echo "${MODULE_STATUS_MSG[${modindex}]}" + echo "" + offset_clock "${MODULE_STATUS_TIMER[${modindex}]}" + add_vote_table "${MODULE_STATUS[${modindex}]}" "${testtype}" "${MODULE_STATUS_MSG[${modindex}]}" + if [[ ${MODULE_STATUS[${modindex}]} == -1 + && -n "${MODULE_STATUS_LOG[${modindex}]}" ]]; then + add_footer_table "${testtype}" "@@BASE@@/${MODULE_STATUS_LOG[${modindex}]}" + fi + fi + ((modindex=modindex+1)) + done + + if [[ ${failure} == false ]]; then + start_clock + offset_clock "${goodtime}" + add_vote_table +1 "${testtype}" "${repo} passed${statusjdk}" + fi + else + until [[ ${modindex} -eq ${#MODULE[@]} ]]; do + start_clock + echo "" + echo "${MODULE_STATUS_MSG[${modindex}]}" + echo "" + offset_clock "${MODULE_STATUS_TIMER[${modindex}]}" + add_vote_table "${MODULE_STATUS[${modindex}]}" "${testtype}" "${MODULE_STATUS_MSG[${modindex}]}" + if [[ ${MODULE_STATUS[${modindex}]} == -1 + && -n "${MODULE_STATUS_LOG[${modindex}]}" ]]; then + add_footer_table "${testtype}" "@@BASE@@/${MODULE_STATUS_LOG[${modindex}]}" + fi + ((modindex=modindex+1)) + done fi + TIMER=${oldtimer} +} + +## @description Add a test result +## @audience public +## @stability evolving +## @replaceable no +## @param module +## @param runtime +function module_status +{ + local index=$1 + local value=$2 + local log=$3 + shift 3 - cd "${CWD}" - mkdir -p "${PATCH_DIR}/dev-support-test" - cp -pr "${BASEDIR}"/dev-support/test-patch* "${PATCH_DIR}/dev-support-test" - cp -pr "${BASEDIR}"/dev-support/smart-apply* "${PATCH_DIR}/dev-support-test" + local jdk - big_console_header "exec'ing test-patch.sh now..." + jdk=$(report_jvm_version "${JAVA_HOME}") - exec "${PATCH_DIR}/dev-support-test/test-patch.sh" \ - --reexec \ - --branch="${PATCH_BRANCH}" \ - --patch-dir="${PATCH_DIR}" \ - "${USER_PARAMS[@]}" + if [[ -n ${index} + && ${index} =~ ^[0-9]+$ ]]; then + MODULE_STATUS[${index}]="${value}" + MODULE_STATUS_LOG[${index}]="${log}" + MODULE_STATUS_JDK[${index}]=" with JDK v${jdk}" + MODULE_STATUS_MSG[${index}]="${*}" + else + yetus_error "ASSERT: module_status given bad index: ${index}" + local frame=0 + while caller $frame; do + ((frame++)); + done + echo "$*" + exit 1 + fi } -## @description Check the current directory for @author tags -## @audience private +## @description run the maven tests for the queued modules +## @audience public +## @stability evolving +## @replaceable no +## @param repostatus +## @param testtype +## @param mvncmdline +function modules_workers +{ + local repostatus=$1 + local testtype=$2 + shift 2 + local modindex=0 + local fn + local savestart=${TIMER} + local savestop + local repo + local modulesuffix + local jdk="" + local jdkindex=0 + local statusjdk + + if [[ ${repostatus} == branch ]]; then + repo=${PATCH_BRANCH} + else + repo="the patch" + fi + + modules_reset + + verify_multijdk_test "${testtype}" + if [[ $? 
== 1 ]]; then + jdk=$(report_jvm_version "${JAVA_HOME}") + statusjdk=" with JDK v${jdk}" + jdk="-jdk${jdk}" + jdk=${jdk// /} + yetus_debug "Starting MultiJDK mode${statusjdk} on ${testtype}" + fi + + until [[ ${modindex} -eq ${#MODULE[@]} ]]; do + start_clock + + fn=$(module_file_fragment "${MODULE[${modindex}]}") + fn="${fn}${jdk}" + modulesuffix=$(basename "${MODULE[${modindex}]}") + pushd "${BASEDIR}/${MODULE[${modindex}]}" >/dev/null + + if [[ ${modulesuffix} == . ]]; then + modulesuffix="root" + fi + + if [[ $? != 0 ]]; then + echo "${BASEDIR}/${MODULE[${modindex}]} no longer exists. Skipping." + ((modindex=modindex+1)) + continue + fi + + case ${BUILDTOOL} in + maven) + #shellcheck disable=SC2086 + echo_and_redirect "${PATCH_DIR}/${repostatus}-${testtype}-${fn}.txt" \ + ${MVN} "${MAVEN_ARGS[@]}" \ + "${@//@@@MODULEFN@@@/${fn}}" \ + ${MODULEEXTRAPARAM[${modindex}]//@@@MODULEFN@@@/${fn}} -Ptest-patch + ;; + ant) + #shellcheck disable=SC2086 + echo_and_redirect "${PATCH_DIR}/${repostatus}-${testtype}-${fn}.txt" \ + "${ANT}" "${ANT_ARGS[@]}" \ + ${MODULEEXTRAPARAM[${modindex}]//@@@MODULEFN@@@/${fn}} \ + "${@//@@@MODULEFN@@@/${fn}}" + ;; + *) + yetus_error "ERROR: Unsupported build tool." + return 1 + ;; + esac + + if [[ $? == 0 ]] ; then + module_status \ + ${modindex} \ + +1 \ + "${repostatus}-${testtype}-${fn}.txt" \ + "${modulesuffix} in ${repo} passed${statusjdk}." + else + module_status \ + ${modindex} \ + -1 \ + "${repostatus}-${testtype}-${fn}.txt" \ + "${modulesuffix} in ${repo} failed${statusjdk}." + ((result = result + 1)) + fi + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${modindex}]=${savestop} + # shellcheck disable=SC2086 + echo "Elapsed: $(clock_display ${savestop})" + popd >/dev/null + ((modindex=modindex+1)) + done + + TIMER=${savestart} + + if [[ ${result} -gt 0 ]]; then + return 1 + fi + return 0 +} + +## @description Reset the queue for tests +## @audience public +## @stability evolving +## @replaceable no +function clear_personality_queue +{ + yetus_debug "Personality: clear queue" + MODCOUNT=0 + MODULE=() +} + +## @description Build the queue for tests +## @audience public ## @stability evolving ## @replaceable no +## @param module +## @param profiles/flags/etc +function personality_enqueue_module +{ + yetus_debug "Personality: enqueue $*" + local module=$1 + shift + + MODULE[${MODCOUNT}]=${module} + MODULEEXTRAPARAM[${MODCOUNT}]=${*} + ((MODCOUNT=MODCOUNT+1)) +} + +## @description Confirm compilation pre-patch +## @audience private +## @stability stable +## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_author +function precheck_javac { - local authorTags + local result=0 + local -r savejavahome=${JAVA_HOME} + local multijdkmode=false + local jdkindex=0 - big_console_header "Checking there are no @author tags in the patch." + big_console_header "Pre-patch ${PATCH_BRANCH} javac compilation" - start_clock + verify_needed_test javac + if [[ $? == 0 ]]; then + echo "Patch does not appear to need javac tests." + return 0 + fi - if [[ ${CHANGED_FILES} =~ dev-support/test-patch ]]; then - add_jira_table 0 @author "Skipping @author checks as test-patch has been patched." - return 0 + verify_multijdk_test javac + if [[ $? == 1 ]]; then + multijdkmode=true fi - authorTags=$("${GREP}" -c -i '^[^-].*@author' "${PATCH_DIR}/patch") - echo "There appear to be ${authorTags} @author tags in the patch." 
- if [[ ${authorTags} != 0 ]] ; then - add_jira_table -1 @author \ - "The patch appears to contain ${authorTags} @author tags which the Hadoop" \ - " community has agreed to not allow in code contributions." + for jdkindex in ${JDK_DIR_LIST}; do + if [[ ${multijdkmode} == true ]]; then + JAVA_HOME=${jdkindex} + fi + + personality_modules branch javac + case ${BUILDTOOL} in + maven) + modules_workers branch javac clean compile + ;; + ant) + modules_workers branch javac + ;; + *) + yetus_error "ERROR: Unsupported build tool." + return 1 + ;; + esac + + ((result=result + $?)) + modules_messages branch javac true + + done + JAVA_HOME=${savejavahome} + + if [[ ${result} -gt 0 ]]; then return 1 fi - add_jira_table +1 @author "The patch does not contain any @author tags." return 0 } -## @description Check the patch file for changed/new tests +## @description Confirm Javadoc pre-patch ## @audience private -## @stability evolving +## @stability stable ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_modified_unittests +function precheck_javadoc { - local testReferences=0 - local i + local result=0 + local -r savejavahome=${JAVA_HOME} + local multijdkmode=false + local jdkindex=0 - verify_needed_test unit + big_console_header "Pre-patch ${PATCH_BRANCH} Javadoc verification" + verify_needed_test javadoc if [[ $? == 0 ]]; then - return 0 + echo "Patch does not appear to need javadoc tests." + return 0 fi - big_console_header "Checking there are new or changed tests in the patch." - - start_clock + verify_multijdk_test javadoc + if [[ $? == 1 ]]; then + multijdkmode=true + fi - for i in ${CHANGED_FILES}; do - if [[ ${i} =~ /test/ ]]; then - ((testReferences=testReferences + 1)) + for jdkindex in ${JDK_DIR_LIST}; do + if [[ ${multijdkmode} == true ]]; then + JAVA_HOME=${jdkindex} fi + + personality_modules branch javadoc + case ${BUILDTOOL} in + maven) + modules_workers branch javadoc clean javadoc:javadoc + ;; + ant) + modules_workers branch javadoc clean javadoc + ;; + *) + yetus_error "ERROR: Unsupported build tool." + return 1 + ;; + esac + + ((result=result + $?)) + modules_messages branch javadoc true + done + JAVA_HOME=${savejavahome} - echo "There appear to be ${testReferences} test file(s) referenced in the patch." - if [[ ${testReferences} == 0 ]] ; then - add_jira_table -1 "tests included" \ - "The patch doesn't appear to include any new or modified tests. " \ - "Please justify why no new tests are needed for this patch." \ - "Also please list what manual steps were performed to verify this patch." + if [[ ${result} -gt 0 ]]; then return 1 fi - add_jira_table +1 "tests included" \ - "The patch appears to include ${testReferences} new or modified test files." return 0 } -## @description Helper for check_javadoc +## @description Confirm site pre-patch ## @audience private -## @stability evolving +## @stability stable ## @replaceable no ## @return 0 on success ## @return 1 on failure -function count_javadoc_warns +function precheck_site { - local warningfile=$1 + local result=0 - #shellcheck disable=SC2016,SC2046 - return $(${EGREP} "^[0-9]+ warnings$" "${warningfile}" | ${AWK} '{sum+=$1} END {print sum}') + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + big_console_header "Pre-patch ${PATCH_BRANCH} site verification" + + verify_needed_test site + if [[ $? == 0 ]];then + echo "Patch does not appear to need site tests." + return 0 + fi + + personality_modules branch site + modules_workers branch site clean site site:stage + result=$? 
+ modules_messages branch site true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 } -## @description Count and compare the number of JavaDoc warnings pre- and post- patch +## @description Confirm the source environment pre-patch +## @audience private +## @stability stable +## @replaceable no +## @return 0 on success +## @return 1 on failure +function precheck_without_patch +{ + local result=0 + + precheck_mvninstall + + if [[ $? -gt 0 ]]; then + ((result = result +1 )) + fi + + precheck_javac + + if [[ $? -gt 0 ]]; then + ((result = result +1 )) + fi + + precheck_javadoc + + if [[ $? -gt 0 ]]; then + ((result = result +1 )) + fi + + precheck_site + + if [[ $? -gt 0 ]]; then + ((result = result +1 )) + fi + + if [[ ${result} -gt 0 ]]; then + return 1 + fi + + return 0 +} + +## @description Check the current directory for @author tags ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_javadoc +function check_author { - local numBranchJavadocWarnings - local numPatchJavadocWarnings + local authorTags + local -r appname=$(basename "${BASH_SOURCE-$0}") - verify_needed_test javadoc + big_console_header "Checking there are no @author tags in the patch." - if [[ $? == 0 ]]; then - echo "This patch does not appear to need javadoc checks." + if [[ ${CHANGED_FILES} =~ ${appname} ]]; then + echo "Skipping @author checks as ${appname} has been patched." + add_vote_table 0 @author "Skipping @author checks as ${appname} has been patched." return 0 fi - big_console_header "Determining number of patched javadoc warnings" - start_clock - if [[ -d hadoop-project ]]; then - (cd hadoop-project; "${MVN}" "${MAVEN_ARGS[@]}" install > /dev/null 2>&1) - fi - if [[ -d hadoop-common-project/hadoop-annotations ]]; then - (cd hadoop-common-project/hadoop-annotations; "${MVN}" "${MAVEN_ARGS[@]}" install > /dev/null 2>&1) - fi - echo_and_redirect "${PATCH_DIR}/patchJavadocWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess - count_javadoc_warns "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt" - numBranchJavadocWarnings=$? - count_javadoc_warns "${PATCH_DIR}/patchJavadocWarnings.txt" - numPatchJavadocWarnings=$? - - echo "There appear to be ${numBranchJavadocWarnings} javadoc warnings before the patch and ${numPatchJavadocWarnings} javadoc warnings after applying the patch." - if [[ ${numBranchJavadocWarnings} != "" && ${numPatchJavadocWarnings} != "" ]] ; then - if [[ ${numPatchJavadocWarnings} -gt ${numBranchJavadocWarnings} ]] ; then - - ${GREP} -i warning "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt" > "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarningsFiltered.txt" - ${GREP} -i warning "${PATCH_DIR}/patchJavadocWarnings.txt" > "${PATCH_DIR}/patchJavadocWarningsFiltered.txt" - ${DIFF} -u "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarningsFiltered.txt" \ - "${PATCH_DIR}/patchJavadocWarningsFiltered.txt" \ - > "${PATCH_DIR}/diffJavadocWarnings.txt" - rm -f "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarningsFiltered.txt" "${PATCH_DIR}/patchJavadocWarningsFiltered.txt" - - add_jira_table -1 javadoc "The applied patch generated "\ - "$((numPatchJavadocWarnings-numBranchJavadocWarnings))" \ - " additional warning messages." - add_jira_footer javadoc "@@BASE@@/diffJavadocWarnings.txt" - return 1 - fi + authorTags=$("${GREP}" -c -i '^[^-].*@author' "${PATCH_DIR}/patch") + echo "There appear to be ${authorTags} @author tags in the patch." 
+ if [[ ${authorTags} != 0 ]] ; then + add_vote_table -1 @author \ + "The patch appears to contain ${authorTags} @author tags which the" \ + " community has agreed to not allow in code contributions." + return 1 fi - add_jira_table +1 javadoc "There were no new javadoc warning messages." + add_vote_table +1 @author "The patch does not contain any @author tags." return 0 } -## @description Make sure site still compiles +## @description Check the patch file for changed/new tests ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_site +function check_modified_unittests { - local -r mypwd=$(pwd) + local testReferences=0 + local i - verify_needed_test site + big_console_header "Checking there are new or changed tests in the patch." + + verify_needed_test unit if [[ $? == 0 ]]; then - echo "This patch does not appear to need site checks." + echo "Patch does not appear to need new or modified tests." return 0 fi - big_console_header "Determining if patched site still builds" - start_clock - echo "site creation for ${mypwd}" - echo_and_redirect "${PATCH_DIR}/patchSiteWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess - if [[ $? != 0 ]] ; then - echo "Site compilation is broken" - add_jira_table -1 site "Site compilation is broken." - add_jira_footer site "@@BASE@@/patchSiteWarnings.txt" + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ /test/ ]]; then + ((testReferences=testReferences + 1)) + fi + done + + echo "There appear to be ${testReferences} test file(s) referenced in the patch." + if [[ ${testReferences} == 0 ]] ; then + add_vote_table -1 "test4tests" \ + "The patch doesn't appear to include any new or modified tests. " \ + "Please justify why no new tests are needed for this patch." \ + "Also please list what manual steps were performed to verify this patch." return 1 fi - add_jira_table +1 site "Site still builds." + add_vote_table +1 "test4tests" \ + "The patch appears to include ${testReferences} new or modified test files." 
return 0 } -## @description Helper for check_javac +## @description Helper for check_patch_javac ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function count_javac_warns +function count_javac_probs { local warningfile=$1 - #shellcheck disable=SC2016,SC2046 - return $(${AWK} 'BEGIN {total = 0} {total += 1} END {print total}' "${warningfile}") + local val1 + local val2 + + case ${BUILDTOOL} in + maven) + #shellcheck disable=SC2016,SC2046 + ${AWK} 'BEGIN {total = 0} {total += 1} END {print total}' "${warningfile}" + ;; + ant) + #shellcheck disable=SC2016 + val1=$(${GREP} -E "\[javac\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + #shellcheck disable=SC2016 + val2=$(${GREP} -E "\[javac\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + echo $((val1+val2)) + ;; + esac } ## @description Count and compare the number of javac warnings pre- and post- patch @@ -1726,387 +2390,352 @@ function count_javac_warns ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_javac +function check_patch_javac { - local branchJavacWarnings - local patchJavacWarnings + local i + local result=0 + local fn + local -r savejavahome=${JAVA_HOME} + local multijdkmode=false + local jdk="" + local jdkindex=0 + local statusjdk + declare -i numbranch=0 + declare -i numpatch=0 + + big_console_header "Determining number of patched javac errors" verify_needed_test javac if [[ $? == 0 ]]; then - echo "This patch does not appear to need javac checks." + echo "Patch does not appear to need javac tests." return 0 fi - big_console_header "Determining number of patched javac warnings." + verify_multijdk_test javac + if [[ $? == 1 ]]; then + multijdkmode=true + fi - start_clock + for jdkindex in ${JDK_DIR_LIST}; do + if [[ ${multijdkmode} == true ]]; then + JAVA_HOME=${jdkindex} + jdk=$(report_jvm_version "${JAVA_HOME}") + yetus_debug "Using ${JAVA_HOME} to run this set of tests" + statusjdk=" with JDK v${jdk}" + jdk="-jdk${jdk}" + jdk=${jdk// /} + fi - echo_and_redirect "${PATCH_DIR}/patchJavacWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess ${NATIVE_PROFILE} -Ptest-patch - if [[ $? != 0 ]] ; then - add_jira_table -1 javac "The patch appears to cause the build to fail." - return 2 - fi - ### Compare ${PATCH_BRANCH} and patch javac warning numbers - if [[ -f ${PATCH_DIR}/patchJavacWarnings.txt ]] ; then - ${GREP} '\[WARNING\]' "${PATCH_DIR}/${PATCH_BRANCH}JavacWarnings.txt" > "${PATCH_DIR}/filtered${PATCH_BRANCH}JavacWarnings.txt" - ${GREP} '\[WARNING\]' "${PATCH_DIR}/patchJavacWarnings.txt" > "${PATCH_DIR}/filteredPatchJavacWarnings.txt" + personality_modules patch javac + + case ${BUILDTOOL} in + maven) + modules_workers patch javac clean compile + ;; + ant) + modules_workers patch javac + ;; + *) + yetus_error "ERROR: Unsupported build tool." + return 1 + ;; + esac - count_javac_warns "${PATCH_DIR}/filtered${PATCH_BRANCH}JavacWarnings.txt" - branchJavacWarnings=$? - count_javac_warns "${PATCH_DIR}/filteredPatchJavacWarnings.txt" - patchJavacWarnings=$? + i=0 + until [[ ${i} -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi - echo "There appear to be ${branchJavacWarnings} javac compiler warnings before the patch and ${patchJavacWarnings} javac compiler warnings after applying the patch." 
- if [[ ${patchJavacWarnings} != "" && ${branchJavacWarnings} != "" ]] ; then - if [[ ${patchJavacWarnings} -gt ${branchJavacWarnings} ]] ; then + fn=$(module_file_fragment "${MODULE[${i}]}") + fn="${fn}${jdk}" + module_suffix=$(basename "${MODULE[${i}]}") + if [[ ${module_suffix} == \. ]]; then + module_suffix=root + fi - ${DIFF} "${PATCH_DIR}/filtered${PATCH_BRANCH}JavacWarnings.txt" \ - "${PATCH_DIR}/filteredPatchJavacWarnings.txt" \ - > "${PATCH_DIR}/diffJavacWarnings.txt" + # if it was a new module, this won't exist. + if [[ -f "${PATCH_DIR}/branch-javac-${fn}.txt" ]]; then + ${GREP} '\[WARNING\]' "${PATCH_DIR}/branch-javac-${fn}.txt" \ + > "${PATCH_DIR}/branch-javac-${fn}-warning.txt" + else + touch "${PATCH_DIR}/branch-javac-${fn}.txt" \ + "${PATCH_DIR}/branch-javac-${fn}-warning.txt" + fi - add_jira_table -1 javac "The applied patch generated "\ - "$((patchJavacWarnings-branchJavacWarnings))" \ - " additional warning messages." + ${GREP} '\[WARNING\]' "${PATCH_DIR}/patch-javac-${fn}.txt" \ + > "${PATCH_DIR}/patch-javac-${fn}-warning.txt" - add_jira_footer javac "@@BASE@@/diffJavacWarnings.txt" + numbranch=$(count_javac_probs "${PATCH_DIR}/branch-javac-${fn}-warning.txt") + numpatch=$(count_javac_probs "${PATCH_DIR}/patch-javac-${fn}-warning.txt") - return 1 + if [[ -n ${numbranch} + && -n ${numpatch} + && ${numpatch} -gt ${numbranch} ]]; then + + ${DIFF} -u "${PATCH_DIR}/branch-javac-${fn}-warning.txt" \ + "${PATCH_DIR}/patch-javac-${fn}-warning.txt" \ + > "${PATCH_DIR}/javac-${fn}-diff.txt" + + module_status ${i} -1 "javac-${fn}-diff.txt" \ + "Patched ${module_suffix} generated "\ + "$((numpatch-numbranch)) additional warning messages${statusjdk}." \ + + ((result=result+1)) fi - fi - fi + ((i=i+1)) + done - add_jira_table +1 javac "There were no new javac warning messages." + modules_messages patch javac true + done + JAVA_HOME=${savejavahome} + + if [[ ${result} -gt 0 ]]; then + return 1 + fi return 0 } -## @description Verify all files have an Apache License +## @description Helper for check_patch_javadoc ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_apachelicense +function count_javadoc_probs { - big_console_header "Determining number of patched release audit warnings." - - start_clock - - echo_and_redirect "${PATCH_DIR}/patchReleaseAuditOutput.txt" "${MVN}" "${MAVEN_ARGS[@]}" apache-rat:check -D${PROJECT_NAME}PatchProcess - #shellcheck disable=SC2038 - find "${BASEDIR}" -name rat.txt | xargs cat > "${PATCH_DIR}/patchReleaseAuditWarnings.txt" - - ### Compare ${PATCH_BRANCH} and patch release audit warning numbers - if [[ -f ${PATCH_DIR}/patchReleaseAuditWarnings.txt ]] ; then - patchReleaseAuditWarnings=$("${GREP}" -c '\!?????' "${PATCH_DIR}/patchReleaseAuditWarnings.txt") - echo "" - echo "" - echo "There appear to be ${patchReleaseAuditWarnings} release audit warnings after applying the patch." - if [[ ${patchReleaseAuditWarnings} != "" ]] ; then - if [[ ${patchReleaseAuditWarnings} -gt 0 ]] ; then - add_jira_table -1 "release audit" "The applied patch generated ${patchReleaseAuditWarnings} release audit warnings." - - ${GREP} '\!?????' "${PATCH_DIR}/patchReleaseAuditWarnings.txt" \ - > "${PATCH_DIR}/patchReleaseAuditProblems.txt" - - echo "Lines that start with ????? in the release audit "\ - "report indicate files that do not have an Apache license header." 
\ - >> "${PATCH_DIR}/patchReleaseAuditProblems.txt" - - add_jira_footer "Release Audit" "@@BASE@@/patchReleaseAuditProblems.txt" + local warningfile=$1 + local val1 + local val2 - return 1 - fi - fi - fi - add_jira_table 1 "release audit" "The applied patch does not increase the total number of release audit warnings." - return 0 + case ${BUILDTOOL} in + maven) + #shellcheck disable=SC2016,SC2046 + ${GREP} -E "^[0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$1} END {print sum}' + ;; + ant) + #shellcheck disable=SC2016 + val1=$(${GREP} -E "\[javadoc\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + #shellcheck disable=SC2016 + val2=$(${GREP} -E "\[javadoc\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + echo $((val1+val2)) + ;; + esac } -## @description Verify mvn install works +## @description Count and compare the number of JavaDoc warnings pre- and post- patch ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_mvn_install +function check_patch_javadoc { - local retval + local i + local result=0 + local fn + local -r savejavahome=${JAVA_HOME} + local multijdkmode=false + local jdk="" + local jdkindex=0 + local statusjdk + declare -i numbranch=0 + declare -i numpatch=0 + + big_console_header "Determining number of patched javadoc warnings" verify_needed_test javadoc - retval=$? + if [[ $? == 0 ]]; then + echo "Patch does not appear to need javadoc tests." + return 0 + fi - verify_needed_test javac - ((retval = retval + $? )) + verify_multijdk_test javadoc + if [[ $? == 1 ]]; then + multijdkmode=true + fi + + for jdkindex in ${JDK_DIR_LIST}; do + if [[ ${multijdkmode} == true ]]; then + JAVA_HOME=${jdkindex} + jdk=$(report_jvm_version "${JAVA_HOME}") + yetus_debug "Using ${JAVA_HOME} to run this set of tests" + statusjdk=" with JDK v${jdk}" + jdk="-jdk${jdk}" + jdk=${jdk// /} + fi + + personality_modules patch javadoc + case ${BUILDTOOL} in + maven) + modules_workers patch javadoc clean javadoc:javadoc + ;; + ant) + modules_workers patch javadoc clean javadoc + ;; + *) + yetus_error "ERROR: Unsupported build tool." + return 1 + ;; + esac + + i=0 + until [[ ${i} -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi + + fn=$(module_file_fragment "${MODULE[${i}]}") + fn="${fn}${jdk}" + numbranch=$(count_javadoc_probs "${PATCH_DIR}/branch-javadoc-${fn}.txt") + numpatch=$(count_javadoc_probs "${PATCH_DIR}/patch-javadoc-${fn}.txt") + + if [[ -n ${numbranch} + && -n ${numpatch} + && ${numpatch} -gt ${numbranch} ]] ; then + + if [[ -f "${PATCH_DIR}/branch-javadoc-${fn}.txt" ]]; then + ${GREP} -i warning "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ + > "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" + else + touch "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ + "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" + fi + + ${GREP} -i warning "${PATCH_DIR}/patch-javadoc-${fn}.txt" \ + > "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" - if [[ ${retval} == 0 ]]; then - echo "This patch does not appear to need mvn install checks." 
- return 0 - fi + ${DIFF} -u "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" \ + "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" \ + > "${PATCH_DIR}/javadoc-${fn}-diff.txt" + rm -f "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" \ + "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" - big_console_header "Installing all of the jars" + module_status ${i} -1 "javadoc-${fn}-diff.txt" \ + "Patched ${MODULE[${i}]} generated "\ + "$((numpatch-numbranch)) additional warning messages${statusjdk}." - start_clock - echo_and_redirect "${PATCH_DIR}/jarinstall.txt" "${MVN}" "${MAVEN_ARGS[@]}" install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess - retval=$? - if [[ ${retval} != 0 ]]; then - add_jira_table -1 install "The patch causes mvn install to fail." - else - add_jira_table +1 install "mvn install still works." - fi - return ${retval} -} + ((result=result+1)) + fi + ((i=i+1)) + done -## @description are the needed bits for findbugs present? -## @audience private -## @stability evolving -## @replaceable no -## @return 0 findbugs will work for our use -## @return 1 findbugs is missing some component -function findbugs_is_installed -{ - if [[ ! -e "${FINDBUGS_HOME}/bin/findbugs" ]]; then - printf "\n\n%s is not executable.\n\n" "${FINDBUGS_HOME}/bin/findbugs" - add_jira_table -1 findbugs "Findbugs is not installed." + modules_messages patch javadoc true + done + JAVA_HOME=${savejavahome} + + if [[ ${result} -gt 0 ]]; then return 1 fi return 0 } -## @description Run the maven findbugs plugin and record found issues in a bug database +## @description Make sure site still compiles ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function findbugs_mvnrunner +function check_site { - local name=$1 - local logfile=$2 - local warnings_file=$3 + local result=0 - echo_and_redirect "${logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean test findbugs:findbugs -DskipTests \ - "-D${PROJECT_NAME}PatchProcess" < /dev/null - if [[ $? != 0 ]]; then - return 1 + if [[ ${BUILDTOOL} != maven ]]; then + return 0 fi - cp target/findbugsXml.xml "${warnings_file}.xml" - "${FINDBUGS_HOME}/bin/setBugDatabaseInfo" -name "${name}" \ - "${warnings_file}.xml" "${warnings_file}.xml" - if [[ $? != 0 ]]; then - return 1 + big_console_header "Determining number of patched site errors" + + verify_needed_test site + if [[ $? == 0 ]]; then + echo "Patch does not appear to need site tests." + return 0 fi - "${FINDBUGS_HOME}/bin/convertXmlToText" -html "${warnings_file}.xml" \ - "${warnings_file}.html" - if [[ $? != 0 ]]; then + personality_modules patch site + modules_workers patch site clean site site:stage -Dmaven.javadoc.skip=true + result=$? + modules_messages patch site true + if [[ ${result} != 0 ]]; then return 1 fi - return 0 } -## @description Track pre-existing findbugs warnings +## @description Verify mvn install works ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function precheck_findbugs +function precheck_mvninstall { - local -r mypwd=$(pwd) - local module_suffix - local modules=${CHANGED_MODULES} - local module - local findbugs_version - local rc=0 - local module_findbugs_warnings - local findbugs_warnings=0 + local result=0 - verify_needed_test findbugs - - if [[ $? == 0 ]]; then - echo "Patch does not appear to need findbugs tests." + if [[ ${BUILDTOOL} != maven ]]; then return 0 fi - echo "findbugs baseline for ${mypwd}" - - findbugs_is_installed - if [[ $? 
!= 0 ]]; then - return 1 - fi - - for module in ${modules} - do - pushd "${module}" >/dev/null - echo " Running findbugs in ${module}" - module_suffix=$(basename "${module}") - findbugs_mvnrunner "${PATCH_BRANCH}" \ - "${PATCH_DIR}/${PATCH_BRANCH}FindBugsOutput${module_suffix}.txt" \ - "${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}" - (( rc = rc + $? )) - - if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" ]]; then - #shellcheck disable=SC2016 - module_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first \ - "${PATCH_BRANCH}" \ - "${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}".xml \ - "${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}".xml \ - | ${AWK} '{print $1}') - if [[ $? != 0 ]]; then - popd >/dev/null - return 1 - fi - - findbugs_warnings=$((findbugs_warnings+module_findbugs_warnings)) - - if [[ ${module_findbugs_warnings} -gt 0 ]] ; then - add_jira_footer "Pre-patch Findbugs warnings" "@@BASE@@/${PATCH_BRANCH}FindbugsWarnings${module_suffix}.html" - fi - fi - popd >/dev/null - done + big_console_header "Verifying mvn install works" - #shellcheck disable=SC2016 - findbugs_version=$(${AWK} 'match($0, /findbugs-maven-plugin:[^:]*:findbugs/) { print substr($0, RSTART + 22, RLENGTH - 31); exit }' "${PATCH_DIR}/${PATCH_BRANCH}FindBugsOutput${module_suffix}.txt") + verify_needed_test javadoc + retval=$? - if [[ ${rc} -ne 0 ]]; then - echo "Pre-patch ${PATCH_BRANCH} findbugs is broken?" - add_jira_table -1 pre-patch "Findbugs (version ${findbugs_version}) appears to be broken on ${PATCH_BRANCH}." - return 1 + verify_needed_test javac + ((retval = retval + $? )) + if [[ ${retval} == 0 ]]; then + echo "This patch does not appear to need mvn install checks." + return 0 fi - if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" && \ - ${findbugs_warnings} -gt 0 ]] ; then - echo "Pre-patch ${PATCH_BRANCH} findbugs has ${findbugs_warnings} warnings." - add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} has ${findbugs_warnings} extant Findbugs (version ${findbugs_version}) warnings." + personality_modules branch mvninstall + modules_workers branch mvninstall -fae clean install -Dmaven.javadoc.skip=true + result=$? + modules_messages branch mvninstall true + if [[ ${result} != 0 ]]; then return 1 fi - return 0 } -## @description Verify patch does not trigger any findbugs warnings +## @description Verify mvn install works ## @audience private ## @stability evolving ## @replaceable no ## @return 0 on success ## @return 1 on failure -function check_findbugs +function check_mvninstall { - local rc=0 - local module - local modules=${CHANGED_MODULES} - local module_suffix - local combined_xml - local newBugs - local new_findbugs_warnings - local new_findbugs_fixed_warnings - local findbugs_warnings=0 - local findbugs_fixed_warnings=0 - local line - local firstpart - local secondpart - local findbugs_version + local result=0 - verify_needed_test findbugs - - if [[ $? == 0 ]]; then + if [[ ${BUILDTOOL} != maven ]]; then return 0 fi - big_console_header "Determining number of patched Findbugs warnings." + big_console_header "Verifying mvn install still works" - start_clock + verify_needed_test javadoc + retval=$? - findbugs_is_installed - if [[ $? != 0 ]]; then - return 1 + verify_needed_test javac + ((retval = retval + $? )) + if [[ ${retval} == 0 ]]; then + echo "This patch does not appear to need mvn install checks." 
+ return 0 fi - for module in ${modules} - do - pushd "${module}" >/dev/null - echo " Running findbugs in ${module}" - module_suffix=$(basename "${module}") - - findbugs_mvnrunner patch \ - "${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt" \ - "${PATCH_DIR}/patchFindbugsWarnings${module_suffix}" - - if [[ $? != 0 ]] ; then - ((rc = rc +1)) - echo "Post-patch findbugs compilation is broken." - add_jira_table -1 findbugs "Post-patch findbugs ${module} compilation is broken." - continue - fi - - combined_xml="$PATCH_DIR/combinedFindbugsWarnings${module_suffix}.xml" - newBugs="${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}" - "${FINDBUGS_HOME}/bin/computeBugHistory" -useAnalysisTimes -withMessages \ - -output "${combined_xml}" \ - "${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}.xml" \ - "${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml" - if [[ $? != 0 ]]; then - popd >/dev/null - return 1 - fi - - #shellcheck disable=SC2016 - new_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first patch \ - "${combined_xml}" "${newBugs}.xml" | ${AWK} '{print $1}') - if [[ $? != 0 ]]; then - popd >/dev/null - return 1 - fi - #shellcheck disable=SC2016 - new_findbugs_fixed_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -fixed patch \ - "${combined_xml}" "${newBugs}.xml" | ${AWK} '{print $1}') - if [[ $? != 0 ]]; then - popd >/dev/null - return 1 - fi - - echo "Found ${new_findbugs_warnings} new Findbugs warnings and ${new_findbugs_fixed_warnings} newly fixed warnings." - findbugs_warnings=$((findbugs_warnings+new_findbugs_warnings)) - findbugs_fixed_warnings=$((findbugs_fixed_warnings+new_findbugs_fixed_warnings)) - - "${FINDBUGS_HOME}/bin/convertXmlToText" -html "${newBugs}.xml" \ - "${newBugs}.html" - if [[ $? != 0 ]]; then - popd >/dev/null - return 1 - fi - - if [[ ${new_findbugs_warnings} -gt 0 ]] ; then - populate_test_table FindBugs "module:${module_suffix}" - while read line; do - firstpart=$(echo "${line}" | cut -f2 -d:) - secondpart=$(echo "${line}" | cut -f9- -d' ') - add_jira_test_table "" "${firstpart}:${secondpart}" - done < <("${FINDBUGS_HOME}/bin/convertXmlToText" "${newBugs}.xml") - - add_jira_footer "Findbugs warnings" "@@BASE@@/newPatchFindbugsWarnings${module_suffix}.html" - fi - - popd >/dev/null - done - - #shellcheck disable=SC2016 - findbugs_version=$(${AWK} 'match($0, /findbugs-maven-plugin:[^:]*:findbugs/) { print substr($0, RSTART + 22, RLENGTH - 31); exit }' "${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt") - - if [[ ${findbugs_warnings} -gt 0 ]] ; then - add_jira_table -1 findbugs "The patch appears to introduce ${findbugs_warnings} new Findbugs (version ${findbugs_version}) warnings." + personality_modules patch mvninstall + modules_workers patch mvninstall install -Dmaven.javadoc.skip=true + result=$? + modules_messages patch mvninstall true + if [[ ${result} != 0 ]]; then return 1 fi - - if [[ ${findbugs_fixed_warnings} -gt 0 ]] ; then - add_jira_table +1 findbugs "The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings, and fixes ${findbugs_fixed_warnings} pre-existing warnings." - else - add_jira_table +1 findbugs "The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings." - fi return 0 } @@ -2118,7 +2747,11 @@ function check_findbugs ## @return 1 on failure function check_mvn_eclipse { - big_console_header "Running mvn eclipse:eclipse." 
+ if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + big_console_header "Verifying mvn eclipse:eclipse still works" verify_needed_test javac if [[ $? == 0 ]]; then @@ -2126,14 +2759,13 @@ function check_mvn_eclipse return 0 fi - start_clock - - echo_and_redirect "${PATCH_DIR}/patchEclipseOutput.txt" "${MVN}" "${MAVEN_ARGS[@]}" eclipse:eclipse -D${PROJECT_NAME}PatchProcess - if [[ $? != 0 ]] ; then - add_jira_table -1 eclipse:eclipse "The patch failed to build with eclipse:eclipse." + personality_modules patch eclipse + modules_workers patch eclipse eclipse:eclipse + result=$? + modules_messages patch eclipse true + if [[ ${result} != 0 ]]; then return 1 fi - add_jira_table +1 eclipse:eclipse "The patch built with eclipse:eclipse." return 0 } @@ -2152,10 +2784,10 @@ function populate_test_table for i in "$@"; do if [[ -z "${first}" ]]; then - add_jira_test_table "${reason}" "${i}" + add_test_table "${reason}" "${i}" first="${reason}" else - add_jira_test_table " " "${i}" + add_test_table " " "${i}" fi done } @@ -2168,6 +2800,20 @@ function populate_test_table ## @return 1 on failure function check_unittests { + local i + local failed_tests="" + local test_timeouts="" + local test_logfile + local module_test_timeouts="" + local result=0 + local -r savejavahome=${JAVA_HOME} + local multijdkmode=false + local jdk="" + local jdkindex=0 + local statusjdk + + big_console_header "Running unit tests" + verify_needed_test unit if [[ $? == 0 ]]; then @@ -2175,128 +2821,92 @@ function check_unittests return 0 fi - big_console_header "Running unit tests" - - start_clock + verify_multijdk_test unit + if [[ $? == 1 ]]; then + multijdkmode=true + fi - local failed_tests="" - local modules=${CHANGED_MODULES} - local building_common=0 - local hdfs_modules - local ordered_modules="" - local failed_test_builds="" - local test_timeouts="" - local test_logfile - local test_build_result - local module_test_timeouts="" - local result - local totalresult=0 - local module_prefix - - # - # If we are building hadoop-hdfs-project, we must build the native component - # of hadoop-common-project first. In order to accomplish this, we move the - # hadoop-hdfs subprojects to the end of the list so that common will come - # first. - # - # Of course, we may not be building hadoop-common at all-- in this case, we - # explicitly insert a mvn compile -Pnative of common, to ensure that the - # native libraries show up where we need them. - # - - for module in ${modules}; do - if [[ ${module} == hadoop-hdfs-project* ]]; then - hdfs_modules="${hdfs_modules} ${module}" - elif [[ ${module} == hadoop-common-project* ]]; then - ordered_modules="${ordered_modules} ${module}" - building_common=1 - else - ordered_modules="${ordered_modules} ${module}" + for jdkindex in ${JDK_DIR_LIST}; do + if [[ ${multijdkmode} == true ]]; then + JAVA_HOME=${jdkindex} + jdk=$(report_jvm_version "${JAVA_HOME}") + statusjdk="JDK v${jdk} " + jdk="-jdk${jdk}" + jdk=${jdk// /} fi - done - if [[ -n "${hdfs_modules}" ]]; then - ordered_modules="${ordered_modules} ${hdfs_modules}" - if [[ ${building_common} -eq 0 ]]; then - echo " Building hadoop-common with -Pnative in order to provide libhadoop.so to the hadoop-hdfs unit tests." - echo_and_redirect "${PATCH_DIR}/testrun_native.txt" "${MVN}" "${MAVEN_ARGS[@]}" compile ${NATIVE_PROFILE} "-D${PROJECT_NAME}PatchProcess" - if [[ $? 
!= 0 ]]; then - add_jira_table -1 "native" "Failed to build the native portion " \ - "of hadoop-common prior to running the unit tests in ${ordered_modules}" + personality_modules patch unit + case ${BUILDTOOL} in + maven) + modules_workers patch unit clean install -fae + ;; + ant) + modules_workers patch unit + ;; + *) + yetus_error "ERROR: Unsupported build tool." return 1 - else - add_jira_table +1 "native" "Pre-build of native portion" - fi - fi - fi + ;; + esac + ((result=result+$?)) - for module in ${ordered_modules}; do - result=0 - start_clock - pushd "${module}" >/dev/null - module_suffix=$(basename "${module}") - module_prefix=$(echo "${module}" | cut -f2 -d- ) - - test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt - echo " Running tests in ${module_suffix}" - echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} -D${PROJECT_NAME}PatchProcess - test_build_result=$? - - add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt" - - # shellcheck disable=2016 - module_test_timeouts=$(${AWK} '/^Running / { if (last) { print last } last=$2 } /^Tests run: / { last="" }' "${test_logfile}") - if [[ -n "${module_test_timeouts}" ]] ; then - test_timeouts="${test_timeouts} ${module_test_timeouts}" - result=1 + modules_messages patch unit false + if [[ ${result} == 0 ]]; then + continue fi - #shellcheck disable=SC2026,SC2038,SC2016 - module_failed_tests=$(find . -name 'TEST*.xml'\ - | xargs "${GREP}" -l -E "/dev/null + pushd "${MODULE[${i}]}" >/dev/null + #shellcheck disable=SC2026,SC2038,SC2016 + module_failed_tests=$(find . -name 'TEST*.xml'\ + | xargs "${GREP}" -l -E "/dev/null + + if [[ -n "${module_failed_tests}" ]] ; then + failed_tests="${failed_tests} ${module_failed_tests}" + ((result=result+1)) + fi + + ((i=i+1)) + done + + if [[ -n "${failed_tests}" ]] ; then + # shellcheck disable=SC2086 + populate_test_table "${statusjdk}Failed unit tests" ${failed_tests} + failed_tests="" + fi + if [[ -n "${test_timeouts}" ]] ; then + # shellcheck disable=SC2086 + populate_test_table "${statusjdk}Timed out tests" ${test_timeouts} + test_timeouts="" fi - ((totalresult = totalresult + result)) done - - if [[ -n "${failed_tests}" ]] ; then - # shellcheck disable=SC2086 - populate_test_table "Failed unit tests" ${failed_tests} - fi - if [[ -n "${test_timeouts}" ]] ; then - # shellcheck disable=SC2086 - populate_test_table "Timed out tests" ${test_timeouts} - fi - if [[ -n "${failed_test_builds}" ]] ; then - # shellcheck disable=SC2086 - populate_test_table "Failed build" ${failed_test_builds} - fi + JAVA_HOME=${savejavahome} if [[ ${JENKINS} == true ]]; then - add_jira_footer "Test Results" "${BUILD_URL}testReport/" + add_footer_table "${statusjdk} Test Results" "${BUILD_URL}testReport/" fi - if [[ ${totalresult} -gt 0 ]]; then + if [[ ${result} -gt 0 ]]; then return 1 - else - return 0 fi + return 0 } ## @description Print out the finished details on the console @@ -2310,7 +2920,7 @@ function output_to_console { local result=$1 shift - local i + local i=0 local ourstring local vote local subs @@ -2359,25 +2969,24 @@ function output_to_console rm "${spcfx}" fi - seccoladj=$(findlargest 2 "${JIRA_COMMENT_TABLE[@]}") + seccoladj=$(findlargest 2 "${TP_VOTE_TABLE[@]}") if [[ ${seccoladj} -lt 10 ]]; then seccoladj=10 fi seccoladj=$((seccoladj + 2 )) i=0 - until [[ $i -eq ${#JIRA_HEADER[@]} ]]; do - printf "%s\n" "${JIRA_HEADER[${i}]}" + until [[ $i -eq ${#TP_HEADER[@]} ]]; do + printf "%s\n" 
"${TP_HEADER[${i}]}" ((i=i+1)) done printf "| %s | %*s | %s | %s\n" "Vote" ${seccoladj} Subsystem Runtime "Comment" echo "============================================================================" i=0 - until [[ $i -eq ${#JIRA_COMMENT_TABLE[@]} ]]; do - ourstring=$(echo "${JIRA_COMMENT_TABLE[${i}]}" | tr -s ' ') + until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do + ourstring=$(echo "${TP_VOTE_TABLE[${i}]}" | tr -s ' ') vote=$(echo "${ourstring}" | cut -f2 -d\|) - vote=$(colorstripper "${vote}") subs=$(echo "${ourstring}" | cut -f3 -d\|) ela=$(echo "${ourstring}" | cut -f4 -d\|) comment=$(echo "${ourstring}" | cut -f5 -d\|) @@ -2396,12 +3005,12 @@ function output_to_console rm "${commentfile2}" "${commentfile1}" 2>/dev/null done - if [[ ${#JIRA_TEST_TABLE[@]} -gt 0 ]]; then - seccoladj=$(findlargest 1 "${JIRA_TEST_TABLE[@]}") + if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then + seccoladj=$(findlargest 1 "${TP_TEST_TABLE[@]}") printf "\n\n%*s | Tests\n" "${seccoladj}" "Reason" i=0 - until [[ $i -eq ${#JIRA_TEST_TABLE[@]} ]]; do - ourstring=$(echo "${JIRA_TEST_TABLE[${i}]}" | tr -s ' ') + until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do + ourstring=$(echo "${TP_TEST_TABLE[${i}]}" | tr -s ' ') vote=$(echo "${ourstring}" | cut -f2 -d\|) subs=$(echo "${ourstring}" | cut -f3 -d\|) printf "%*s | %s\n" "${seccoladj}" "${vote}" "${subs}" @@ -2413,87 +3022,21 @@ function output_to_console echo "============================================================================" i=0 - until [[ $i -eq ${#JIRA_FOOTER_TABLE[@]} ]]; do - comment=$(echo "${JIRA_FOOTER_TABLE[${i}]}" | + until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do + comment=$(echo "${TP_FOOTER_TABLE[${i}]}" | ${SED} -e "s,@@BASE@@,${PATCH_DIR},g") printf "%s\n" "${comment}" ((i=i+1)) done } -## @description Print out the finished details to the JIRA issue +## @description Write the final output to the selected bug system ## @audience private ## @stability evolving ## @replaceable no -## @param runresult -function output_to_jira +function output_to_bugsystem { - local result=$1 - local i - local commentfile=${PATCH_DIR}/commentfile - local comment - - rm "${commentfile}" 2>/dev/null - - if [[ ${JENKINS} != "true" ]] ; then - return 0 - fi - - big_console_header "Adding comment to JIRA" - - add_jira_footer "Console output" "${BUILD_URL}console" - - if [[ ${result} == 0 ]]; then - add_jira_header "(/) *{color:green}+1 overall{color}*" - else - add_jira_header "(x) *{color:red}-1 overall{color}*" - fi - - - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" - - i=0 - until [[ $i -eq ${#JIRA_HEADER[@]} ]]; do - printf "%s\n" "${JIRA_HEADER[${i}]}" >> "${commentfile}" - ((i=i+1)) - done - - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" - - echo "|| Vote || Subsystem || Runtime || Comment ||" >> "${commentfile}" - - i=0 - until [[ $i -eq ${#JIRA_COMMENT_TABLE[@]} ]]; do - printf "%s\n" "${JIRA_COMMENT_TABLE[${i}]}" >> "${commentfile}" - ((i=i+1)) - done - - - if [[ ${#JIRA_TEST_TABLE[@]} -gt 0 ]]; then - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" - - echo "|| Reason || Tests ||" >> "${commentfile}" - i=0 - until [[ $i -eq ${#JIRA_TEST_TABLE[@]} ]]; do - printf "%s\n" "${JIRA_TEST_TABLE[${i}]}" >> "${commentfile}" - ((i=i+1)) - done - fi - - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" - - echo "|| Subsystem || Report/Notes ||" >> "${commentfile}" - i=0 - until [[ $i -eq ${#JIRA_FOOTER_TABLE[@]} ]]; do - comment=$(echo "${JIRA_FOOTER_TABLE[${i}]}" | - ${SED} -e "s,@@BASE@@,${BUILD_URL}artifact/patchprocess,g") - printf "%s\n" "${comment}" >> 
"${commentfile}" - ((i=i+1)) - done - - printf "\n\nThis message was automatically generated.\n\n" >> "${commentfile}" - - write_to_jira "${commentfile}" + "${BUGSYSTEM}_finalreport" "${@}" } ## @description Clean the filesystem as appropriate and then exit @@ -2511,9 +3054,9 @@ function cleanup_and_exit # there is no need to move it since we assume that # Jenkins or whatever already knows where it is at # since it told us to put it there! - relative_patchdir >/dev/null + relative_dir "${PATCH_DIR}" >/dev/null if [[ $? == 1 ]]; then - hadoop_debug "mv ${PATCH_DIR} ${BASEDIR}" + yetus_debug "mv ${PATCH_DIR} ${BASEDIR}" mv "${PATCH_DIR}" "${BASEDIR}" fi fi @@ -2532,17 +3075,16 @@ function postcheckout local routine local plugin - for routine in find_java_home verify_patch_file - do + for routine in find_java_home verify_patch_file; do verify_patchdir_still_exists - hadoop_debug "Running ${routine}" + yetus_debug "Running ${routine}" ${routine} (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then output_to_console 1 - output_to_jira 1 + output_to_bugsystem 1 cleanup_and_exit 1 fi done @@ -2552,14 +3094,14 @@ function postcheckout if declare -f ${plugin}_postcheckout >/dev/null 2>&1; then - hadoop_debug "Running ${plugin}_postcheckout" + yetus_debug "Running ${plugin}_postcheckout" #shellcheck disable=SC2086 ${plugin}_postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then output_to_console 1 - output_to_jira 1 + output_to_bugsystem 1 cleanup_and_exit 1 fi fi @@ -2580,7 +3122,7 @@ function preapply do verify_patchdir_still_exists - hadoop_debug "Running ${routine}" + yetus_debug "Running ${routine}" ${routine} (( RESULT = RESULT + $? )) @@ -2591,7 +3133,7 @@ function preapply if declare -f ${plugin}_preapply >/dev/null 2>&1; then - hadoop_debug "Running ${plugin}_preapply" + yetus_debug "Running ${plugin}_preapply" #shellcheck disable=SC2086 ${plugin}_preapply @@ -2610,32 +3152,30 @@ function postapply local plugin local retval - compute_gitdiff "${GITDIFFLINES}" + compute_gitdiff - check_javac + check_patch_javac retval=$? if [[ ${retval} -gt 1 ]] ; then output_to_console 1 - output_to_jira 1 + output_to_bugsystem 1 cleanup_and_exit 1 fi ((RESULT = RESULT + retval)) - for routine in check_javadoc check_apachelicense check_site + for routine in check_patch_javadoc check_site do verify_patchdir_still_exists - hadoop_debug "Running ${routine}" - $routine - + yetus_debug "Running ${routine}" + ${routine} (( RESULT = RESULT + $? )) - done for plugin in ${PLUGINS}; do verify_patchdir_still_exists if declare -f ${plugin}_postapply >/dev/null 2>&1; then - hadoop_debug "Running ${plugin}_postapply" + yetus_debug "Running ${plugin}_postapply" #shellcheck disable=SC2086 ${plugin}_postapply (( RESULT = RESULT + $? )) @@ -2652,24 +3192,19 @@ function postinstall local routine local plugin - for routine in check_mvn_eclipse check_findbugs - do - verify_patchdir_still_exists - hadoop_debug "Running ${routine}" - ${routine} - (( RESULT = RESULT + $? )) - done + verify_patchdir_still_exists + check_mvn_eclipse + (( RESULT = RESULT + $? )) for plugin in ${PLUGINS}; do verify_patchdir_still_exists if declare -f ${plugin}_postinstall >/dev/null 2>&1; then - hadoop_debug "Running ${plugin}_postinstall" + yetus_debug "Running ${plugin}_postinstall" #shellcheck disable=SC2086 ${plugin}_postinstall (( RESULT = RESULT + $? 
)) fi done - } ## @description Driver to execute _tests routines @@ -2692,7 +3227,7 @@ function runtests for plugin in ${PLUGINS}; do verify_patchdir_still_exists if declare -f ${plugin}_tests >/dev/null 2>&1; then - hadoop_debug "Running ${plugin}_tests" + yetus_debug "Running ${plugin}_tests" #shellcheck disable=SC2086 ${plugin}_tests (( RESULT = RESULT + $? )) @@ -2717,13 +3252,49 @@ function importplugins fi if [[ -n "${USER_PLUGIN_DIR}" && -d "${USER_PLUGIN_DIR}" ]]; then - hadoop_debug "Loading user provided plugins from ${USER_PLUGIN_DIR}" + yetus_debug "Loading user provided plugins from ${USER_PLUGIN_DIR}" files=("${files[@]}" ${USER_PLUGIN_DIR}/*.sh) fi for i in "${files[@]}"; do - hadoop_debug "Importing ${i}" - . "${i}" + if [[ -f ${i} ]]; then + yetus_debug "Importing ${i}" + . "${i}" + fi + done + + if [[ -z ${PERSONALITY} + && -f "${BINDIR}/personality/${PROJECT_NAME}.sh" ]]; then + PERSONALITY="${BINDIR}/personality/${PROJECT_NAME}.sh" + fi + + if [[ -n ${PERSONALITY} ]]; then + if [[ ! -f ${PERSONALITY} ]]; then + if [[ -f "${BINDIR}/personality/${PROJECT_NAME}.sh" ]]; then + PERSONALITY="${BINDIR}/personality/${PROJECT_NAME}.sh" + else + yetus_debug "Can't find ${PERSONALITY} to import." + return + fi + fi + yetus_debug "Importing ${PERSONALITY}" + . "${PERSONALITY}" + fi +} + +## @description Let plugins also get a copy of the arguments +## @audience private +## @stability evolving +## @replaceable no +function parse_args_plugins +{ + for plugin in ${PLUGINS} ${BUGSYSTEMS}; do + if declare -f ${plugin}_parse_args >/dev/null 2>&1; then + yetus_debug "Running ${plugin}_parse_args" + #shellcheck disable=SC2086 + ${plugin}_parse_args "$@" + (( RESULT = RESULT + $? )) + fi done } @@ -2736,6 +3307,15 @@ function add_plugin PLUGINS="${PLUGINS} $1" } +## @description Register test-patch.d plugins +## @audience public +## @stability stable +## @replaceable no +function add_bugsystem +{ + BUGSYSTEMS="${BUGSYSTEMS} $1" +} + ############################################################################### ############################################################################### ############################################################################### @@ -2748,11 +3328,11 @@ parse_args "$@" importplugins -locate_patch +parse_args_plugins "$@" -find_changed_files +finish_docker_stats -determine_needed_tests +locate_patch # from here on out, we'll be in ${BASEDIR} for cwd # routines need to pushd/popd if they change. @@ -2764,28 +3344,40 @@ if [[ ${JENKINS} == "true" ]] ; then fi fi +find_changed_files + check_reexec +determine_needed_tests + postcheckout +fullyqualifyjdks + +prepopulate_footer + find_changed_modules preapply apply_patch_file +# we find changed modules again +# in case the patch adds or removes a module +# this also means that test suites need to be +# aware that there might not be a 'before' +find_changed_modules + postapply -check_mvn_install +check_mvninstall postinstall runtests -close_jira_footer - -close_jira_table +finish_vote_table output_to_console ${RESULT} -output_to_jira ${RESULT} +output_to_bugsystem ${RESULT} cleanup_and_exit ${RESULT} diff --git a/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt b/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt index 0ec7a0f75d340..cb2f8887d4774 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt @@ -1 +1,3 @@ Breakdown of HADOOP-12111 sub-tasks: + + HADOOP-12113. 
update test-patch branch to latest code (aw) From 375783793a0b20049964c15eaa9150fbe299d7cf Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 26 Jun 2015 23:58:57 -0700 Subject: [PATCH 003/130] HADOOP-12113. update test-patch branch to latest code (continued) (aw) --- dev-support/docs/precommit-advanced.md | 164 ++++++++ dev-support/docs/precommit-architecture.md | 98 +++++ dev-support/docs/precommit-basic.md | 123 ++++++ dev-support/docs/precommit-glossary.md | 37 ++ dev-support/personality/flink.sh | 151 +++++++ dev-support/personality/hadoop.sh | 217 ++++++++++ dev-support/personality/hbase.sh | 238 +++++++++++ dev-support/personality/pig.sh | 57 +++ dev-support/personality/tajo.sh | 60 +++ dev-support/personality/tez.sh | 66 +++ .../test-patch-docker/Dockerfile-endstub | 19 + .../test-patch-docker/Dockerfile-startstub | 83 ++++ .../test-patch-docker/launch-test-patch.sh | 51 +++ .../test-patch-docker/test-patch-docker.sh | 383 ++++++++++++++++++ dev-support/test-patch.d/apache-rat.sh | 84 ++++ .../test-patch.d/builtin-personality.sh | 157 +++++++ dev-support/test-patch.d/findbugs.sh | 379 +++++++++++++++++ dev-support/test-patch.d/github.sh | 112 +++++ dev-support/test-patch.d/jira.sh | 190 +++++++++ dev-support/test-patch.d/xml.sh | 70 ++++ 20 files changed, 2739 insertions(+) create mode 100644 dev-support/docs/precommit-advanced.md create mode 100644 dev-support/docs/precommit-architecture.md create mode 100644 dev-support/docs/precommit-basic.md create mode 100644 dev-support/docs/precommit-glossary.md create mode 100644 dev-support/personality/flink.sh create mode 100644 dev-support/personality/hadoop.sh create mode 100644 dev-support/personality/hbase.sh create mode 100644 dev-support/personality/pig.sh create mode 100644 dev-support/personality/tajo.sh create mode 100644 dev-support/personality/tez.sh create mode 100644 dev-support/test-patch-docker/Dockerfile-endstub create mode 100644 dev-support/test-patch-docker/Dockerfile-startstub create mode 100644 dev-support/test-patch-docker/launch-test-patch.sh create mode 100644 dev-support/test-patch-docker/test-patch-docker.sh create mode 100644 dev-support/test-patch.d/apache-rat.sh create mode 100644 dev-support/test-patch.d/builtin-personality.sh create mode 100644 dev-support/test-patch.d/findbugs.sh create mode 100644 dev-support/test-patch.d/github.sh create mode 100644 dev-support/test-patch.d/jira.sh create mode 100644 dev-support/test-patch.d/xml.sh diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md new file mode 100644 index 0000000000000..0a7eac5e04034 --- /dev/null +++ b/dev-support/docs/precommit-advanced.md @@ -0,0 +1,164 @@ + + +test-patch +========== + +* [Docker Support](#Docker_Support) +* [Maven Profiles](#Maven_Profiles) +* [Plug-ins](#Plug-ins) +* [Configuring for Other Projects](#Configuring_for_Other_Projects) + +# Docker Support + +By default, test-patch runs in the same shell where it was launched. It can alternatively use Docker to launch itself into a container. This is particularly useful if running under a QA environment that does not provide all the necessary binaries. For example, the patch requires a newer version of Java. + +The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile. Be aware that test-patch will copy this file and append its necessary hooks in order to execute. 
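+
+For example, a run against a JIRA-provided patch might look like this (the Dockerfile path here is purely illustrative):
+
+```bash
+$ dev-support/test-patch.sh (other options) \
+    --docker \
+    --dockerfile=/path/to/Dockerfile \
+    HADOOP-9905
+```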
+ +test-patch includes code to automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode. In this way, if Docker fails to build the image, the disk space should eventually return. + +# Maven Profiles + +By default, test-patch will pass -Ptest-patch and -D${PROJECT_NAME}PatchProcess to Maven. This will allow you to configure special actions that should only happen when running underneath test-patch. + + +# Plug-ins + +test-patch allows one to add to its basic feature set via plug-ins. There is a directory called test-patch.d off of the directory where test-patch.sh lives. Inside this directory one may place some bash shell fragments that, if setup with proper functions, will allow for test-patch to call it as necessary. + + +Every plugin must have one line in order to be recognized: + +```bash +add_plugin +``` + +This function call registers the `pluginname` so that test-patch knows that it exists. This plug-in name also acts as the key to the custom functions that you can define. For example: + +```bash +function pluginname_filefilter +``` + +This function gets called for every file that a patch may contain. This allows the plug-in author to determine if this plug-in should be called, what files it might need to analyze, etc. + +Similarly, there are other functions that may be defined during the test-patch run: + +* pluginname_postcheckout + - executed prior to the patch being applied but after the git repository is setup. This is useful for any early error checking that might need to be done before any heavier work. + +* pluginname_preapply + - executed prior to the patch being applied. This is useful for any "before"-type data collection for later comparisons + +* pluginname_postapply + - executed after the patch has been applied. This is useful for any "after"-type data collection. + +* pluginname_postinstall + - executed after the mvn install test has been done. If any tests require the Maven repository to be up-to-date with the contents of the patch, this is the place. + +* pluginname_tests + - executed after the unit tests have completed. + +If the plug-in has some specific options, one can use following functions: + +* pluginname_usage + + - executed when the help message is displayed. This is used to display the plug-in specific options for the user. + +* pluginname_parse_args + + - executed prior to any other above functions except for pluginname_usage. This is useful for parsing the arguments passed from the user and setting up the execution environment. + + HINT: It is recommend to make the pluginname relatively small, 10 characters at the most. Otherwise the ASCII output table may be skewed. + + +# Configuring for Other Projects + +It is impossible for any general framework to be predictive about what types of special rules any given project may have, especially when it comes to ordering and Maven profiles. In order to assist non-Hadoop projects, a project `personality` should be added that enacts these custom rules. + +A personality consists of two functions. One that determines which test types to run and another that allows a project to dictate ordering rules, flags, and profiles on a per-module, per-test run. + +There can be only **one** of each personality function defined. + +## Test Determination + +The `personality_file_tests` function determines which tests to turn on based upon the file name. It is realtively simple. 
For example, to turn on a full suite of tests for Java files: + +```bash +function personality_file_tests +{ + local filename=$1 + + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + add_test javac + add_test javadoc + add_test mvninstall + add_test unit + fi + +} +``` + +The `add_test` function is used to activate the standard tests. Additional plug-ins (such as checkstyle), will get queried on their own. + +## Module & Profile Determination + +Once the tests are determined, it is now time to pick which [modules](precommit-glossary.md#genericoutside-definitions) should get used. That's the job of the `personality_modules` function. + +```bash +function personality_modules +{ + + clear_personality_queue + +... + + personality_enqueue_module + +} +``` + +It takes exactly two parameters `repostatus` and `testtype`. + +The `repostatus` parameter tells the `personality` function exactly what state the repository is in. It can only be in one of two states: `branch` or `patch`. `branch` means the patch has not been applied. The `patch` state is after the patch has been applied. + +The `testtype` state tells the personality exactly which test is about to be executed. + +In order to communicate back to test-patch, there are two functions for the personality to use. + +The first is `clear_personality_queue`. This removes the previous test's configuration so that a new module queue may be built. + +The second is `personality_enqueue_module`. This function takes two parameters. The first parameter is the name of the module to add to this test's queue. The second parameter is an option list of additional flags to pass to Maven when processing it. `personality_enqueue_module` may be called as many times as necessary for your project. + + NOTE: A module name of . signifies the root of the repository. + +For example, let's say your project uses a special configuration to skip unit tests (-DskipTests). Running unit tests during a javadoc build isn't very interesting. We can write a simple personality check to disable the unit tests: + + +```bash +function personality_modules +{ + local repostatus=$1 + local testtype=$2 + + if [[ ${testtype} == 'javadoc' ]]; then + personality_enqueue_module . -DskipTests + return + fi + ... + +``` + +This function will tell test-patch that when the javadoc test is being run, do the documentation test at the base of the repository and make sure the -DskipTests flag is passed to Maven. + diff --git a/dev-support/docs/precommit-architecture.md b/dev-support/docs/precommit-architecture.md new file mode 100644 index 0000000000000..c134728cb8eae --- /dev/null +++ b/dev-support/docs/precommit-architecture.md @@ -0,0 +1,98 @@ + + +# Some Philosophy + +* Everyone's time is valuable. The quicker contributors can get feedback and iterate, the more likely their contribution will get checked in. A committer should be able to focus on the core issues of a contribution rather than details that might be able to be determined automatically. + +* Precommit checks should be fast. There is no value in testing parts of the source tree that are not immediately impacted by a change. Unit testing is the target. They are not a replacement for full builds, which is where integration tests should happen. + +* Many open source projects have a desire to have this capability. Why not generalize a solution? + +* In many build systems (especially with maven), a modular design has been picked. Why not leverage that design to make checks faster? 
+
+* Projects that use the same language will, with a high degree of certainty, benefit from the same types of checks.
+
+* Portability matters.
+
+# Phases
+
+test-patch works effectively under several different phases:
+
+## Setup
+
+This is where test-patch configures and validates the environment. Some things done in this phase:
+
+* Defaults
+* Parameter handling
+* Importing plugins and personalities
+* Docker container launching
+* Re-exec support
+* Patch file downloading
+* git repository management (fresh pull, branch switching, etc.)
+
+## Post-checkout
+
+Checks done here are *fatal*.
+
+This acts as a verification of all of the setup parts and is the final place to short-cut the full test cycle. The most significant built-in check done here is verifying that the patch file is valid.
+
+## Pre-apply
+
+This is where the 'before' work is handled. Some things done in this phase:
+
+* The first pass of files and modules that will get patched
+* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc.
+* Author checks
+* Check for modified unit tests
+
+## Patch is Applied
+
+The patch gets applied. Then a second pass is made to determine which modules and files have been changed, in order to handle any modules that might have been added or moved.
+
+## Post-apply
+
+Now that the patch has been applied, many of the same checks performed in the Pre-apply step are done again to build an 'after' picture.
+
+* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc.
+
+## Post-install
+
+Some tests only work correctly when the repo is up-to-date. So mvn install is run to update the local repo and we enter this phase. Tests performed here:
+
+* Verification that maven eclipse integration still works
+* FindBugs
+
+## Unit Tests
+
+Since unit tests are generally the slowest part of the precommit process, they are run last. At this point, all the prerequisites to running them should be in place and ready to go.
+
+## Reporting
+
+Finally, the results are reported to the screen and, optionally, to JIRA.
+
+# Test Flow
+
+The basic workflow for many of the sub-items in individual phases is:
+
+1. Print a header, so the end user knows that something is happening.
+1. Verify whether the test is needed. If so, continue on; otherwise, return success and let the next part of the phase execute.
+1. Ask the personality about what modules and what flags should get used.
+1. Execute maven in the given modules with the given flags. Log the output and record the time and result code.
+1. Do any extra work as appropriate (diffs, counts, etc.) and either accept the status and message given by the maven run or change the vote, message, log file, etc.
+1. Add the outcome(s) to the report generator.
+
+As one can see, the modules list is one of the key inputs into what actually gets executed. As a result, projects must have full flexibility in adding, modifying, or even removing modules from the test list. If a personality removes the entire list of modules, then that test should just be ignored, as the sketch below shows.
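+A minimal sketch of that skip pattern, using only the hooks described above (the test type is purely illustrative):
+
+```bash
+function personality_modules
+{
+  local repostatus=$1
+  local testtype=$2
+
+  clear_personality_queue
+
+  # hypothetical rule: this project never wants the site test,
+  # so leave the module queue empty and the test gets ignored
+  if [[ ${testtype} == 'site' ]]; then
+    return
+  fi
+
+  for module in ${CHANGED_MODULES}; do
+    # shellcheck disable=SC2086
+    personality_enqueue_module ${module}
+  done
+}
+```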
+ diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md new file mode 100644 index 0000000000000..ee2e063c17c07 --- /dev/null +++ b/dev-support/docs/precommit-basic.md @@ -0,0 +1,123 @@ + + +test-patch +========== + +* [Purpose](#Purpose) +* [Pre-requisites](#Pre-requisites) +* [Basic Usage](#Basic_Usage) + +## Purpose + +As part of Hadoop's commit process, all patches to the source base go through a precommit test that does some (usually) light checking to make sure the proposed change does not break unit tests and/or passes some other prerequisites. This is meant as a preliminary check for committers so that the basic patch is in a known state. This check, called test-patch, may also be used by individual developers to verify a patch prior to sending to the Hadoop QA systems. + +Other projects have adopted a similar methodology after seeing great success in the Hadoop model. Some have even gone as far as forking Hadoop's precommit code and modifying it to meet their project's needs. + +This is a modification to Hadoop's version of test-patch so that we may bring together all of these forks under a common code base to help the community as a whole. + + +## Pre-requisites + +test-patch has the following requirements: + +* Ant- or Maven-based project (and ant/maven installed) +* git-based project (and git installed) +* bash v3.2 or higher +* findbugs 3.x installed +* shellcheck installed +* GNU diff +* GNU patch +* POSIX awk +* POSIX grep +* POSIX sed +* wget +* file command +* smart-apply-patch.sh + +Maven plugins requirements: + +* Apache RAT +* Apache FindBugs + +Optional: + +* Apache JIRA-based issue tracking +* JIRA cli tools + +The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options. For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin. + + +## Basic Usage + +This command will execute basic patch testing against a patch file stored in filename: + +```bash +$ cd +$ dev-support/test-patch.sh --dirty-workspace --project=projectname +``` + +The `--dirty-workspace` flag tells test-patch that the repository is not clean and it is ok to continue. This version command does not run the unit tests. + +To do that, we need to provide the --run-tests command: + + +```bash +$ cd +$ dev-support/test-patch.sh --dirty-workspace --run-tests +``` + +This is the same command, but now runs the unit tests. + +A typical configuration is to have two repositories. One with the code you are working on and another, clean repository. This means you can: + +```bash +$ cd +$ git diff --no-prefix trunk > /tmp/patchfile +$ cd ../ +$ /dev-support/test-patch.sh --basedir= --resetrepo /tmp/patchfile +``` + +We used two new options here. --basedir sets the location of the repository to use for testing. --resetrepo tells test patch that it can go into **destructive** mode. Destructive mode will wipe out any changes made to that repository, so use it with care! + +After the tests have run, there is a directory that contains all of the test-patch related artifacts. This is generally referred to as the patchprocess directory. By default, test-patch tries to make something off of /tmp to contain this content. Using the `--patchdir` command, one can specify exactly which directory to use. This is helpful for automated precommit testing so that the Jenkins or other automated workflow system knows where to look to gather up the output. 
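+For example, to pin the artifacts to a known location (the directory is illustrative; `--patch-dir=` is the spelling used by the Docker launcher elsewhere in this patch series):
+
+```bash
+$ dev-support/test-patch.sh --dirty-workspace --patch-dir=/tmp/patchprocess /tmp/patchfile
+```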
+ +## Providing Patch Files + +It is a fairly common practice within the Apache community to use Apache's JIRA instance to store potential patches. As a result, test-patch supports providing just a JIRA issue number. test-patch will find the *last* attachment, download it, then process it. + +For example: + +```bash +$ test-patch.sh (other options) HADOOP-9905 +``` + +... will process the patch file associated with this JIRA issue. + + +A new practice is to use a service such as GitHub and its Pull Request (PR) feature. Luckily, test-patch supports URLs and many services like GitHub provide ways to provide unified diffs via URLs. + +For example: + +```bash +$ test-patch.sh (other options) https://github.com/apache/flink/pull/773.patch +``` + +... will grab a unified diff of PR #773 and process it. + +## In Closing + +test-patch has many other features and command line options for the basic user. Many of these are self-explanatory. To see the list of options, run test-patch.sh without any options or with --help. + + diff --git a/dev-support/docs/precommit-glossary.md b/dev-support/docs/precommit-glossary.md new file mode 100644 index 0000000000000..ca3d15fa79bdf --- /dev/null +++ b/dev-support/docs/precommit-glossary.md @@ -0,0 +1,37 @@ + + +# Glossary + +## Generic/outside definitions: + +Apache's [new contributor documentation](https://community.apache.org/contributors/) and Maven's [glossary](https://maven.apache.org/glossary.html) are great places to start if you are new to Apache or Maven. + +* Module + + Almost the same meaning as "sub-project" on Maven. + +## test-patch specific + +* Personality + + A chunk of shell code that tells test-patch this particular project's needs and special requirements + +* Plug-ins + + Shell code snippets that define new, not built-in test types. + +* Precommit + + An automated process that verifies a patch is "good" prior to a committer looking at it. diff --git a/dev-support/personality/flink.sh b/dev-support/personality/flink.sh new file mode 100644 index 0000000000000..a32e2d643b432 --- /dev/null +++ b/dev-support/personality/flink.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
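+# Flink-specific personality: overrides the branch default and JIRA issue
+# pattern, and registers a 'flinklib' plugin (defined below) that compares
+# the number of files under lib/ before and after the patch.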
+
+#shellcheck disable=SC2034
+PATCH_BRANCH_DEFAULT=master
+#shellcheck disable=SC2034
+ISSUE_RE='^FLINK-[0-9]+$'
+#shellcheck disable=SC2034
+HOW_TO_CONTRIBUTE=""
+
+add_plugin flinklib
+
+function flinklib_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.java$
+    || ${filename} =~ \.scala$
+    || ${filename} =~ pom.xml$ ]]; then
+    add_test flinklib
+  fi
+}
+
+function flinklib_count
+{
+  find "${BASEDIR}" \
+    | ${GREP} "/lib/" \
+    | ${GREP} -v "_qa_workdir" \
+    | wc -l
+}
+
+function flinklib_preapply
+{
+  start_clock
+  big_console_header "${PATCH_BRANCH} flink library dependencies"
+
+  verify_needed_test flinklib
+  if [[ $? == 0 ]]; then
+    echo "Patch does not need flinklib testing."
+    return 0
+  fi
+
+  pushd "${BASEDIR}" >/dev/null
+  echo_and_redirect "${PATCH_DIR}/branch-flinklib-root.txt" \
+    "${MVN}" "${MAVEN_ARGS[@]}" package -DskipTests -Dmaven.javadoc.skip=true -Ptest-patch
+  if [[ $? != 0 ]]; then
+    add_vote_table -1 flinklib "Unable to determine flink libs in ${PATCH_BRANCH}."
+  fi
+  FLINK_PRE_LIB_FILES=$(flinklib_count)
+  popd >/dev/null
+}
+
+function flinklib_postapply
+{
+  start_clock
+  big_console_header "Patch flink library dependencies"
+
+  verify_needed_test flinklib
+  if [[ $? == 0 ]]; then
+    echo "Patch does not need flinklib testing."
+    return 0
+  fi
+
+  pushd "${BASEDIR}" >/dev/null
+  echo_and_redirect "${PATCH_DIR}/patch-flinklib-root.txt" \
+    "${MVN}" "${MAVEN_ARGS[@]}" package -DskipTests -Dmaven.javadoc.skip=true -Ptest-patch
+  FLINK_POST_LIB_FILES=$(flinklib_count)
+  popd >/dev/null
+
+  if [[ "${FLINK_POST_LIB_FILES}" -gt "${FLINK_PRE_LIB_FILES}" ]]; then
+    add_vote_table -1 flinklib "Patch increases lib folder dependencies from " \
+      "${FLINK_PRE_LIB_FILES} to ${FLINK_POST_LIB_FILES}"
+    return 1
+  elif [[ "${FLINK_POST_LIB_FILES}" -eq "${FLINK_PRE_LIB_FILES}" ]]; then
+    add_vote_table 0 flinklib "Patch did not change lib dependencies" \
+      " (still ${FLINK_PRE_LIB_FILES})"
+  else
+    add_vote_table +1 flinklib "Patch decreases lib folder dependencies by " \
+      "$((FLINK_PRE_LIB_FILES-FLINK_POST_LIB_FILES))."
+  fi
+  return 0
+}
+
+function personality_modules
+{
+  local repostatus=$1
+  local testtype=$2
+  local extra=""
+
+  yetus_debug "Personality: ${repostatus} ${testtype}"
+
+  clear_personality_queue
+
+  case ${testtype} in
+    mvninstall)
+      if [[ ${repostatus} == branch ]]; then
+        personality_enqueue_module . -DskipTests
+        return
+      fi
+      return
+      ;;
+    releaseaudit)
+      # this is very fast and provides the full path if we do it from
+      # the root of the source
+      personality_enqueue_module .
+      return
+      ;;
+    unit)
+      if [[ ${TEST_PARALLEL} == "true" ]] ; then
+        extra="-Pparallel-tests"
+        if [[ -n ${TEST_THREADS:-} ]]; then
+          extra="${extra} -DtestsThreadCount=${TEST_THREADS}"
+        fi
+      fi
+      ;;
+    *)
+      extra="-DskipTests"
+      ;;
+  esac
+
+  for module in ${CHANGED_MODULES}; do
+    # shellcheck disable=SC2086
+    personality_enqueue_module ${module} ${extra}
+  done
+}
+
+function personality_file_tests
+{
+  local filename=$1
+
+  yetus_debug "Using personality_file_tests, but calling the built-in:"
+  builtin_personality_file_tests "${1}"
+
+  if [[ ${filename} =~ \.scala$ ]]; then
+    add_test unit
+  fi
+}
diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh
new file mode 100644
index 0000000000000..059d051640284
--- /dev/null
+++ b/dev-support/personality/hadoop.sh
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Override these to match Apache Hadoop's requirements + +#shellcheck disable=SC2034 +PATCH_BRANCH_DEFAULT=trunk +#shellcheck disable=SC2034 +HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute" +#shellcheck disable=SC2034 +ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' + +HADOOP_MODULES="" + +function hadoop_module_manipulation +{ + local need_common=0 + local module + local hdfs_modules + local ordered_modules + local tools_modules + local passed_modules=${CHANGED_MODULES} + + yetus_debug "hmm: starting list: ${passed_modules}" + + # if one of our modules is ., then shortcut: + # ignore the rest and just set it to everything. + if [[ ${CHANGED_MODULES} == ' . ' ]]; then + HADOOP_MODULES='.' + return + fi + + # ${CHANGED_MODULES} is already sorted and uniq'd. + # let's remove child modules if we're going to + # touch their parent + for module in ${CHANGED_MODULES}; do + yetus_debug "Stripping ${module}" + # shellcheck disable=SC2086 + passed_modules=$(echo ${passed_modules} | tr ' ' '\n' | ${GREP} -v ${module}/ ) + done + + for module in ${passed_modules}; do + yetus_debug "Personality ordering ${module}" + if [[ ${module} == hadoop-hdfs-project* ]]; then + hdfs_modules="${hdfs_modules} ${module}" + need_common=1 + elif [[ ${module} == hadoop-common-project/hadoop-common + || ${module} == hadoop-common-project ]]; then + ordered_modules="${ordered_modules} ${module}" + building_common=1 + elif [[ ${module} == hadoop-tools* ]]; then + tools_modules="${tools_modules} ${module}" + else + ordered_modules="${ordered_modules} ${module}" + fi + done + + ordered_modules="${ordered_modules} ${hdfs_modules} ${tools_modules}" + + if [[ ${need_common} -eq 1 + && ${building_common} -eq 0 ]]; then + ordered_modules="hadoop-common-project/hadoop-common ${ordered_modules}" + fi + + yetus_debug "hmm: ${ordered_modules}" + HADOOP_MODULES=${ordered_modules} +} + +function hadoop_javac_ordering +{ + local special=$1 + local ordered_modules + local module + + # Based upon HADOOP-11937 + # + # Some notes: + # + # - getting fuse to compile on anything but Linux + # is always tricky. + # - Darwin assumes homebrew is in use. + # - HADOOP-12027 required for bzip2 on OS X. + # - bzip2 is broken in lots of places. + # e.g, HADOOP-12027 for OS X. 
so no -Drequire.bzip2 + # + + for module in ${HADOOP_MODULES}; do + if [[ ${JENKINS} == true + && ${DOCKERSUPPORT} == false ]]; then + # shellcheck disable=SC2086 + personality_enqueue_module "${module}" ${special} \ + -Pnative \ + -Drequire.snappy -Drequire.openssl -Drequire.fuse \ + -Drequire.test.libhadoop + else + case ${OSTYPE} in + Linux) + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${special} \ + -Pnative -Drequire.libwebhdfs \ + -Drequire.snappy -Drequire.openssl -Drequire.fuse \ + -Drequire.test.libhadoop + ;; + Darwin) + JANSSON_INCLUDE_DIR=/usr/local/opt/jansson/include + JANSSON_LIBRARY=/usr/local/opt/jansson/lib + export JANSSON_LIBRARY JANSSON_INCLUDE_DIR + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${special} \ + -Pnative -Drequire.snappy \ + -Drequire.openssl \ + -Dopenssl.prefix=/usr/local/opt/openssl/ \ + -Dopenssl.include=/usr/local/opt/openssl/include \ + -Dopenssl.lib=/usr/local/opt/openssl/lib \ + -Drequire.libwebhdfs -Drequire.test.libhadoop + ;; + *) + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${special} \ + -Pnative \ + -Drequire.snappy -Drequire.openssl \ + -Drequire.libwebhdfs -Drequire.test.libhadoop + ;; + esac + fi + done +} + +function personality_modules +{ + local repostatus=$1 + local testtype=$2 + local extra="" + local fn + local i + + yetus_debug "Personality: ${repostatus} ${testtype}" + + clear_personality_queue + + case ${testtype} in + javac) + if [[ ${BUILD_NATIVE} == true ]]; then + hadoop_module_manipulation + hadoop_javac_ordering -DskipTests + return + fi + extra="-DskipTests" + ;; + javadoc) + if [[ ${repostatus} == patch ]]; then + echo "javadoc pre-reqs:" + for i in hadoop-project \ + hadoop-common-project/hadoop-annotations; do + fn=$(module_file_fragment "${i}") + pushd "${BASEDIR}/${i}" >/dev/null + echo "cd ${i}" + echo_and_redirect "${PATCH_DIR}/maven-${fn}-install.txt" \ + "${MVN}" "${MAVEN_ARGS[@]}" install + popd >/dev/null + done + fi + extra="-Pdocs -DskipTests" + ;; + mvninstall) + extra="-DskipTests" + if [[ ${repostatus} == branch ]]; then + HADOOP_MODULES=. + hadoop_javac_ordering -DskipTests + return + fi + ;; + releaseaudit) + # this is very fast and provides the full path if we do it from + # the root of the source + personality_enqueue_module . + return + ;; + unit) + if [[ ${TEST_PARALLEL} == "true" ]] ; then + extra="-Pparallel-tests" + if [[ -n ${TEST_THREADS:-} ]]; then + extra="${extra} -DtestsThreadCount=${TEST_THREADS}" + fi + fi + if [[ ${BUILD_NATIVE} == true ]]; then + hadoop_module_manipulation + # shellcheck disable=SC2086 + hadoop_javac_ordering ${extra} + return + fi + ;; + *) + extra="-DskipTests" + ;; + esac + + hadoop_module_manipulation + for module in ${HADOOP_MODULES}; do + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${extra} + done +} + diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh new file mode 100644 index 0000000000000..46ad3902bc14c --- /dev/null +++ b/dev-support/personality/hbase.sh @@ -0,0 +1,238 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#shellcheck disable=SC2034 +PATCH_BRANCH_DEFAULT=master +#shellcheck disable=SC2034 +ISSUE_RE='^HBASE-[0-9]+$' +#shellcheck disable=SC2034 +HOW_TO_CONTRIBUTE="" + +# All supported Hadoop versions that we want to test the compilation with +HADOOP2_VERSIONS="2.4.1 2.5.2 2.6.0" + +# Override the maven options +MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M"}" + +function personality_modules +{ + local repostatus=$1 + local testtype=$2 + local extra="" + + yetus_debug "Personality: ${repostatus} ${testtype}" + + clear_personality_queue + + case ${testtype} in + javac) + personality_enqueue_module . -DskipTests + return + ;; + mvninstall) + extra="-DskipTests -DHBasePatchProcess" + if [[ ${repostatus} == branch ]]; then + personality_enqueue_module . "${extra}" + return + fi + return + ;; + releaseaudit) + # this is very fast and provides the full path if we do it from + # the root of the source + personality_enqueue_module . -DHBasePatchProcess + return + ;; + unit) + if [[ ${TEST_PARALLEL} == "true" ]] ; then + extra="-Pparallel-tests -DHBasePatchProcess" + if [[ -n ${TEST_THREADS:-} ]]; then + extra="${extra} -DtestsThreadCount=${TEST_THREADS}" + fi + fi + ;; + *) + extra="-DskipTests -DHBasePatchProcess" + ;; + esac + + for module in ${CHANGED_MODULES}; do + + # skip findbugs on hbase-shell + if [[ ${module} == hbase-shell + && ${testtype} == findbugs ]]; then + continue + else + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${extra} + fi + done +} + +################################################### + +add_plugin hadoopcheck + +function hadoopcheck_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.java$ ]]; then + add_test hadoopcheck + fi +} + +function hadoopcheck_postapply +{ + local HADOOP2_VERSION + local logfile + local count + local result=0 + + big_console_header "Compiling against various Hadoop versions" + + export MAVEN_OPTS="${MAVEN_OPTS}" + for HADOOP2_VERSION in ${HADOOP2_VERSIONS}; do + logfile="${PATCH_DIR}/patch-javac-${HADOOP2_VERSION}.txt" + echo_and_redirect "${logfile}" \ + "${MVN}" clean install \ + -DskipTests -DHBasePatchProcess \ + -Dhadoop-two.version="${HADOOP2_VERSION}" + count=$(${GREP} -c ERROR "${logfile}") + if [[ ${count} -gt 0 ]]; then + add_vote_table -1 hadoopcheck "Patch causes ${count} errors with Hadoop v${HADOOP2_VERSION}." + ((result=result+1)) + fi + done + + if [[ ${result} -gt 0 ]]; then + return 1 + fi + + add_vote_table +1 hadoopcheck "Patch does not cause any errors with Hadoop ${HADOOP2_VERSIONS}." + return 0 +} + +###################################### + +add_plugin hbaseprotoc + +function hbaseprotoc_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.proto$ ]]; then + add_test hbaseprotoc + fi +} + +function hbaseprotoc_postapply +{ + local i=0 + local fn + local module + local logfile + local count + local results + + big_console_header "Patch HBase protoc plugin" + + start_clock + + verify_needed_test hbaseprotoc + if [[ $? == 0 ]]; then + echo "Patch does not need hbaseprotoc testing." 
+ return 0 + fi + + personality_modules patch hbaseprotoc + modules_workers patch hbaseprotoc -DskipTests -Pcompile-protobuf -X -DHBasePatchProcess + + # shellcheck disable=SC2153 + until [[ $i -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi + module=${MODULE[$i]} + fn=$(module_file_fragment "${module}") + logfile="${PATCH_DIR}/patch-hbaseprotoc-${fn}.txt" + + count=$(${GREP} -c ERROR "${logfile}") + + if [[ ${count} -gt 0 ]]; then + module_status ${i} -1 "patch-hbaseprotoc-${fn}.txt" "Patch generated "\ + "${count} new protoc errors in ${module}." + ((results=results+1)) + fi + ((i=i+1)) + done + + modules_messages patch hbaseprotoc true + if [[ ${results} -gt 0 ]]; then + return 1 + fi + return 0 +} + +###################################### + +add_plugin hbaseanti + +function hbaseanti_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.java$ ]]; then + add_test hbaseanti + fi +} + +function hbaseanti_preapply +{ + local warnings + local result + + big_console_header "Checking for known anti-patterns" + + start_clock + + verify_needed_test hbaseanti + if [[ $? == 0 ]]; then + echo "Patch does not need hbaseanti testing." + return 0 + fi + + warnings=$(${GREP} 'new TreeMap&2 + JAVA_HOME="" +fi + +if [[ -z ${JAVA_HOME} ]]; then + JAVA_HOME=$(find /usr/lib/jvm/ -name "java-*" -type d | tail -1) + export JAVA_HOME +fi + +# Avoid out of memory errors in builds +MAVEN_OPTS=${MAVEN_OPTS:-"-Xms256m -Xmx1g"} +export MAVEN_OPTS + +# strip out --docker param to prevent re-exec again +TESTPATCHMODE=${TESTPATCHMODE/--docker } + + +cd "${BASEDIR}" +PATCH_DIR=$(cd -P -- "${PATCH_DIR}" >/dev/null && pwd -P) + +cd "${PATCH_DIR}/precommit/" +#shellcheck disable=SC2086 +"${PATCH_DIR}/precommit/test-patch.sh" \ + --reexec \ + --dockermode ${TESTPATCHMODE} \ + --basedir="${BASEDIR}" \ + --patch-dir="${PATCH_DIR}" \ + --java-home="${JAVA_HOME}" \ + --plugins="${PATCH_DIR}/precommit/user-plugins" \ + --jira-cmd=/opt/jiracli/jira-cli-2.2.0/jira.sh diff --git a/dev-support/test-patch-docker/test-patch-docker.sh b/dev-support/test-patch-docker/test-patch-docker.sh new file mode 100644 index 0000000000000..f8e46b4bbc183 --- /dev/null +++ b/dev-support/test-patch-docker/test-patch-docker.sh @@ -0,0 +1,383 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
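+
+# Helper that performs Docker housekeeping (stale containers, old images)
+# and then builds a per-patch image so test-patch can run inside it.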
+ +DID=${RANDOM} + +## @description Print a message to stderr if --debug is turned on +## @audience private +## @stability stable +## @replaceable no +## @param string +function yetus_debug +{ + if [[ -n "${TP_SHELL_SCRIPT_DEBUG}" ]]; then + echo "[$(date) DEBUG]: $*" 1>&2 + fi +} + +## @description Run docker with some arguments, and +## @description optionally send to debug +## @audience private +## @stability evolving +## @replaceable no +## @param args +function dockercmd +{ + yetus_debug "docker $*" + docker "$@" +} + +## @description Handle command line arguments +## @audience private +## @stability evolving +## @replaceable no +## @param args +function parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --debug) + TP_SHELL_SCRIPT_DEBUG=true + ;; + --dockerversion=*) + DOCKER_VERSION=${i#*=} + ;; + --help|-help|-h|help|--h|--\?|-\?|\?) + yetus_usage + exit 0 + ;; + --java-home=*) + JAVA_HOME=${i#*=} + ;; + --patch-dir=*) + PATCH_DIR=${i#*=} + ;; + --project=*) + PROJECT_NAME=${i#*=} + ;; + *) + ;; + esac + done +} + +## @description Stop and delete all defunct containers +## @audience private +## @stability evolving +## @replaceable no +## @param args +function stop_exited_containers +{ + local line + local id + local value + local size + + echo "Docker containers in exit state:" + + dockercmd ps -a | grep Exited + + # stop *all* containers that are in exit state for + # more than > 8 hours + while read line; do + id=$(echo "${line}" | cut -f1 -d' ') + value=$(echo "${line}" | cut -f2 -d' ') + size=$(echo "${line}" | cut -f3 -d' ') + + if [[ ${size} =~ day + || ${size} =~ week + || ${size} =~ month + || ${size} =~ year ]]; then + echo "Removing docker ${id}" + dockercmd rm "${id}" + fi + + if [[ ${size} =~ hours + && ${value} -gt 8 ]]; then + echo "Removing docker ${id}" + dockercmd rm "${id}" + fi + done < <( + dockercmd ps -a \ + | grep Exited \ + | sed -e 's,ago,,g' \ + | awk '{print $1" "$(NF - 2)" "$(NF - 1)}') +} + +## @description Remove all containers that are not +## @description are not running + older than 1 day +## @audience private +## @stability evolving +## @replaceable no +## @param args +function rm_old_containers +{ + local line + local id + local value + local size + + while read line; do + id=$(echo "${line}" | cut -f1 -d, ) + state=$(echo "${line}" | cut -f2 -d, ) + stoptime=$(echo "${line}" | cut -f3 -d, | cut -f1 -d. ) + + # believe it or not, date is not even close to standardized... 
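+    # (GNU date can parse the ISO timestamp directly via -d, while BSD
+    # date needs -j -f with an explicit format, hence the OS check below)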
+ if [[ $(uname -s) == Linux ]]; then + + # GNU date + stoptime=$(date -d "${stoptime}" "+%s") + else + + # BSD date + stoptime=$(date -j -f "%Y-%m-%dT%H:%M:%S" "${stoptime}" "+%s") + fi + + if [[ ${state} == false ]]; then + curtime=$(date "+%s") + ((difftime = curtime - stoptime)) + if [[ ${difftime} -gt 86400 ]]; then + echo "Removing docker ${id}" + dockercmd rm "${id}" + fi + fi + done < <( + # see https://github.com/koalaman/shellcheck/issues/375 + # shellcheck disable=SC2046 + dockercmd inspect \ + -f '{{.Id}},{{.State.Running}},{{.State.FinishedAt}}' \ + $(dockercmd ps -qa) 2>/dev/null) +} + +## @description Remove untagged/unused images +## @audience private +## @stability evolving +## @replaceable no +## @param args +function remove_untagged_images +{ + # this way is a bit more compatible with older docker versions + dockercmd images | tail -n +2 | awk '$1 == "" {print $3}' | \ + xargs --no-run-if-empty docker rmi +} + +## @description Remove defunct tagged images +## @audience private +## @stability evolving +## @replaceable no +## @param args +function remove_old_tagged_images +{ + local line + local id + local created + + while read line; do + id=$(echo "${line}" | awk '{print $1}') + created=$(echo "${line}" | awk '{print $5}') + + if [[ ${created} =~ week + || ${created} =~ month + || ${created} =~ year ]]; then + echo "Removing docker image ${id}" + dockercmd rmi "${id}" + fi + + if [[ ${id} =~ test-patch-base-${PROJECT_NAME}-date ]]; then + if [[ ${created} =~ day + || ${created} =~ hours ]]; then + echo "Removing docker image ${id}" + dockercmd rmi "${id}" + fi + fi + done < <(dockercmd images) + +} + +## @description Performance docker maintenance on Jenkins +## @audience private +## @stability evolving +## @replaceable no +## @param args +function cleanup_apache_jenkins_docker +{ + echo "==========================" + echo "Docker Images:" + dockercmd images + echo "==========================" + echo "Docker Containers:" + dockercmd ps -a + echo "==========================" + + stop_exited_containers + + rm_old_containers + + remove_untagged_images + + remove_old_tagged_images +} + +## @description Clean up our old images used for patch testing +## @audience private +## @stability evolving +## @replaceable no +## @param args +function cleanup_test_patch_images +{ + local images + local imagecount + local rmimage + local rmi + + # we always want to leave at least one of our images + # so that the whole thing doesn't have to be rebuilt. + # This also let's us purge any old images so that + # we can get fresh stuff sometimes + images=$(dockercmd images | grep --color=none "test-patch-tp-${PROJECT_NAME}" | awk '{print $1}') 2>&1 + + # shellcheck disable=SC2086 + imagecount=$(echo ${images} | tr ' ' '\n' | wc -l) + ((imagecount = imagecount - 1 )) + + # shellcheck disable=SC2086 + rmimage=$(echo ${images} | tr ' ' '\n' | tail -${imagecount}) + for rmi in ${rmimage} + do + echo "Removing image ${rmi}" + dockercmd rmi "${rmi}" + done +} + +## @description Perform pre-run maintenance to free up +## @description resources. With --jenkins, it is a lot +## @description more destructive. 
+## @audience private +## @stability evolving +## @replaceable no +## @param args +function cleanup +{ + if [[ ${TESTPATCHMODE} =~ jenkins ]]; then + cleanup_apache_jenkins_docker + fi + + cleanup_test_patch_images +} + +## @description Deterine the user name and user id of the user +## @description that the docker container should use +## @audience private +## @stability evolving +## @replaceable no +## @param args +function determine_user +{ + # On the Apache Jenkins hosts, $USER is pretty much untrustable beacuse some + # ... person ... sets it to an account that doesn't actually exist. + # so instead, we need to try and override it with something that's + # probably close to reality. + if [[ ${TESTPATCHMODE} =~ jenkins ]]; then + USER=$(id | cut -f2 -d\( | cut -f1 -d\)) + fi + + if [[ "$(uname -s)" == "Linux" ]]; then + USER_NAME=${SUDO_USER:=$USER} + USER_ID=$(id -u "${USER_NAME}") + GROUP_ID=$(id -g "${USER_NAME}") + else # boot2docker uid and gid + USER_NAME=${USER} + USER_ID=1000 + GROUP_ID=50 + fi +} + +## @description Determine the revision of a dockerfile +## @audience private +## @stability evolving +## @replaceable no +## @param args +function getdockerfilerev +{ + grep 'TEST_PATCH_PRIVATE: gitrev=' \ + "${PATCH_DIR}/precommit/test-patch-docker/Dockerfile" \ + | cut -f2 -d= +} + +## @description Start a test patch docker container +## @audience private +## @stability evolving +## @replaceable no +## @param args +function run_image +{ + local dockerfilerev + local baseimagename + + dockerfilerev=$(getdockerfilerev) + + baseimagename="test-patch-base-${PROJECT_NAME}-${dockerfilerev}" + + # make a base image, if it isn't available + dockercmd build -t "${baseimagename}" "${PATCH_DIR}/precommit/test-patch-docker" + + # using the base image, make one that is patch specific + dockercmd build -t "test-patch-tp-${PROJECT_NAME}-${DID}" - < "${PATCH_DIR}/patch-asflicense.txt" + + if [[ -s "${PATCH_DIR}/patch-asflicense.txt" ]] ; then + numpatch=$("${GREP}" -c '\!?????' "${PATCH_DIR}/patch-asflicense.txt") + echo "" + echo "" + echo "There appear to be ${numpatch} ASF License warnings after applying the patch." + if [[ -n ${numpatch} + && ${numpatch} -gt 0 ]] ; then + add_vote_table -1 asflicense "Patch generated ${numpatch} ASF License warnings." + + echo "Lines that start with ????? in the ASF License "\ + "report indicate files that do not have an Apache license header:" \ + > "${PATCH_DIR}/patch-asflicense-problems.txt" + + ${GREP} '\!?????' "${PATCH_DIR}/patch-asflicense.txt" \ + >> "${PATCH_DIR}/patch-asflicense-problems.txt" + + add_footer_table asflicense "@@BASE@@/patch-asflicense-problems.txt" + fi + else + # if we're here, then maven actually failed + modules_messages patch asflicense true + fi + return 1 +} diff --git a/dev-support/test-patch.d/builtin-personality.sh b/dev-support/test-patch.d/builtin-personality.sh new file mode 100644 index 0000000000000..dc944e485f1e1 --- /dev/null +++ b/dev-support/test-patch.d/builtin-personality.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function builtin_personality_modules +{ + local repostatus=$1 + local testtype=$2 + + local module + + yetus_debug "Using builtin personality_modules" + yetus_debug "Personality: ${repostatus} ${testtype}" + + clear_personality_queue + + # this always makes sure the local repo has a fresh + # copy of everything per pom rules. + if [[ ${repostatus} == branch + && ${testtype} == mvninstall ]];then + personality_enqueue_module . + return + fi + + for module in ${CHANGED_MODULES}; do + # shellcheck disable=SC2086 + personality_enqueue_module ${module} + done +} + +function personality_modules +{ + builtin_personality_modules "$@" +} + +function builtin_mvn_personality_file_tests +{ + local filename=$1 + + yetus_debug "Using builtin mvn personality_file_tests" + + if [[ ${filename} =~ src/main/webapp ]]; then + yetus_debug "tests/webapp: ${filename}" + elif [[ ${filename} =~ \.sh + || ${filename} =~ \.cmd + ]]; then + yetus_debug "tests/shell: ${filename}" + elif [[ ${filename} =~ \.md$ + || ${filename} =~ \.md\.vm$ + || ${filename} =~ src/site + || ${filename} =~ src/main/docs + ]]; then + yetus_debug "tests/site: ${filename}" + add_test site + elif [[ ${filename} =~ \.c$ + || ${filename} =~ \.cc$ + || ${filename} =~ \.h$ + || ${filename} =~ \.hh$ + || ${filename} =~ \.proto$ + || ${filename} =~ src/test + || ${filename} =~ \.cmake$ + || ${filename} =~ CMakeLists.txt + ]]; then + yetus_debug "tests/units: ${filename}" + add_test javac + add_test mvninstall + add_test unit + elif [[ ${filename} =~ pom.xml$ + || ${filename} =~ \.java$ + || ${filename} =~ \.scala$ + || ${filename} =~ src/main + ]]; then + if [[ ${filename} =~ src/main/bin + || ${filename} =~ src/main/sbin ]]; then + yetus_debug "tests/shell: ${filename}" + else + yetus_debug "tests/javadoc+units: ${filename}" + add_test javac + add_test javadoc + add_test mvninstall + add_test unit + fi + fi + + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + fi +} + +function builtin_ant_personality_file_tests +{ + local filename=$1 + + yetus_debug "Using builtin ant personality_file_tests" + + if [[ ${filename} =~ \.sh + || ${filename} =~ \.cmd + ]]; then + yetus_debug "tests/shell: ${filename}" + elif [[ ${filename} =~ \.c$ + || ${filename} =~ \.cc$ + || ${filename} =~ \.h$ + || ${filename} =~ \.hh$ + || ${filename} =~ \.proto$ + || ${filename} =~ src/test + || ${filename} =~ \.cmake$ + || ${filename} =~ CMakeLists.txt + ]]; then + yetus_debug "tests/units: ${filename}" + add_test javac + add_test unit + elif [[ ${filename} =~ build.xml + || ${filename} =~ ivy.xml + || ${filename} =~ \.java$ + ]]; then + yetus_debug "tests/javadoc+units: ${filename}" + add_test javac + add_test javadoc + add_test unit + fi + + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + fi +} + +function builtin_personality_file_tests +{ + case ${BUILDTOOL} in + maven) + builtin_mvn_personality_file_tests "$@" + ;; + ant) + builtin_ant_personality_file_tests "$@" + ;; + *) + return 1 + ;; + esac +} + +function personality_file_tests +{ + builtin_personality_file_tests "$@" +} diff --git a/dev-support/test-patch.d/findbugs.sh 
b/dev-support/test-patch.d/findbugs.sh new file mode 100644 index 0000000000000..2fe23394c9ca7 --- /dev/null +++ b/dev-support/test-patch.d/findbugs.sh @@ -0,0 +1,379 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +FINDBUGS_HOME=${FINDBUGS_HOME:-} +FINDBUGS_WARNINGS_FAIL_PRECHECK=false + +add_plugin findbugs + +function findbugs_file_filter +{ + local filename=$1 + + if [[ ${BUILDTOOL} == maven + || ${BUILDTOOL} == ant ]]; then + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + fi + fi +} + +function findbugs_usage +{ + echo "FindBugs specific:" + echo "--findbugs-home= Findbugs home directory (default FINDBUGS_HOME environment variable)" + echo "--findbugs-strict-precheck If there are Findbugs warnings during precheck, fail" +} + +function findbugs_parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --findbugs-home=*) + FINDBUGS_HOME=${i#*=} + ;; + --findbugs-strict-precheck) + FINDBUGS_WARNINGS_FAIL_PRECHECK=true + ;; + esac + done +} + +## @description are the needed bits for findbugs present? +## @audience private +## @stability evolving +## @replaceable no +## @return 0 findbugs will work for our use +## @return 1 findbugs is missing some component +function findbugs_is_installed +{ + if [[ ! -e "${FINDBUGS_HOME}/bin/findbugs" ]]; then + printf "\n\n%s is not executable.\n\n" "${FINDBUGS_HOME}/bin/findbugs" + add_vote_table -1 findbugs "Findbugs is not installed." + return 1 + fi + return 0 +} + +## @description Run the maven findbugs plugin and record found issues in a bug database +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function findbugs_runner +{ + local name=$1 + local module + local result=0 + local fn + local warnings_file + local i=0 + local savestop + + personality_modules "${name}" findbugs + case ${BUILDTOOL} in + maven) + modules_workers "${name}" findbugs clean test findbugs:findbugs + ;; + ant) + modules_workers "${name}" findbugs findbugs + ;; + esac + + #shellcheck disable=SC2153 + until [[ ${i} -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi + start_clock + offset_clock "${MODULE_STATUS_TIMER[${i}]}" + module="${MODULE[${i}]}" + fn=$(module_file_fragment "${module}") + + case ${BUILDTOOL} in + maven) + file="${module}/target/findbugsXml.xml" + ;; + ant) + file="${ANT_FINDBUGSXML}" + ;; + esac + + if [[ ! 
-f ${file} ]]; then + module_status ${i} -1 "" "${name}/${module} no findbugs output file (${file})" + ((i=i+1)) + continue + fi + + warnings_file="${PATCH_DIR}/${name}-findbugs-${fn}-warnings" + + cp -p "${file}" "${warnings_file}.xml" + + if [[ ${name} == branch ]]; then + "${FINDBUGS_HOME}/bin/setBugDatabaseInfo" -name "${PATCH_BRANCH}" \ + "${warnings_file}.xml" "${warnings_file}.xml" + else + "${FINDBUGS_HOME}/bin/setBugDatabaseInfo" -name patch \ + "${warnings_file}.xml" "${warnings_file}.xml" + fi + if [[ $? != 0 ]]; then + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + module_status ${i} -1 "" "${name}/${module} cannot run setBugDatabaseInfo from findbugs" + ((retval = retval + 1)) + ((i=i+1)) + continue + fi + + "${FINDBUGS_HOME}/bin/convertXmlToText" -html \ + "${warnings_file}.xml" \ + "${warnings_file}.html" + if [[ $? != 0 ]]; then + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + module_status ${i} -1 "" "${name}/${module} cannot run convertXmlToText from findbugs" + ((result = result + 1)) + fi + + if [[ -z ${FINDBUGS_VERSION} + && ${name} == branch ]]; then + FINDBUGS_VERSION=$(${GREP} -i "BugCollection version=" "${warnings_file}.xml" \ + | cut -f2 -d\" \ + | cut -f1 -d\" ) + if [[ -n ${FINDBUGS_VERSION} ]]; then + add_footer_table findbugs "v${FINDBUGS_VERSION}" + fi + fi + + ((i=i+1)) + done + return ${result} +} + +## @description Track pre-existing findbugs warnings +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function findbugs_preapply +{ + local fn + local module + local i=0 + local warnings_file + local module_findbugs_warnings + local results=0 + + big_console_header "Pre-patch findbugs detection" + + verify_needed_test findbugs + + if [[ $? == 0 ]]; then + echo "Patch does not appear to need findbugs tests." + return 0 + fi + + findbugs_is_installed + if [[ $? != 0 ]]; then + return 1 + fi + + findbugs_runner branch + results=$? + + if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" ]]; then + until [[ $i -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi + module=${MODULE[${i}]} + start_clock + offset_clock "${MODULE_STATUS_TIMER[${i}]}" + fn=$(module_file_fragment "${module}") + warnings_file="${PATCH_DIR}/branch-findbugs-${fn}-warnings" + # shellcheck disable=SC2016 + module_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first \ + "${PATCH_BRANCH}" \ + "${warnings_file}.xml" \ + "${warnings_file}.xml" \ + | ${AWK} '{print $1}') + + if [[ ${module_findbugs_warnings} -gt 0 ]] ; then + module_status ${i} -1 "branch-findbugs-${fn}.html" "${module} in ${PATCH_BRANCH} cannot run convertXmlToText from findbugs" + ((results=results+1)) + fi + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + ((i=i+1)) + done + modules_messages branch findbugs true + fi + + if [[ ${results} != 0 ]]; then + return 1 + fi + return 0 +} + +## @description Verify patch does not trigger any findbugs warnings +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function findbugs_postinstall +{ + local module + local fn + local combined_xml + local branchxml + local patchxml + local newbugsbase + local new_findbugs_warnings + local line + local firstpart + local secondpart + local i=0 + local results=0 + local savestop + + big_console_header "Patch findbugs detection" + + verify_needed_test findbugs + + if [[ $? 
== 0 ]]; then + echo "Patch does not appear to need findbugs tests." + return 0 + fi + + findbugs_is_installed + if [[ $? != 0 ]]; then + return 1 + fi + + findbugs_runner patch + + until [[ $i -eq ${#MODULE[@]} ]]; do + if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then + ((result=result+1)) + ((i=i+1)) + continue + fi + start_clock + offset_clock "${MODULE_STATUS_TIMER[${i}]}" + module="${MODULE[${i}]}" + pushd "${module}" >/dev/null + fn=$(module_file_fragment "${module}") + + combined_xml="${PATCH_DIR}/combined-findbugs-${fn}.xml" + branchxml="${PATCH_DIR}/branch-findbugs-${fn}-warnings.xml" + patchxml="${PATCH_DIR}/patch-findbugs-${fn}-warnings.xml" + + if [[ ! -f "${branchxml}" ]]; then + branchxml=${patchxml} + fi + + newbugsbase="${PATCH_DIR}/new-findbugs-${fn}" + + "${FINDBUGS_HOME}/bin/computeBugHistory" -useAnalysisTimes -withMessages \ + -output "${combined_xml}" \ + "${branchxml}" \ + "${patchxml}" + if [[ $? != 0 ]]; then + popd >/dev/null + module_status ${i} -1 "" "${module} cannot run computeBugHistory from findbugs" + ((result=result+1)) + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + ((i=i+1)) + continue + fi + + #shellcheck disable=SC2016 + new_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first patch \ + "${combined_xml}" "${newbugsbase}.xml" | ${AWK} '{print $1}') + if [[ $? != 0 ]]; then + popd >/dev/null + module_status ${i} -1 "" "${module} cannot run filterBugs (#1) from findbugs" + ((result=result+1)) + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + ((i=i+1)) + continue + fi + + #shellcheck disable=SC2016 + new_findbugs_fixed_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -fixed patch \ + "${combined_xml}" "${newbugsbase}.xml" | ${AWK} '{print $1}') + if [[ $? != 0 ]]; then + popd >/dev/null + module_status ${i} -1 "" "${module} cannot run filterBugs (#2) from findbugs" + ((result=result+1)) + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + ((i=i+1)) + continue + fi + + echo "Found ${new_findbugs_warnings} new Findbugs warnings and ${new_findbugs_fixed_warnings} newly fixed warnings." + findbugs_warnings=$((findbugs_warnings+new_findbugs_warnings)) + findbugs_fixed_warnings=$((findbugs_fixed_warnings+new_findbugs_fixed_warnings)) + + "${FINDBUGS_HOME}/bin/convertXmlToText" -html "${newbugsbase}.xml" \ + "${newbugsbase}.html" + if [[ $? != 0 ]]; then + popd >/dev/null + module_status ${i} -1 "" "${module} cannot run convertXmlToText from findbugs" + ((result=result+1)) + savestop=$(stop_clock) + MODULE_STATUS_TIMER[${i}]=${savestop} + ((i=i+1)) + continue + fi + + if [[ ${new_findbugs_warnings} -gt 0 ]] ; then + populate_test_table FindBugs "module:${module}" + while read line; do + firstpart=$(echo "${line}" | cut -f2 -d:) + secondpart=$(echo "${line}" | cut -f9- -d' ') + add_test_table "" "${firstpart}:${secondpart}" + done < <("${FINDBUGS_HOME}/bin/convertXmlToText" "${newbugsbase}.xml") + + module_status ${i} -1 "new-findbugs-${fn}.html" "${module} introduced "\ + "${new_findbugs_warnings} new FindBugs issues." 
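+      # a single new warning is enough to vote the module down; the
+      # per-warning rows added above give reviewers the details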
+      ((result=result+1))
+    fi
+    savestop=$(stop_clock)
+    MODULE_STATUS_TIMER[${i}]=${savestop}
+    popd >/dev/null
+    ((i=i+1))
+  done
+
+  modules_messages patch findbugs true
+  if [[ ${result} -gt 0 ]]; then
+    return 1
+  fi
+  return 0
+}
diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh
new file mode 100644
index 0000000000000..281f15b232880
--- /dev/null
+++ b/dev-support/test-patch.d/github.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_bugsystem github
+
+## @description Write the contents of a file to github
+## @param filename
+## @stability stable
+## @audience public
+function github_write_comment
+{
+  local -r commentfile=${1}
+  shift
+
+  # posting to github is not implemented yet; report failure so
+  # callers know the comment was not delivered
+  local retval=1
+
+  return ${retval}
+}
+
+
+## @description Print out the finished details to the Github PR
+## @audience private
+## @stability evolving
+## @replaceable no
+## @param runresult
+function github_finalreport
+{
+  local result=$1
+  local i
+  local commentfile=${PATCH_DIR}/commentfile
+  local comment
+
+  rm "${commentfile}" 2>/dev/null
+
+  if [[ ${JENKINS} != "true" ]] ; then
+    return 0
+  fi
+
+  big_console_header "Adding comment to Github"
+
+  add_footer_table "Console output" "${BUILD_URL}console"
+
+  if [[ ${result} == 0 ]]; then
+    add_header_line ":confetti_ball: **+1 overall**"
+  else
+    add_header_line ":broken_heart: **-1 overall**"
+  fi
+
+  printf "\n\n\n\n" >> "${commentfile}"
+
+  i=0
+  until [[ $i -eq ${#TP_HEADER[@]} ]]; do
+    printf "%s\n\n" "${TP_HEADER[${i}]}" >> "${commentfile}"
+    ((i=i+1))
+  done
+
+  {
+    printf "\n\n"
+    echo "| Vote | Subsystem | Runtime | Comment |"
+    echo "|:----:|----------:|--------:|:--------|"
+  } >> "${commentfile}"
+
+  i=0
+  until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do
+    echo "${TP_VOTE_TABLE[${i}]}" >> "${commentfile}"
+    ((i=i+1))
+  done
+
+  if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then
+    {
+      printf "\n\n"
+      echo "| Reason | Tests |"
+      echo "|-------:|:------|"
+    } >> "${commentfile}"
+    i=0
+    until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do
+      echo "${TP_TEST_TABLE[${i}]}" >> "${commentfile}"
+      ((i=i+1))
+    done
+  fi
+
+  {
+    printf "\n\n"
+    echo "| Subsystem | Report/Notes |"
+    echo "|----------:|:-------------|"
+  } >> "${commentfile}"
+
+  i=0
+  until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do
+    comment=$(echo "${TP_FOOTER_TABLE[${i}]}" |
+              ${SED} -e "s,@@BASE@@,${BUILD_URL}artifact/patchprocess,g")
+    printf "%s\n" "${comment}" >> "${commentfile}"
+    ((i=i+1))
+  done
+
+  printf "\n\nThis message was automatically generated.\n\n" >> "${commentfile}"
+
+  github_write_comment "${commentfile}"
+}
diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh
new file mode 100644
index 0000000000000..f95ca6f40fa85
--- /dev/null
+++ b/dev-support/test-patch.d/jira.sh
@@ -0,0 +1,190 @@
+#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +JIRACLI=${JIRA:-jira} + +add_bugsystem jira + +function jira_usage +{ + echo "JIRA Options:" + echo "--jira-cmd= The 'jira' command to use (default 'jira')" + echo "--jira-password= The password for the 'jira' command" + echo "--jira-user= The user for the 'jira' command" +} + +function jira_parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --jira-cmd=*) + JIRACLI=${i#*=} + ;; + --jira-password=*) + JIRA_PASSWD=${i#*=} + ;; + --jira-user=*) + JIRA_USER=${i#*=} + ;; + esac + done +} + +## @description Write the contents of a file to JIRA +## @params filename +## @stability stable +## @audience public +## @returns ${JIRACLI} exit code +function jira_write_comment +{ + local -r commentfile=${1} + shift + + local retval=0 + + + if [[ -n ${JIRA_PASSWD} + && -n ${JIRA_USER} ]]; then + # shellcheck disable=SC2086 + ${JIRACLI} --comment "$(cat ${commentfile})" \ + -s https://issues.apache.org/jira \ + -a addcomment -u ${JIRA_USER} \ + -p "${JIRA_PASSWD}" \ + --issue "${ISSUE}" + retval=$? 
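+    # close the session opened by addcomment rather than leaving it
+    # live on the JIRA server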
+ ${JIRACLI} -s https://issues.apache.org/jira \ + -a logout -u "${JIRA_USER}" \ + -p "${JIRA_PASSWD}" + fi + return ${retval} +} + +## @description Print out the finished details to the JIRA issue +## @audience private +## @stability evolving +## @replaceable no +## @param runresult +function jira_finalreport +{ + local result=$1 + local i + local commentfile=${PATCH_DIR}/commentfile + local comment + local vote + local ourstring + local ela + local subs + local color + local comment + + rm "${commentfile}" 2>/dev/null + + if [[ ${JENKINS} != "true" ]] ; then + return 0 + fi + + big_console_header "Adding comment to JIRA" + + add_footer_table "Console output" "${BUILD_URL}console" + + if [[ ${result} == 0 ]]; then + add_header_line "| (/) *{color:green}+1 overall{color}* |" + else + add_header_line "| (x) *{color:red}-1 overall{color}* |" + fi + + { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + + i=0 + until [[ $i -eq ${#TP_HEADER[@]} ]]; do + printf "%s\n" "${TP_HEADER[${i}]}" >> "${commentfile}" + ((i=i+1)) + done + + { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + + echo "|| Vote || Subsystem || Runtime || Comment ||" >> "${commentfile}" + + i=0 + until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do + ourstring=$(echo "${TP_VOTE_TABLE[${i}]}" | tr -s ' ') + vote=$(echo "${ourstring}" | cut -f2 -d\| | tr -d ' ') + subs=$(echo "${ourstring}" | cut -f3 -d\|) + ela=$(echo "${ourstring}" | cut -f4 -d\|) + comment=$(echo "${ourstring}" | cut -f5 -d\|) + + # summary line + if [[ -z ${vote} + && -n ${ela} ]]; then + color="black" + elif [[ -z ${vote} ]]; then + # keep same color + true + else + # new vote line + case ${vote} in + 1|"+1") + color="green" + ;; + -1) + color="red" + ;; + 0) + color="blue" + ;; + *) + color="black" + ;; + esac + fi + + printf "| {color:%s}%s{color} | {color:%s}%s{color} | {color:%s}%s{color} | {color:%s}%s{color} |\n" \ + "${color}" "${vote}" \ + "${color}" "${subs}" \ + "${color}" "${ela}" \ + "${color}" "${comment}" \ + >> "${commentfile}" + ((i=i+1)) + done + + if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then + { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + + echo "|| Reason || Tests ||" >> "${commentfile}" + i=0 + until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do + printf "%s\n" "${TP_TEST_TABLE[${i}]}" >> "${commentfile}" + ((i=i+1)) + done + fi + + { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + + echo "|| Subsystem || Report/Notes ||" >> "${commentfile}" + i=0 + until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do + comment=$(echo "${TP_FOOTER_TABLE[${i}]}" | + ${SED} -e "s,@@BASE@@,${BUILD_URL}artifact/patchprocess,g") + printf "%s\n" "${comment}" >> "${commentfile}" + ((i=i+1)) + done + + printf "\n\nThis message was automatically generated.\n\n" >> "${commentfile}" + + jira_write_comment "${commentfile}" +} diff --git a/dev-support/test-patch.d/xml.sh b/dev-support/test-patch.d/xml.sh new file mode 100644 index 0000000000000..b33c7cdc42434 --- /dev/null +++ b/dev-support/test-patch.d/xml.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_plugin xml + +function xml_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.xml$ ]]; then + add_test xml + fi +} + +function xml_postapply +{ + local js + local i + local count + + verify_needed_test xml + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "Checking if XML files are well-formed" + + js="${JAVA_HOME}/bin/jrunscript" + if [[ ! -x ${js} ]]; then + yetus_error "${js} does not exist" + return 0 + fi + + start_clock + + pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ! ${i} =~ \.xml$ ]]; then + continue + fi + ${js} -e "XMLDocument(arguments[0])" "${i}" >> "${PATCH_DIR}/xml.txt" 2>&1 + if [[ $? != 0 ]]; then + ((count=count+1)) + fi + done + + if [[ ${count} -gt 0 ]]; then + add_vote_table -1 xml "The patch has ${count} ill-formed XML file(s)." + add_footer_table xml "@@BASE@@/xml.txt" + popd >/dev/null + return 1 + fi + + popd >/dev/null + add_vote_table +1 xml "The patch has no ill-formed XML file." + return 0 +} From 63c7e92380063b25e39c9075240bfd014be24cef Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Sat, 27 Jun 2015 00:00:13 -0700 Subject: [PATCH 004/130] HADOOP-12113. update test-patch branch to latest code (continued2) (aw) --- dev-support/findHangingTest.sh | 0 dev-support/personality/flink.sh | 0 dev-support/personality/hadoop.sh | 0 dev-support/personality/hbase.sh | 0 dev-support/personality/pig.sh | 0 dev-support/personality/tajo.sh | 0 dev-support/personality/tez.sh | 0 dev-support/test-patch-docker/launch-test-patch.sh | 0 dev-support/test-patch-docker/test-patch-docker.sh | 0 dev-support/test-patch.d/apache-rat.sh | 0 dev-support/test-patch.d/builtin-personality.sh | 0 dev-support/test-patch.d/findbugs.sh | 0 dev-support/test-patch.d/github.sh | 0 dev-support/test-patch.d/jira.sh | 0 dev-support/test-patch.d/xml.sh | 0 15 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 dev-support/findHangingTest.sh mode change 100644 => 100755 dev-support/personality/flink.sh mode change 100644 => 100755 dev-support/personality/hadoop.sh mode change 100644 => 100755 dev-support/personality/hbase.sh mode change 100644 => 100755 dev-support/personality/pig.sh mode change 100644 => 100755 dev-support/personality/tajo.sh mode change 100644 => 100755 dev-support/personality/tez.sh mode change 100644 => 100755 dev-support/test-patch-docker/launch-test-patch.sh mode change 100644 => 100755 dev-support/test-patch-docker/test-patch-docker.sh mode change 100644 => 100755 dev-support/test-patch.d/apache-rat.sh mode change 100644 => 100755 dev-support/test-patch.d/builtin-personality.sh mode change 100644 => 100755 dev-support/test-patch.d/findbugs.sh mode change 100644 => 100755 dev-support/test-patch.d/github.sh mode change 100644 => 100755 dev-support/test-patch.d/jira.sh mode change 100644 => 100755 dev-support/test-patch.d/xml.sh diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/flink.sh b/dev-support/personality/flink.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/hadoop.sh 
b/dev-support/personality/hadoop.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/pig.sh b/dev-support/personality/pig.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/tajo.sh b/dev-support/personality/tajo.sh old mode 100644 new mode 100755 diff --git a/dev-support/personality/tez.sh b/dev-support/personality/tez.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch-docker/launch-test-patch.sh b/dev-support/test-patch-docker/launch-test-patch.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch-docker/test-patch-docker.sh b/dev-support/test-patch-docker/test-patch-docker.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/apache-rat.sh b/dev-support/test-patch.d/apache-rat.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/builtin-personality.sh b/dev-support/test-patch.d/builtin-personality.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/findbugs.sh b/dev-support/test-patch.d/findbugs.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh old mode 100644 new mode 100755 diff --git a/dev-support/test-patch.d/xml.sh b/dev-support/test-patch.d/xml.sh old mode 100644 new mode 100755 From 8e657fba2fd33f7550597ea9c4c6e9a87aa1ef1c Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Sat, 27 Jun 2015 08:59:50 -0700 Subject: [PATCH 005/130] HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw) --- dev-support/releasedocmaker.py | 76 ++++++++++++++++--- .../hadoop-common/CHANGES.txt | 2 + 2 files changed, 68 insertions(+), 10 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 2ccc1c0b84a47..8e68b3cb9e02b 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -87,8 +87,15 @@ def notableclean(str): str=str.rstrip() return str +# clean output dir +def cleanOutputDir(dir): + files = os.listdir(dir) + for name in files: + os.remove(os.path.join(dir,name)) + os.rmdir(dir) + def mstr(obj): - if (obj == None): + if (obj is None): return "" return unicode(obj) @@ -148,7 +155,7 @@ def getDescription(self): return mstr(self.fields['description']) def getReleaseNote(self): - if (self.notes == None): + if (self.notes is None): field = self.parent.fieldIdMap['Release Note'] if (self.fields.has_key(field)): self.notes=mstr(self.fields[field]) @@ -159,14 +166,14 @@ def getReleaseNote(self): def getPriority(self): ret = "" pri = self.fields['priority'] - if(pri != None): + if(pri is not None): ret = pri['name'] return mstr(ret) def getAssignee(self): ret = "" mid = self.fields['assignee'] - if(mid != None): + if(mid is not None): ret = mid['displayName'] return mstr(ret) @@ -182,21 +189,21 @@ def getSummary(self): def getType(self): ret = "" mid = self.fields['issuetype'] - if(mid != None): + if(mid is not None): ret = mid['name'] return mstr(ret) def getReporter(self): ret = "" mid = self.fields['reporter'] - if(mid != None): + if(mid is not None): ret = mid['displayName'] return mstr(ret) def getProject(self): ret = "" mid = self.fields['project'] - if(mid != None): + if(mid is not None): ret = mid['key'] return mstr(ret) @@ -214,7 +221,7 @@ def __cmp__(self,other): return False def 
getIncompatibleChange(self): - if (self.incompat == None): + if (self.incompat is None): field = self.parent.fieldIdMap['Hadoop Flags'] self.reviewed=False self.incompat=False @@ -227,6 +234,24 @@ def getIncompatibleChange(self): self.reviewed=True return self.incompat + def checkMissingComponent(self): + if (len(self.fields['components'])>0): + return False + return True + + def checkMissingAssignee(self): + if (self.fields['assignee'] is not None): + return False + return True + + def checkVersionString(self): + field = self.parent.fieldIdMap['Fix Version/s'] + for h in self.fields[field]: + found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name']) + if not found: + return True + return False + def getReleaseDate(self,version): for j in range(len(self.fields['fixVersions'])): if self.fields['fixVersions'][j]==version: @@ -339,9 +364,11 @@ def main(): help="build an index file") parser.add_option("-u","--usetoday", dest="usetoday", action="store_true", help="use current date for unreleased versions") + parser.add_option("-n","--lint", dest="lint", action="store_true", + help="use lint flag to exit on failures") (options, args) = parser.parse_args() - if (options.versions == None): + if (options.versions is None): options.versions = [] if (len(args) > 2): @@ -396,6 +423,9 @@ def main(): reloutputs.writeAll(relhead) choutputs.writeAll(chhead) + errorCount=0 + warningCount=0 + lintMessage="" incompatlist=[] buglist=[] improvementlist=[] @@ -408,6 +438,14 @@ def main(): for jira in sorted(jlist): if jira.getIncompatibleChange(): incompatlist.append(jira) + if (len(jira.getReleaseNote())==0): + warningCount+=1 + + if jira.checkVersionString(): + warningCount+=1 + + if jira.checkMissingComponent() or jira.checkMissingAssignee(): + errorCount+=1 elif jira.getType() == "Bug": buglist.append(jira) elif jira.getType() == "Improvement": @@ -431,15 +469,33 @@ def main(): reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") reloutputs.writeKeyRaw(jira.getProject(), line) line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' - print 'WARNING: incompatible change %s lacks release notes.' % (notableclean(jira.getId())) + lintMessage += "\nWARNING: incompatible change %s lacks release notes." 
% (notableclean(jira.getId())) reloutputs.writeKeyRaw(jira.getProject(), line) + if jira.checkVersionString(): + lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() + + if (jira.checkMissingComponent() or jira.checkMissingAssignee()): + errorMessage=[] + jira.checkMissingComponent() and errorMessage.append("component") + jira.checkMissingAssignee() and errorMessage.append("assignee") + lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId()) + if (len(jira.getReleaseNote())>0): reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") reloutputs.writeKeyRaw(jira.getProject(), line) line ='\n%s\n\n' % (tableclean(jira.getReleaseNote())) reloutputs.writeKeyRaw(jira.getProject(), line) + if (options.lint is True): + print lintMessage + print "=======================================" + print "Error:%d, Warning:%d \n" % (errorCount, warningCount) + + if (errorCount>0): + cleanOutputDir(version) + sys.exit(1) + reloutputs.writeAll("\n\n") reloutputs.close() diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e1d9ca9ccb2ca..aa7806f91c50e 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -227,6 +227,8 @@ Trunk (Unreleased) HADOOP-11142. Remove hdfs dfs reference from file system shell documentation (Kengo Seki via aw) + HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw) + BUG FIXES HADOOP-11473. test-patch says "-1 overall" even when all checks are +1 From 2f801d6415c1a6cadce8bc9f7c2101f2e79b4eeb Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 29 Jun 2015 11:27:09 -0700 Subject: [PATCH 006/130] HADOOP-12134. Pig personality always fails at precheck_javac and check_patch_javac (Kengo Seki via aw) --- dev-support/personality/pig.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/personality/pig.sh b/dev-support/personality/pig.sh index 69dabf1f5b897..d01a410f30f2f 100755 --- a/dev-support/personality/pig.sh +++ b/dev-support/personality/pig.sh @@ -37,21 +37,21 @@ function personality_modules case ${testtype} in findbugs) - # shellcheck disable=SC2034 + # shellcheck disable=SC2034 ANT_FINDBUGSXML="${BASEDIR}/build/test/findbugs/pig-findbugs-report.xml" extra="-Dfindbugs.home=${FINDBUGS_HOME}" ;; javac) - extra="${extra} -Djavac.args=-Xlint -Dcompile.c++=yes clean tar" + extra="${extra} -Djavac.args=-Xlint -Dcompile.c++=yes clean piggybank" ;; javadoc) extra="${extra} -Dforrest.home=${FORREST_HOME}" ;; unit) - extra="${extra} -Dtest.junit.output.format=xml -Dcompile.c++=yes -Dtest.output=yes test-core" + extra="${extra} -Dtest.junit.output.format=xml -Dcompile.c++=yes -Dtest.output=yes test-core" ;; esac - # shellcheck disable=SC2086 + # shellcheck disable=SC2086 personality_enqueue_module . 
${extra} } From 6225dccb8a6863f2603f91f06f18538409e4b4f1 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 29 Jun 2015 11:49:02 -0700 Subject: [PATCH 007/130] Removing ./hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt --- hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt diff --git a/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt b/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt deleted file mode 100644 index cb2f8887d4774..0000000000000 --- a/hadoop-common-project/hadoop-common/CHANGES-HADOOP-12111.txt +++ /dev/null @@ -1,3 +0,0 @@ - Breakdown of HADOOP-12111 sub-tasks: - - HADOOP-12113. update test-patch branch to latest code (aw) From c5815a6ac8a9d82cc9cd42c7f4d330a0f526821a Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 29 Jun 2015 11:50:50 -0700 Subject: [PATCH 008/130] HADOOP-12142. Test code modification is not detected if test directory is at the top level of the project --- dev-support/test-patch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index e1dadd282c833..b958abfb58758 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -2339,7 +2339,7 @@ function check_modified_unittests start_clock for i in ${CHANGED_FILES}; do - if [[ ${i} =~ /test/ ]]; then + if [[ ${i} =~ (^|/)test/ ]]; then ((testReferences=testReferences + 1)) fi done From 1b80406285453c52bf849bc2ec6bb90a7ddd4815 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 30 Jun 2015 11:11:06 -0700 Subject: [PATCH 009/130] HADOOP-12147. bundled dockerfile should use the JDK version of openjdk, not JRE (aw) --- dev-support/test-patch-docker/Dockerfile-startstub | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index 1afd435103de0..e534f14bc1839 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -32,7 +32,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libjansson-dev \ fuse libfuse-dev \ libcurl4-openssl-dev \ - python python2.7 + python python2.7 \ + openjdk-7-jdk # Fixing the Apache commons / Maven dependency problem under Ubuntu: # See http://wiki.apache.org/commons/VfsProblems From b4c7aa1126094d48be4e57da710f18b13c5c3fc1 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 30 Jun 2015 13:07:30 -0700 Subject: [PATCH 010/130] HADOOP-11914. test-patch.sh confused by certain patch formats (Kengo Seki via aw) --- dev-support/test-patch.sh | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index b958abfb58758..f93cb4a1a431e 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1060,16 +1060,11 @@ function find_changed_files { # get a list of all of the files that have been changed, # except for /dev/null (which would be present for new files). - # Additionally, remove any a/ b/ patterns at the front - # of the patch filenames and any revision info at the end + # Additionally, remove any a/ b/ patterns at the front of the patch filenames. 
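+  # An illustrative example (hypothetical filename): given headers like
+  #   diff --git a/foo/Bar.java b/foo/Bar.java
+  #   --- a/foo/Bar.java
+  #   +++ b/foo/Bar.java
+  # the awk program below prints "foo/Bar.java" exactly once, with the
+  # a/ b/ prefixes stripped and /dev/null (new files) filtered out.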
# shellcheck disable=SC2016 - CHANGED_FILES=$(${GREP} -E '^(\+\+\+|---) ' "${PATCH_DIR}/patch" \ - | ${SED} \ - -e 's,^....,,' \ - -e 's,^[ab]/,,' \ - | ${GREP} -v /dev/null \ - | ${AWK} '{print $1}' \ - | sort -u) + CHANGED_FILES=$(${AWK} 'function p(s){sub("^[ab]/","",s); if(s!~"^/dev/null"){print s}} + /^diff --git / { p($3); p($4) } + /^(\+\+\+|---) / { p($2) }' "${PATCH_DIR}/patch" | sort -u) } ## @description Find the modules of the build that ${PATCH_DIR}/patch modifies @@ -1453,7 +1448,7 @@ function determine_needed_tests local i for i in ${CHANGED_FILES}; do - + yetus_debug "Determining needed tests for ${i}" personality_file_tests "${i}" for plugin in ${PLUGINS}; do From 76ce1ce73b2df077a64db8f848443094460ae534 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 6 Jul 2015 15:47:32 -0700 Subject: [PATCH 011/130] HADOOP-12127. some personalities are still using releaseaudit instead of asflicense (aw) --- dev-support/personality/flink.sh | 2 +- dev-support/personality/hadoop.sh | 2 +- dev-support/personality/hbase.sh | 2 +- dev-support/personality/tajo.sh | 2 +- dev-support/personality/tez.sh | 2 +- dev-support/test-patch.d/{apache-rat.sh => asflicense.sh} | 0 6 files changed, 5 insertions(+), 5 deletions(-) rename dev-support/test-patch.d/{apache-rat.sh => asflicense.sh} (100%) diff --git a/dev-support/personality/flink.sh b/dev-support/personality/flink.sh index a32e2d643b432..de2a0f1b0b5df 100755 --- a/dev-support/personality/flink.sh +++ b/dev-support/personality/flink.sh @@ -113,7 +113,7 @@ function personality_modules fi return ;; - releaseaudit) + asflicense) # this is very fast and provides the full path if we do it from # the root of the source personality_enqueue_module . diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 059d051640284..7722afb1bf349 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -183,7 +183,7 @@ function personality_modules return fi ;; - releaseaudit) + asflicense) # this is very fast and provides the full path if we do it from # the root of the source personality_enqueue_module . diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh index 46ad3902bc14c..d8ca9010af5b9 100755 --- a/dev-support/personality/hbase.sh +++ b/dev-support/personality/hbase.sh @@ -50,7 +50,7 @@ function personality_modules fi return ;; - releaseaudit) + asflicense) # this is very fast and provides the full path if we do it from # the root of the source personality_enqueue_module . -DHBasePatchProcess diff --git a/dev-support/personality/tajo.sh b/dev-support/personality/tajo.sh index 719badae7e47a..56e544243e3f1 100755 --- a/dev-support/personality/tajo.sh +++ b/dev-support/personality/tajo.sh @@ -40,7 +40,7 @@ function personality_modules fi return ;; - releaseaudit) + asflicense) # this is very fast and provides the full path if we do it from # the root of the source personality_enqueue_module . diff --git a/dev-support/personality/tez.sh b/dev-support/personality/tez.sh index 77ad624dfeb9f..1d6a2278d4a18 100755 --- a/dev-support/personality/tez.sh +++ b/dev-support/personality/tez.sh @@ -40,7 +40,7 @@ function personality_modules fi return ;; - releaseaudit) + asflicense) # this is very fast and provides the full path if we do it from # the root of the source personality_enqueue_module . 
diff --git a/dev-support/test-patch.d/apache-rat.sh b/dev-support/test-patch.d/asflicense.sh similarity index 100% rename from dev-support/test-patch.d/apache-rat.sh rename to dev-support/test-patch.d/asflicense.sh From 3fee9f8d18dd60d83da674b3cfbefe666915fad8 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 6 Jul 2015 15:49:03 -0700 Subject: [PATCH 012/130] HADOOP-12135. cleanup releasedocmaker --- dev-support/releasedocmaker.py | 384 ++++++++++++++++++--------------- 1 file changed, 207 insertions(+), 177 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 8e68b3cb9e02b..6e0126073c0ec 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -19,6 +19,7 @@ from glob import glob from optparse import OptionParser from time import gmtime, strftime +import pprint import os import re import sys @@ -99,23 +100,44 @@ def mstr(obj): return "" return unicode(obj) -def buildindex(master): +def buildindex(title,license): versions=reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*"))) with open("index.md","w") as indexfile: + if license is True: + indexfile.write(asflicense) for v in versions: - indexfile.write("* Apache Hadoop v%s\n" % (v)) + indexfile.write("* %s v%s\n" % (title,v)) for k in ("Changes","Release Notes"): - indexfile.write(" * %s\n" %(k)) - indexfile.write(" * [Combined %s](%s/%s.%s.html)\n" \ + indexfile.write(" * %s (%s/%s.%s.html)\n" \ % (k,v,k.upper().replace(" ",""),v)) - if not master: - indexfile.write(" * [Hadoop Common %s](%s/%s.HADOOP.%s.html)\n" \ - % (k,v,k.upper().replace(" ",""),v)) - for p in ("HDFS","MapReduce","YARN"): - indexfile.write(" * [%s %s](%s/%s.%s.%s.html)\n" \ - % (p,k,v,k.upper().replace(" ",""),p.upper(),v)) indexfile.close() +class GetVersions: + """ yo """ + def __init__(self,versions, projects): + versions = versions + projects = projects + self.newversions = [] + pp = pprint.PrettyPrinter(indent=4) + at=0 + end=1 + count=100 + versions.sort() + print "Looking for %s through %s"%(versions[0],versions[-1]) + for p in projects: + resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p) + data = json.loads(resp.read()) + for d in data: + if d['name'][0].isdigit and versions[0] <= d['name'] and d['name'] <= versions[-1]: + print "Adding %s to the list" % d['name'] + self.newversions.append(d['name']) + newlist=list(set(self.newversions)) + self.newversions=newlist + + def getlist(self): + pp = pprint.PrettyPrinter(indent=4) + return(self.newversions) + class Version: """Represents a version number""" def __init__(self, data): @@ -261,8 +283,10 @@ def getReleaseDate(self,version): class JiraIter: """An Iterator of JIRAs""" - def __init__(self, versions): - self.versions = versions + def __init__(self, version, projects): + self.version = version + self.projects = projects + v=str(version).replace("-SNAPSHOT","") resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field") data = json.loads(resp.read()) @@ -276,7 +300,7 @@ def __init__(self, versions): end=1 count=100 while (at < end): - params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('"+"' , '".join([str(v).replace("-SNAPSHOT","") for v in versions])+"') and resolution = Fixed", 'startAt':at, 'maxResults':count}) + params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count}) resp = 
urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params) data = json.loads(resp.read()) if (data.has_key('errorMessages')): @@ -286,10 +310,8 @@ def __init__(self, versions): self.jiras.extend(data['issues']) needaversion=False - for j in versions: - v=str(j).replace("-SNAPSHOT","") - if v not in releaseVersion: - needaversion=True + if v not in releaseVersion: + needaversion=True if needaversion is True: for i in range(len(data['issues'])): @@ -351,21 +373,29 @@ def writeList(self, mylist): self.writeKeyRaw(jira.getProject(), line) def main(): - parser = OptionParser(usage="usage: %prog --version VERSION [--version VERSION2 ...]", + parser = OptionParser(usage="usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]", epilog= "Markdown-formatted CHANGES and RELEASENOTES files will be stored in a directory" " named after the highest version provided.") - parser.add_option("-v", "--version", dest="versions", - action="append", type="string", - help="versions in JIRA to include in releasenotes", metavar="VERSION") - parser.add_option("-m","--master", dest="master", action="store_true", - help="only create the master, merged project files") parser.add_option("-i","--index", dest="index", action="store_true", - help="build an index file") - parser.add_option("-u","--usetoday", dest="usetoday", action="store_true", - help="use current date for unreleased versions") + default=False, help="build an index file") + parser.add_option("-l","--license", dest="license", action="store_false", + default=True, help="Add an ASF license") parser.add_option("-n","--lint", dest="lint", action="store_true", help="use lint flag to exit on failures") + parser.add_option("-p", "--project", dest="projects", + action="append", type="string", + help="projects in JIRA to include in releasenotes", metavar="PROJECT") + parser.add_option("-r", "--range", dest="range", action="store_true", + default=False, help="Given versions are a range") + parser.add_option("-t", "--projecttitle", dest="title", + type="string", + help="Title to use for the project (default is Apache PROJECT)") + parser.add_option("-u","--usetoday", dest="usetoday", action="store_true", + default=False, help="use current date for unreleased versions") + parser.add_option("-v", "--version", dest="versions", + action="append", type="string", + help="versions in JIRA to include in releasenotes", metavar="VERSION") (options, args) = parser.parse_args() if (options.versions is None): @@ -377,169 +407,169 @@ def main(): if (len(options.versions) <= 0): parser.error("At least one version needs to be supplied") - versions = [ Version(v) for v in options.versions ]; + projects = options.projects + + if (options.range is True): + versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ] + else: + versions = [ Version(v) for v in options.versions ] versions.sort(); - maxVersion = str(versions[-1]) + if (options.title is None): + title=projects[0] + else: + title=options.title - jlist = JiraIter(versions) - version = maxVersion + for v in versions: + vstr=str(v) + jlist = JiraIter(vstr,projects) - if version in releaseVersion: - reldate=releaseVersion[version] - elif options.usetoday: - reldate=strftime("%Y-%m-%d", gmtime()) - else: - reldate="Unreleased" + if vstr in releaseVersion: + reldate=releaseVersion[vstr] + elif options.usetoday: + reldate=strftime("%Y-%m-%d", gmtime()) + else: + reldate="Unreleased" - if not os.path.exists(version): - os.mkdir(version) + if not 
os.path.exists(vstr): + os.mkdir(vstr) - if options.master: reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md", "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md", - [], {"ver":maxVersion, "date":reldate}) + [], {"ver":v, "date":reldate, "title":title}) choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md", "%(ver)s/CHANGES.%(key)s.%(ver)s.md", - [], {"ver":maxVersion, "date":reldate}) - else: - reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md", - "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md", - ["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "date":reldate}) - choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md", - "%(ver)s/CHANGES.%(key)s.%(ver)s.md", - ["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "date":reldate}) - - reloutputs.writeAll(asflicense) - choutputs.writeAll(asflicense) - - relhead = '# Hadoop %(key)s %(ver)s Release Notes\n\n' \ - 'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n' - - chhead = '# Hadoop Changelog\n\n' \ - '## Release %(ver)s - %(date)s\n'\ - '\n' - - reloutputs.writeAll(relhead) - choutputs.writeAll(chhead) - - errorCount=0 - warningCount=0 - lintMessage="" - incompatlist=[] - buglist=[] - improvementlist=[] - newfeaturelist=[] - subtasklist=[] - tasklist=[] - testlist=[] - otherlist=[] - - for jira in sorted(jlist): - if jira.getIncompatibleChange(): - incompatlist.append(jira) - if (len(jira.getReleaseNote())==0): - warningCount+=1 - - if jira.checkVersionString(): - warningCount+=1 - - if jira.checkMissingComponent() or jira.checkMissingAssignee(): - errorCount+=1 - elif jira.getType() == "Bug": - buglist.append(jira) - elif jira.getType() == "Improvement": - improvementlist.append(jira) - elif jira.getType() == "New Feature": - newfeaturelist.append(jira) - elif jira.getType() == "Sub-task": - subtasklist.append(jira) - elif jira.getType() == "Task": - tasklist.append(jira) - elif jira.getType() == "Test": - testlist.append(jira) - else: - otherlist.append(jira) - - line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \ - % (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()), - notableclean(jira.getSummary())) - - if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): - reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") - reloutputs.writeKeyRaw(jira.getProject(), line) - line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' - lintMessage += "\nWARNING: incompatible change %s lacks release notes." 
% (notableclean(jira.getId())) - reloutputs.writeKeyRaw(jira.getProject(), line) - - if jira.checkVersionString(): - lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() - - if (jira.checkMissingComponent() or jira.checkMissingAssignee()): - errorMessage=[] - jira.checkMissingComponent() and errorMessage.append("component") - jira.checkMissingAssignee() and errorMessage.append("assignee") - lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId()) - - if (len(jira.getReleaseNote())>0): - reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") - reloutputs.writeKeyRaw(jira.getProject(), line) - line ='\n%s\n\n' % (tableclean(jira.getReleaseNote())) - reloutputs.writeKeyRaw(jira.getProject(), line) - - if (options.lint is True): - print lintMessage - print "=======================================" - print "Error:%d, Warning:%d \n" % (errorCount, warningCount) - - if (errorCount>0): - cleanOutputDir(version) - sys.exit(1) - - reloutputs.writeAll("\n\n") - reloutputs.close() - - choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(incompatlist) - - choutputs.writeAll("\n\n### NEW FEATURES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(newfeaturelist) - - choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(improvementlist) - - choutputs.writeAll("\n\n### BUG FIXES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(buglist) - - choutputs.writeAll("\n\n### TESTS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(testlist) - - choutputs.writeAll("\n\n### SUB-TASKS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(subtasklist) - - choutputs.writeAll("\n\n### OTHER:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(otherlist) - choutputs.writeList(tasklist) - - choutputs.writeAll("\n\n") - choutputs.close() + [], {"ver":v, "date":reldate, "title":title}) + + if (options.license is True): + reloutputs.writeAll(asflicense) + choutputs.writeAll(asflicense) + + relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \ + 'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n' + chhead = '# %(title)s Changelog\n\n' \ + '## Release %(ver)s - %(date)s\n'\ + '\n' + + reloutputs.writeAll(relhead) + choutputs.writeAll(chhead) + errorCount=0 + warningCount=0 + lintMessage="" + incompatlist=[] + buglist=[] + improvementlist=[] + newfeaturelist=[] + subtasklist=[] + tasklist=[] + testlist=[] + otherlist=[] + + for jira in sorted(jlist): + if 
jira.getIncompatibleChange(): + incompatlist.append(jira) + if (len(jira.getReleaseNote())==0): + warningCount+=1 + + if jira.checkVersionString(): + warningCount+=1 + + if jira.checkMissingComponent() or jira.checkMissingAssignee(): + errorCount+=1 + elif jira.getType() == "Bug": + buglist.append(jira) + elif jira.getType() == "Improvement": + improvementlist.append(jira) + elif jira.getType() == "New Feature": + newfeaturelist.append(jira) + elif jira.getType() == "Sub-task": + subtasklist.append(jira) + elif jira.getType() == "Task": + tasklist.append(jira) + elif jira.getType() == "Test": + testlist.append(jira) + else: + otherlist.append(jira) + + line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \ + % (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()), + notableclean(jira.getSummary())) + + if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): + reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") + reloutputs.writeKeyRaw(jira.getProject(), line) + line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' + lintMessage += "\nWARNING: incompatible change %s lacks release notes." % (notableclean(jira.getId())) + reloutputs.writeKeyRaw(jira.getProject(), line) + + if jira.checkVersionString(): + lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() + + if (jira.checkMissingComponent() or jira.checkMissingAssignee()): + errorMessage=[] + jira.checkMissingComponent() and errorMessage.append("component") + jira.checkMissingAssignee() and errorMessage.append("assignee") + lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId()) + + if (len(jira.getReleaseNote())>0): + reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") + reloutputs.writeKeyRaw(jira.getProject(), line) + line ='\n%s\n\n' % (tableclean(jira.getReleaseNote())) + reloutputs.writeKeyRaw(jira.getProject(), line) + + if (options.lint is True): + print lintMessage + print "=======================================" + print "Error:%d, Warning:%d \n" % (errorCount, warningCount) + + if (errorCount>0): + cleanOutputDir(version) + sys.exit(1) + + reloutputs.writeAll("\n\n") + reloutputs.close() + + choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(incompatlist) + + choutputs.writeAll("\n\n### NEW FEATURES:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(newfeaturelist) + + choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(improvementlist) + + choutputs.writeAll("\n\n### BUG FIXES:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(buglist) + + choutputs.writeAll("\n\n### TESTS:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(testlist) + + choutputs.writeAll("\n\n### SUB-TASKS:\n\n") + 
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(subtasklist) + + choutputs.writeAll("\n\n### OTHER:\n\n") + choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.writeList(otherlist) + choutputs.writeList(tasklist) + + choutputs.writeAll("\n\n") + choutputs.close() if options.index: - buildindex(options.master) + buildindex(title,options.license) if __name__ == "__main__": main() From dcde7e4a23ca0a5dcd4b01104d2d39365b557bac Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 6 Jul 2015 15:50:46 -0700 Subject: [PATCH 013/130] HADOOP-12156. modernize smart-apply-patch (aw) --- dev-support/smart-apply-patch.sh | 628 ++++++++++++++++++++++++------- dev-support/test-patch.sh | 2 +- 2 files changed, 492 insertions(+), 138 deletions(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index ddfd940064fe0..bfd2aeb585f29 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -11,179 +11,533 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Determine if the git diff patch file has prefixes. -# These files are generated via "git diff" *without* the --no-prefix option. -# -# We can apply these patches more easily because we know that the a/ and b/ -# prefixes in the "diff" lines stands for the project root directory. -# So we don't have to hunt for the project root. -# And of course, we know that the patch file was generated using git, so we -# know git apply can handle it properly. -# -# Arguments: git diff file name. -# Return: 0 if it is a git diff with prefix; 1 otherwise. -# -has_prefix() { - awk '/^diff --git / { if ($3 !~ "^a/" || $4 !~ "^b/") { exit 1 } } - /^\+{3}|-{3} / { if ($2 !~ "^[ab]/" && $2 !~ "^/dev/null") { exit 1 } }' "$1" - return $? -} +# Make sure that bash version meets the pre-requisite -PATCH_FILE=$1 -DRY_RUN=$2 -if [ -z "$PATCH_FILE" ]; then - echo usage: $0 patch-file +if [[ -z "${BASH_VERSINFO}" ]] \ + || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \ + || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then + echo "bash v3.2+ is required. Sorry." 
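+  # (the -z guard above also catches shells so old that BASH_VERSINFO
+  # does not exist at all)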
exit 1 fi -TMPDIR=${TMPDIR:-/tmp} -PATCH=${PATCH:-patch} # allow overriding patch binary +RESULT=0 + +## @description Print a message to stderr +## @audience public +## @stability stable +## @replaceable no +## @param string +function yetus_error +{ + echo "$*" 1>&2 +} -# Cleanup handler for temporary files -TOCLEAN="" -cleanup() { - if [[ -n ${TOCLEAN} ]]; then - rm $TOCLEAN +## @description Print a message to stderr if --debug is turned on +## @audience public +## @stability stable +## @replaceable no +## @param string +function yetus_debug +{ + if [[ -n "${YETUS_SHELL_SCRIPT_DEBUG}" ]]; then + echo "[$(date) DEBUG]: $*" 1>&2 fi - exit $1 } -trap "cleanup 1" HUP INT QUIT TERM -# Allow passing "-" for stdin patches -if [ "$PATCH_FILE" == "-" ]; then - PATCH_FILE="$TMPDIR/smart-apply.in.$RANDOM" - cat /dev/fd/0 > $PATCH_FILE - TOCLEAN="$TOCLEAN $PATCH_FILE" -fi +## @description Clean the filesystem as appropriate and then exit +## @audience private +## @stability evolving +## @replaceable no +## @param runresult +function cleanup_and_exit +{ + local result=$1 -ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' -if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ ${ISSUE_RE} ]]; then - # Allow downloading of patches - PFILE="$TMPDIR/smart-apply.in.$RANDOM" - TOCLEAN="$TOCLEAN $PFILE" - if [[ ${PATCH_FILE} =~ ^http ]]; then - patchURL="${PATCH_FILE}" - else # Get URL of patch from JIRA - wget -q -O "${PFILE}" "http://issues.apache.org/jira/browse/${PATCH_FILE}" - if [[ $? != 0 ]]; then - echo "Unable to determine what ${PATCH_FILE} may reference." 1>&2 - cleanup 1 - elif [[ $(grep -c 'Patch Available' "${PFILE}") == 0 ]]; then - echo "${PATCH_FILE} is not \"Patch Available\". Exiting." 1>&2 - cleanup 1 - fi - relativePatchURL=$(grep -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PFILE}" | grep -v -e 'htm[l]*$' | sort | tail -1 | grep -o '/jira/secure/attachment/[0-9]*/[^"]*') - patchURL="http://issues.apache.org${relativePatchURL}" + if [[ ${PATCH_DIR} =~ ^/tmp/apply-patch + && -d ${PATCH_DIR} ]]; then + rm -rf "${PATCH_DIR}" + fi + + # shellcheck disable=SC2086 + exit ${result} +} + +## @description Setup the default global variables +## @audience public +## @stability stable +## @replaceable no +function setup_defaults +{ + PATCHURL="" + OSTYPE=$(uname -s) + + # Solaris needs POSIX, not SVID + case ${OSTYPE} in + SunOS) + AWK=${AWK:-/usr/xpg4/bin/awk} + SED=${SED:-/usr/xpg4/bin/sed} + WGET=${WGET:-wget} + GIT=${GIT:-git} + GREP=${GREP:-/usr/xpg4/bin/grep} + PATCH=${PATCH:-/usr/gnu/bin/patch} + DIFF=${DIFF:-/usr/gnu/bin/diff} + FILE=${FILE:-file} + ;; + *) + AWK=${AWK:-awk} + SED=${SED:-sed} + WGET=${WGET:-wget} + GIT=${GIT:-git} + GREP=${GREP:-grep} + PATCH=${PATCH:-patch} + DIFF=${DIFF:-diff} + FILE=${FILE:-file} + ;; + esac + + DRYRUNMODE=false + PATCH_DIR=/tmp + while [[ -e ${PATCH_DIR} ]]; do + PATCH_DIR=/tmp/apply-patch-${RANDOM}.${RANDOM} + done + PATCHMODES=("git" "patch") + PATCHMODE="" + PATCHPREFIX=0 +} + +## @description Print the usage information +## @audience public +## @stability stable +## @replaceable no +function yetus_usage +{ + echo "Usage: apply-patch.sh [options] patch-file | issue-number | http" + echo + echo "--debug If set, then output some extra stuff to stderr" + echo "--dry-run Check for patch viability without applying" + echo "--patch-dir= The directory for working and output files (default '/tmp/apply-patch-(random))" + echo + echo "Shell binary overrides:" + echo "--file-cmd= The 'file' command to use (default 'file')" + echo "--grep-cmd= The 'grep' command to use 
(default 'grep')" + echo "--git-cmd= The 'git' command to use (default 'git')" + echo "--patch-cmd= The GNU-compatible 'patch' command to use (default 'patch')" + echo "--wget-cmd= The 'wget' command to use (default 'wget')" +} + +## @description Interpret the command line parameters +## @audience private +## @stability stable +## @replaceable no +## @params $@ +## @return May exit on failure +function parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --debug) + YETUS_SHELL_SCRIPT_DEBUG=true + ;; + --dry-run) + DRYRUNMODE=true + ;; + --file-cmd=*) + FILE=${i#*=} + ;; + --git-cmd=*) + GIT=${i#*=} + ;; + --grep-cmd=*) + GREP=${i#*=} + ;; + --help|-help|-h|help|--h|--\?|-\?|\?) + yetus_usage + exit 0 + ;; + --patch-cmd=*) + PATCH=${i#*=} + ;; + --patch-dir=*) + PATCH_DIR=${i#*=} + ;; + --wget-cmd=*) + WGET=${i#*=} + ;; + --*) + ## PATCH_OR_ISSUE can't be a --. So this is probably + ## a plugin thing. + continue + ;; + *) + PATCH_OR_ISSUE=${i#*=} + ;; + esac + done + + if [[ ! -d ${PATCH_DIR} ]]; then + mkdir -p "${PATCH_DIR}" + if [[ $? != 0 ]] ; then + yetus_error "ERROR: Unable to create ${PATCH_DIR}" + cleanup_and_exit 1 + fi fi - if [[ -n $DRY_RUN ]]; then - echo "Downloading ${patchURL}" +} + +## @description Given a possible patch file, guess if it's a patch file without using smart-apply-patch +## @audience private +## @stability evolving +## @param path to patch file to test +## @return 0 we think it's a patch file +## @return 1 we think it's not a patch file +function guess_patch_file +{ + local patch=$1 + local fileOutput + + yetus_debug "Trying to guess is ${patch} is a patch file." + fileOutput=$("${FILE}" "${patch}") + if [[ $fileOutput =~ \ diff\ ]]; then + yetus_debug "file magic says it's a diff." + return 0 fi - wget -q -O "${PFILE}" "${patchURL}" - if [[ $? != 0 ]]; then - echo "${PATCH_FILE} could not be downloaded." 1>&2 - cleanup 1 + fileOutput=$(head -n 1 "${patch}" | "${GREP}" -E "^(From [a-z0-9]* Mon Sep 17 00:00:00 2001)|(diff .*)|(Index: .*)$") + if [[ $? == 0 ]]; then + yetus_debug "first line looks like a patch file." + return 0 fi - PATCH_FILE="${PFILE}" -fi + return 1 +} + +## @description Given ${PATCH_ISSUE}, determine what type of patch file is in use, and do the +## @description necessary work to place it into ${PATCH_DIR}/patch. +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure, may exit +function locate_patch +{ + local notSureIfPatch=false + yetus_debug "locate patch" -# Case for git-diff patches -if grep -q "^diff --git" "${PATCH_FILE}"; then - GIT_FLAGS="--binary -v" - if has_prefix "$PATCH_FILE"; then - GIT_FLAGS="$GIT_FLAGS -p1" + # Allow passing "-" for stdin patches + if [[ ${PATCH_OR_ISSUE} == - ]]; then + PATCH_FILE="${PATCH_DIR}/patch" + cat /dev/fd/0 > "${PATCH_FILE}" + elif [[ -f ${PATCH_OR_ISSUE} ]]; then + PATCH_FILE="${PATCH_OR_ISSUE}" else - GIT_FLAGS="$GIT_FLAGS -p0" + if [[ ${PATCH_OR_ISSUE} =~ ^http ]]; then + echo "Patch is being downloaded at $(date) from" + PATCHURL="${PATCH_OR_ISSUE}" + else + ${WGET} -q -O "${PATCH_DIR}/jira" "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" + + case $? in + 0) + ;; + 2) + yetus_error "ERROR: .wgetrc/.netrc parsing error." + cleanup_and_exit 1 + ;; + 3) + yetus_error "ERROR: File IO error." + cleanup_and_exit 1 + ;; + 4) + yetus_error "ERROR: URL ${PATCH_OR_ISSUE} is unreachable." + cleanup_and_exit 1 + ;; + *) + yetus_error "ERROR: Unable to fetch ${PATCH_OR_ISSUE}." 
+          cleanup_and_exit 1
+          ;;
+      esac
+
+      if [[ -z "${PATCH_FILE}" ]]; then
+        if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then
+          if [[ ${JENKINS} == true ]]; then
+            yetus_error "ERROR: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
+            cleanup_and_exit 1
+          else
+            yetus_error "WARNING: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
+          fi
+        fi
+
+        relativePatchURL=$(${GREP} -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${GREP} -o '/jira/secure/attachment/[0-9]*/[^"]*')
+        PATCHURL="http://issues.apache.org${relativePatchURL}"
+        if [[ ! ${PATCHURL} =~ \.patch$ ]]; then
+          notSureIfPatch=true
+        fi
+        echo "${PATCH_OR_ISSUE} patch is being downloaded at $(date) from"
+      fi
+    fi
+    if [[ -z "${PATCH_FILE}" ]]; then
+      ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}"
+      if [[ $? != 0 ]];then
+        yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded."
+        cleanup_and_exit 1
+      fi
+      PATCH_FILE="${PATCH_DIR}/patch"
+    fi
+  fi
 
-  if [[ -n $DRY_RUN ]]; then
-    echo "Downloading ${patchURL}"
-  fi
-  wget -q -O "${PFILE}" "${patchURL}"
-  if [[ $? != 0 ]]; then
-    echo "${PATCH_FILE} could not be downloaded." 1>&2
-    cleanup 1
-  fi
-  PATCH_FILE="${PFILE}"
-fi
 
+  if [[ ! -f "${PATCH_DIR}/patch" ]]; then
+    cp "${PATCH_FILE}" "${PATCH_DIR}/patch"
+    if [[ $? == 0 ]] ; then
+      echo "Patch file ${PATCH_FILE} copied to ${PATCH_DIR}"
+    else
+      yetus_error "ERROR: Could not copy ${PATCH_FILE} to ${PATCH_DIR}"
+      cleanup_and_exit 1
+    fi
+  fi
 
+  if [[ ${notSureIfPatch} == "true" ]]; then
+    guess_patch_file "${PATCH_FILE}"
+    if [[ $? != 0 ]]; then
+      yetus_error "ERROR: ${PATCHURL} is not a patch file."
+      cleanup_and_exit 1
+    else
+      yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. Proceeding, but issue/branch matching might go awry."
+    fi
+  fi
+}
+
+## @description  if patch-level zero, then verify we aren't
+## @description  just adding files
+## @audience     public
+## @stability    stable
+## @param        filename
+## @param        command
+## @param        [..]
+## @replaceable  no
+## @returns      $?
+function verify_zero
+{
+  local logfile=$1
+  shift
+  local dir
+
+  # don't return /dev/null
+  # shellcheck disable=SC2016
+  changed_files1=$(${AWK} 'function p(s){if(s!~"^/dev/null"){print s}}
+    /^diff --git / { p($3); p($4) }
+    /^(\+\+\+|---) / { p($2) }' "${PATCH_DIR}/patch" | sort -u)
+
+  # maybe we interpreted the patch wrong? check the log file
+  # shellcheck disable=SC2016
+  changed_files2=$(${GREP} -E '^[cC]heck' "${logfile}" \
+    | ${AWK} '{print $3}' \
+    | ${SED} -e 's,\.\.\.$,,g')
 
-if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
-  PLEVEL=0
-  #if the patch applied at P0 there is the possability that all we are doing
-  # is adding new files and they would apply anywhere. So try to guess the
-  # correct place to put those files.
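+  # Illustrative note (hypothetical sample, not from a real run): for a
+  # git-format patch, changed_files1 holds prefix-carrying paths such as
+  # "a/dev-support/test-patch.sh" and "b/dev-support/test-patch.sh", while
+  # changed_files2 holds the names GNU patch logged while checking files,
+  # trailing "..." stripped; the loop below walks both lists.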
+ for filename in ${changed_files1} ${changed_files2}; do - TMP2="$TMPDIR/smart-apply.paths.2.$RANDOM" - TOCLEAN="$TOCLEAN $TMP2" + # leading prefix = bad + if [[ ${filename} =~ ^(a|b)/ ]]; then + return 1 + fi + + # touching an existing file is proof enough + # that pl=0 is good + if [[ -f ${filename} ]]; then + return 0 + fi - egrep '^patching file |^checking file ' $TMP | awk '{print $3}' | grep -v /dev/null | sort -u > $TMP2 + dir=$(dirname ${filename} 2>/dev/null) + if [[ -n ${dir} && -d ${dir} ]]; then + return 0 + fi + done - if [ ! -s $TMP2 ]; then - echo "Error: Patch dryrun couldn't detect changes the patch would make. Exiting." - cleanup 1 + # ¯\_(ツ)_/¯ - no way for us to know, all new files with no prefix! + yetus_error "WARNING: Patch only adds files; using patch level ${PATCHPREFIX}" + return 0 +} + +## @description run the command, sending stdout and stderr to the given filename +## @audience public +## @stability stable +## @param filename +## @param command +## @param [..] +## @replaceable no +## @returns $? +function run_and_redirect +{ + local logfile=$1 + shift + + # to the log + echo "${*}" > "${logfile}" + # the actual command + "${@}" >> "${logfile}" 2>&1 +} + +## @description git patch dryrun +## @replaceable no +## @audience private +## @stability evolving +function git_dryrun +{ + local prefixsize=${1:-0} + + while [[ ${prefixsize} -lt 4 + && -z ${PATCHMODE} ]]; do + run_and_redirect "${PATCH_DIR}/apply-patch-git-dryrun.log" \ + "${GIT}" apply --binary -v --check "-p${prefixsize}" "${PATCH_FILE}" + if [[ $? == 0 ]]; then + PATCHPREFIX=${prefixsize} + PATCHMODE=git + echo "Verifying the patch:" + cat "${PATCH_DIR}/apply-patch-git-dryrun.log" + break + fi + ((prefixsize=prefixsize+1)) + done + + if [[ ${prefixsize} -eq 0 ]]; then + verify_zero "${PATCH_DIR}/apply-patch-git-dryrun.log" + if [[ $? != 0 ]]; then + PATCHMODE="" + PATCHPREFIX="" + git_dryrun 1 + fi fi +} - #first off check that all of the files do not exist - FOUND_ANY=0 - for CHECK_FILE in $(cat $TMP2) - do - if [[ -f $CHECK_FILE ]]; then - FOUND_ANY=1 +## @description patch patch dryrun +## @replaceable no +## @audience private +## @stability evolving +function patch_dryrun +{ + local prefixsize=${1:-0} + + while [[ ${prefixsize} -lt 4 + && -z ${PATCHMODE} ]]; do + run_and_redirect "${PATCH_DIR}/apply-patch-patch-dryrun.log" \ + "${PATCH}" "-p${prefixsize}" -E --dry-run < "${PATCH_FILE}" + if [[ $? == 0 ]]; then + PATCHPREFIX=${prefixsize} + PATCHMODE=patch + if [[ ${DRYRUNMODE} == true ]]; then + echo "Verifying the patch:" + cat "${PATCH_DIR}/apply-patch-patch-dryrun.log" + fi + break fi + ((prefixsize=prefixsize+1)) done - if [[ "$FOUND_ANY" = "0" ]]; then - #all of the files are new files so we have to guess where the correct place to put it is. + if [[ ${prefixsize} -eq 0 ]]; then + verify_zero "${PATCH_DIR}/apply-patch-patch-dryrun.log" + if [[ $? != 0 ]]; then + PATCHMODE="" + PATCHPREFIX="" + patch_dryrun 1 + fi + fi +} + +## @description driver for dryrun methods +## @replaceable no +## @audience private +## @stability evolving +function dryrun +{ + local method - # if all of the lines start with a/ or b/, then this is a git patch that - # was generated without --no-prefix - if ! grep -qv '^a/\|^b/' $TMP2 ; then - echo Looks like this is a git patch. 
Stripping a/ and b/ prefixes - echo and incrementing PLEVEL - PLEVEL=$[$PLEVEL + 1] - sed -i -e 's,^[ab]/,,' $TMP2 + for method in "${PATCHMODES[@]}"; do + if declare -f ${method}_dryrun >/dev/null; then + "${method}_dryrun" fi + if [[ -n ${PATCHMODE} ]]; then + break + fi + done - PREFIX_DIRS_AND_FILES=$(cut -d '/' -f 1 $TMP2 | sort -u) - - # if we are at the project root then nothing more to do - if [[ -d hadoop-common-project ]]; then - echo Looks like this is being run at project root + if [[ -n ${PATCHMODE} ]]; then + RESULT=0 + return 0 + fi + RESULT=1 + return 1 +} - # if all of the lines start with hadoop-common/, hadoop-hdfs/, hadoop-yarn/ or hadoop-mapreduce/, this is - # relative to the hadoop root instead of the subproject root, so we need - # to chop off another layer - elif [[ "$PREFIX_DIRS_AND_FILES" =~ ^(hadoop-common-project|hadoop-hdfs-project|hadoop-yarn-project|hadoop-mapreduce-project)$ ]]; then +## @description git patch apply +## @replaceable no +## @audience private +## @stability evolving +function git_apply +{ + echo "Applying the patch:" + run_and_redirect "${PATCH_DIR}/apply-patch-git-apply.log" \ + "${GIT}" apply --binary -v --stat --apply "-p${PATCHPREFIX}" "${PATCH_FILE}" + cat "${PATCH_DIR}/apply-patch-git-apply.log" +} - echo Looks like this is relative to project root. Increasing PLEVEL - PLEVEL=$[$PLEVEL + 1] - elif ! echo "$PREFIX_DIRS_AND_FILES" | grep -vxq 'hadoop-common-project\|hadoop-hdfs-project\|hadoop-yarn-project\|hadoop-mapreduce-project' ; then - echo Looks like this is a cross-subproject patch. Try applying from the project root - cleanup 1 +## @description patch patch apply +## @replaceable no +## @audience private +## @stability evolving +function patch_apply +{ + echo "Applying the patch:" + run_and_redirect "${PATCH_DIR}/apply-patch-patch-apply.log" \ + "${PATCH}" "-p${PATCHPREFIX}" -E < "${PATCH_FILE}" + cat "${PATCH_DIR}/apply-patch-patch-apply.log" +} + + +## @description driver for patch apply methods +## @replaceable no +## @audience private +## @stability evolving +function apply +{ + if declare -f ${PATCHMODE}_apply >/dev/null; then + "${PATCHMODE}_apply" + if [[ $? -gt 0 ]]; then + RESULT=1 + else + RESULT=0 fi + else + yetus_error "ERROR: Patching method ${PATCHMODE} does not have a way to apply patches!" + RESULT=1 fi -elif $PATCH -p1 -E --dry-run < $PATCH_FILE 2>&1 > /dev/null; then - PLEVEL=1 -elif $PATCH -p2 -E --dry-run < $PATCH_FILE 2>&1 > /dev/null; then - PLEVEL=2 -else - echo "The patch does not appear to apply with p0 to p2"; - cleanup 1; -fi +} + +trap "cleanup_and_exit 1" HUP INT QUIT TERM -# If this is a dry run then exit instead of applying the patch -if [[ -n $DRY_RUN ]]; then - cleanup 0; +setup_defaults + +parse_args "$@" + +locate_patch + +dryrun + +if [[ ${RESULT} -gt 0 ]]; then + yetus_error "ERROR: Aborting! The patch cannot be verified." + cleanup_and_exit ${RESULT} fi -echo Going to apply patch with: $PATCH -p$PLEVEL -$PATCH -p$PLEVEL -E < $PATCH_FILE +if [[ ${DRYRUNMODE} == false ]]; then + apply +fi -cleanup $? +cleanup_and_exit ${RESULT} \ No newline at end of file diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index f93cb4a1a431e..793c42fbb217b 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1601,7 +1601,7 @@ function verify_patch_file # Before building, check to make sure that the patch is valid export PATCH - "${BINDIR}/smart-apply-patch.sh" "${PATCH_DIR}/patch" dryrun + "${BINDIR}/smart-apply-patch.sh" --dry-run "${PATCH_DIR}/patch" if [[ $? 
!= 0 ]] ; then echo "PATCH APPLICATION FAILED" add_vote_table -1 patch "The patch command could not apply the patch during dryrun." From 0d7a70857552a74b60de22773bea5ea47f6ad2a7 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 7 Jul 2015 12:07:53 -0700 Subject: [PATCH 014/130] HADOOP-12202. releasedocmaker drops missing component and assignee entries (aw) --- dev-support/releasedocmaker.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 6e0126073c0ec..e7d73fc99abad 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -420,6 +420,8 @@ def main(): else: title=options.title + haderrors=False + for v in versions: vstr=str(v) jlist = JiraIter(vstr,projects) @@ -468,14 +470,6 @@ def main(): for jira in sorted(jlist): if jira.getIncompatibleChange(): incompatlist.append(jira) - if (len(jira.getReleaseNote())==0): - warningCount+=1 - - if jira.checkVersionString(): - warningCount+=1 - - if jira.checkMissingComponent() or jira.checkMissingAssignee(): - errorCount+=1 elif jira.getType() == "Bug": buglist.append(jira) elif jira.getType() == "Improvement": @@ -496,6 +490,7 @@ def main(): notableclean(jira.getSummary())) if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): + warningCount+=1 reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") reloutputs.writeKeyRaw(jira.getProject(), line) line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' @@ -503,9 +498,11 @@ def main(): reloutputs.writeKeyRaw(jira.getProject(), line) if jira.checkVersionString(): + warningCount+=1 lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() if (jira.checkMissingComponent() or jira.checkMissingAssignee()): + errorCount+=1 errorMessage=[] jira.checkMissingComponent() and errorMessage.append("component") jira.checkMissingAssignee() and errorMessage.append("assignee") @@ -520,11 +517,12 @@ def main(): if (options.lint is True): print lintMessage print "=======================================" - print "Error:%d, Warning:%d \n" % (errorCount, warningCount) + print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount) - if (errorCount>0): - cleanOutputDir(version) - sys.exit(1) + if (errorCount>0): + haderrors=True + cleanOutputDir(vstr) + continue reloutputs.writeAll("\n\n") reloutputs.close() @@ -571,5 +569,8 @@ def main(): if options.index: buildindex(title,options.license) + if haderrors is True: + sys.exit(1) + if __name__ == "__main__": main() From 38190e87ae2a545b05ed25e65fed181ce0546279 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 7 Jul 2015 12:13:52 -0700 Subject: [PATCH 015/130] Revert "HADOOP-12202. releasedocmaker drops missing component and assignee entries (aw)" This reverts commit 0d7a70857552a74b60de22773bea5ea47f6ad2a7. 
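
As a rough illustration of the code path involved (the project name,
version, and long-option spellings here are assumptions based on the
option attributes visible in the diffs above), the per-version lint
accounting that this revert temporarily removes is exercised by a run
such as:

    dev-support/releasedocmaker.py --project HADOOP --version 2.8.0 --lint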
--- dev-support/releasedocmaker.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index e7d73fc99abad..6e0126073c0ec 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -420,8 +420,6 @@ def main(): else: title=options.title - haderrors=False - for v in versions: vstr=str(v) jlist = JiraIter(vstr,projects) @@ -470,6 +468,14 @@ def main(): for jira in sorted(jlist): if jira.getIncompatibleChange(): incompatlist.append(jira) + if (len(jira.getReleaseNote())==0): + warningCount+=1 + + if jira.checkVersionString(): + warningCount+=1 + + if jira.checkMissingComponent() or jira.checkMissingAssignee(): + errorCount+=1 elif jira.getType() == "Bug": buglist.append(jira) elif jira.getType() == "Improvement": @@ -490,7 +496,6 @@ def main(): notableclean(jira.getSummary())) if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): - warningCount+=1 reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") reloutputs.writeKeyRaw(jira.getProject(), line) line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' @@ -498,11 +503,9 @@ def main(): reloutputs.writeKeyRaw(jira.getProject(), line) if jira.checkVersionString(): - warningCount+=1 lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() if (jira.checkMissingComponent() or jira.checkMissingAssignee()): - errorCount+=1 errorMessage=[] jira.checkMissingComponent() and errorMessage.append("component") jira.checkMissingAssignee() and errorMessage.append("assignee") @@ -517,12 +520,11 @@ def main(): if (options.lint is True): print lintMessage print "=======================================" - print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount) + print "Error:%d, Warning:%d \n" % (errorCount, warningCount) - if (errorCount>0): - haderrors=True - cleanOutputDir(vstr) - continue + if (errorCount>0): + cleanOutputDir(version) + sys.exit(1) reloutputs.writeAll("\n\n") reloutputs.close() @@ -569,8 +571,5 @@ def main(): if options.index: buildindex(title,options.license) - if haderrors is True: - sys.exit(1) - if __name__ == "__main__": main() From adbacf7010373dbe6df239688b4cebd4a93a69e4 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 7 Jul 2015 14:30:32 -0700 Subject: [PATCH 016/130] HADOOP-12202. 
releasedocmaker drops missing component and assignee entries (aw) --- dev-support/releasedocmaker.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 6e0126073c0ec..409d8e3825c93 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -420,6 +420,8 @@ def main(): else: title=options.title + haderrors=False + for v in versions: vstr=str(v) jlist = JiraIter(vstr,projects) @@ -468,14 +470,6 @@ def main(): for jira in sorted(jlist): if jira.getIncompatibleChange(): incompatlist.append(jira) - if (len(jira.getReleaseNote())==0): - warningCount+=1 - - if jira.checkVersionString(): - warningCount+=1 - - if jira.checkMissingComponent() or jira.checkMissingAssignee(): - errorCount+=1 elif jira.getType() == "Bug": buglist.append(jira) elif jira.getType() == "Improvement": @@ -496,6 +490,7 @@ def main(): notableclean(jira.getSummary())) if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): + warningCount+=1 reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") reloutputs.writeKeyRaw(jira.getProject(), line) line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' @@ -503,9 +498,11 @@ def main(): reloutputs.writeKeyRaw(jira.getProject(), line) if jira.checkVersionString(): + warningCount+=1 lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() if (jira.checkMissingComponent() or jira.checkMissingAssignee()): + errorCount+=1 errorMessage=[] jira.checkMissingComponent() and errorMessage.append("component") jira.checkMissingAssignee() and errorMessage.append("assignee") @@ -520,11 +517,11 @@ def main(): if (options.lint is True): print lintMessage print "=======================================" - print "Error:%d, Warning:%d \n" % (errorCount, warningCount) - + print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount) if (errorCount>0): - cleanOutputDir(version) - sys.exit(1) + haderrors=True + cleanOutputDir(vstr) + continue reloutputs.writeAll("\n\n") reloutputs.close() @@ -571,5 +568,8 @@ def main(): if options.index: buildindex(title,options.license) + if haderrors is True: + sys.exit(1) + if __name__ == "__main__": main() From 5b2412e861e7a4d65670d70a585e5f78f2fba857 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 10 Jul 2015 08:13:54 -0700 Subject: [PATCH 017/130] HADOOP-12165. author tests show entire run time not test time when skipped (Kengo Seki via aw) --- dev-support/test-patch.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 793c42fbb217b..694a055aba9bc 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -2291,14 +2291,14 @@ function check_author big_console_header "Checking there are no @author tags in the patch." + start_clock + if [[ ${CHANGED_FILES} =~ ${appname} ]]; then echo "Skipping @author checks as ${appname} has been patched." add_vote_table 0 @author "Skipping @author checks as ${appname} has been patched." return 0 fi - start_clock - authorTags=$("${GREP}" -c -i '^[^-].*@author' "${PATCH_DIR}/patch") echo "There appear to be ${authorTags} @author tags in the patch." if [[ ${authorTags} != 0 ]] ; then From ccc25a981be6f6e325be7e9c1b2d30251f1736f2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 10 Jul 2015 08:19:06 -0700 Subject: [PATCH 018/130] HADOOP-12188. 
javac warning file is always empty on ant-based projects (Kengo Seki via aw) --- dev-support/test-patch.sh | 53 ++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 694a055aba9bc..3cb2b4b0e7c8c 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -2367,7 +2367,7 @@ function count_javac_probs case ${BUILDTOOL} in maven) #shellcheck disable=SC2016,SC2046 - ${AWK} 'BEGIN {total = 0} {total += 1} END {print total}' "${warningfile}" + ${GREP} '\[WARNING\]' "${warningfile}" | ${AWK} '{sum+=1} END {print sum}' ;; ant) #shellcheck disable=SC2016 @@ -2454,15 +2454,20 @@ function check_patch_javac # if it was a new module, this won't exist. if [[ -f "${PATCH_DIR}/branch-javac-${fn}.txt" ]]; then - ${GREP} '\[WARNING\]' "${PATCH_DIR}/branch-javac-${fn}.txt" \ + ${GREP} -i warning "${PATCH_DIR}/branch-javac-${fn}.txt" \ > "${PATCH_DIR}/branch-javac-${fn}-warning.txt" else touch "${PATCH_DIR}/branch-javac-${fn}.txt" \ "${PATCH_DIR}/branch-javac-${fn}-warning.txt" fi - ${GREP} '\[WARNING\]' "${PATCH_DIR}/patch-javac-${fn}.txt" \ - > "${PATCH_DIR}/patch-javac-${fn}-warning.txt" + if [[ -f "${PATCH_DIR}/patch-javac-${fn}.txt" ]]; then + ${GREP} -i warning "${PATCH_DIR}/patch-javac-${fn}.txt" \ + > "${PATCH_DIR}/patch-javac-${fn}-warning.txt" + else + touch "${PATCH_DIR}/patch-javac-${fn}.txt" \ + "${PATCH_DIR}/patch-javac-${fn}-warning.txt" + fi numbranch=$(count_javac_probs "${PATCH_DIR}/branch-javac-${fn}-warning.txt") numpatch=$(count_javac_probs "${PATCH_DIR}/patch-javac-${fn}-warning.txt") @@ -2587,6 +2592,27 @@ function check_patch_javadoc fn=$(module_file_fragment "${MODULE[${i}]}") fn="${fn}${jdk}" + module_suffix=$(basename "${MODULE[${i}]}") + if [[ ${module_suffix} == \. 
]]; then + module_suffix=root + fi + + if [[ -f "${PATCH_DIR}/branch-javadoc-${fn}.txt" ]]; then + ${GREP} -i warning "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ + > "${PATCH_DIR}/branch-javadoc-${fn}-warning.txt" + else + touch "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ + "${PATCH_DIR}/branch-javadoc-${fn}-warning.txt" + fi + + if [[ -f "${PATCH_DIR}/patch-javadoc-${fn}.txt" ]]; then + ${GREP} -i warning "${PATCH_DIR}/patch-javadoc-${fn}.txt" \ + > "${PATCH_DIR}/patch-javadoc-${fn}-warning.txt" + else + touch "${PATCH_DIR}/patch-javadoc-${fn}.txt" \ + "${PATCH_DIR}/patch-javadoc-${fn}-warning.txt" + fi + numbranch=$(count_javadoc_probs "${PATCH_DIR}/branch-javadoc-${fn}.txt") numpatch=$(count_javadoc_probs "${PATCH_DIR}/patch-javadoc-${fn}.txt") @@ -2594,25 +2620,12 @@ function check_patch_javadoc && -n ${numpatch} && ${numpatch} -gt ${numbranch} ]] ; then - if [[ -f "${PATCH_DIR}/branch-javadoc-${fn}.txt" ]]; then - ${GREP} -i warning "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ - > "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" - else - touch "${PATCH_DIR}/branch-javadoc-${fn}.txt" \ - "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" - fi - - ${GREP} -i warning "${PATCH_DIR}/patch-javadoc-${fn}.txt" \ - > "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" - - ${DIFF} -u "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" \ - "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" \ + ${DIFF} -u "${PATCH_DIR}/branch-javadoc-${fn}-warning.txt" \ + "${PATCH_DIR}/patch-javadoc-${fn}-warning.txt" \ > "${PATCH_DIR}/javadoc-${fn}-diff.txt" - rm -f "${PATCH_DIR}/branch-javadoc-${fn}-filtered.txt" \ - "${PATCH_DIR}/patch-javadoc-${fn}-filtered.txt" module_status ${i} -1 "javadoc-${fn}-diff.txt" \ - "Patched ${MODULE[${i}]} generated "\ + "Patched ${module_suffix} generated "\ "$((numpatch-numbranch)) additional warning messages${statusjdk}." ((result=result+1)) From 2253f1f8fff00f919e526c5004b5b54afd3bff35 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 10 Jul 2015 08:27:00 -0700 Subject: [PATCH 019/130] HADOOP-12206. The preceding invocations of findlargest in test-patch effect the following invocation results (Kengo Seki via aw) --- dev-support/test-patch.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 3cb2b4b0e7c8c..8e139d3db2387 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -465,11 +465,13 @@ function findlargest local a=("$@") local sizeofa=${#a[@]} local i=0 + local string + local maxlen=0 - until [[ ${i} -gt ${sizeofa} ]]; do + until [[ ${i} -eq ${sizeofa} ]]; do # shellcheck disable=SC2086 string=$( echo ${a[$i]} | cut -f$((column + 1)) -d\| ) - if [[ ${#string} -gt $maxlen ]]; then + if [[ ${#string} -gt ${maxlen} ]]; then maxlen=${#string} fi i=$((i+1)) From 5e42d11fb53ca56001a30aa09c172317a20ba8fd Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 10 Jul 2015 08:29:57 -0700 Subject: [PATCH 020/130] HADOOP-12199. Optimize find_changed_modules (Kengo Seki via aw) --- dev-support/test-patch.sh | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 8e139d3db2387..4b6b263f5d691 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1006,9 +1006,7 @@ function parse_args ## @return directory containing the pom.xml. Nothing returned if not found. 
 function find_pomxml_dir
 {
-  local dir
-
-  dir=$(dirname "$1")
+  local dir=$1
 
   yetus_debug "Find pom.xml dir for: ${dir}"
 
@@ -1033,9 +1031,7 @@ function find_pomxml_dir
 ## @return directory containing the build.xml. Nothing returned if not found.
 function find_buildxml_dir
 {
-  local dir
-
-  dir=$(dirname "$1")
+  local dir=$1
 
   yetus_debug "Find build.xml dir for: ${dir}"
 
@@ -1076,18 +1072,21 @@ function find_changed_files
 ## @return None; sets ${CHANGED_MODULES} and ${CHANGED_UNFILTERED_MODULES}
 function find_changed_modules
 {
-  # Come up with a list of changed files into ${TMP}
+  local i
+  local changed_dirs
   local pomdirs
   local pomdir
   local module
   local pommods
 
+  changed_dirs=$(for i in ${CHANGED_FILES}; do dirname "${i}"; done | sort -u)
+
   # Now find all the modules that were changed
-  for file in ${CHANGED_FILES}; do
+  for i in ${changed_dirs}; do
     case ${BUILDTOOL} in
       maven)
         #shellcheck disable=SC2086
-        pomdir=$(find_pomxml_dir ${file})
+        pomdir=$(find_pomxml_dir ${i})
         if [[ -z ${pomdir} ]]; then
           output_to_console 1
           output_to_bugsystem 1
@@ -1097,7 +1096,7 @@ function find_changed_modules
         ;;
       ant)
         #shellcheck disable=SC2086
-        pomdir=$(find_buildxml_dir ${file})
+        pomdir=$(find_buildxml_dir ${i})
         if [[ -z ${pomdir} ]]; then
           output_to_console 1
           output_to_bugsystem 1

From fed76980c6ab8be67eef483eb99ec635369b09b6 Mon Sep 17 00:00:00 2001
From: Sean Busbey
Date: Sat, 11 Jul 2015 17:07:02 -0500
Subject: [PATCH 021/130] HADOOP-12225. add docs overview page

---
 dev-support/docs/README.md | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 dev-support/docs/README.md

diff --git a/dev-support/docs/README.md b/dev-support/docs/README.md
new file mode 100644
index 0000000000000..610014cadefbc
--- /dev/null
+++ b/dev-support/docs/README.md
@@ -0,0 +1,30 @@
+
+# Overview
+
+Yetus helps community-driven software projects improve their contribution and release processes by providing:
+
+* A robust system for automatically checking new contributions against a variety of community-accepted requirements
+* The means to document a well-defined supported interface for downstream projects
+* Tooling to help release managers generate release documentation based on the information provided by community issue trackers and source repositories
+
+# Yetus Precommit
+
+The Yetus Precommit Patch Tester allows projects to codify their patch acceptance criteria and then evaluate incoming contributions prior to review by a committer.
+
+* Take a quick look at [our glossary of terms](precommit-glossary.md) to ensure you are familiar with the ASF and Maven jargon we'll use, as well as terminology specific to this project.
+* For an overview of Yetus' philosophy on testing contributions and how evaluation is performed, see our [overview](precommit-architecture.md).
+* To get started on your project, including an explanation of what we'll expect in a runtime environment and what optional utilities we'll leverage, read through the [basic usage guide](precommit-basic.md).
+* If your project has advanced requirements, such as module relationships not expressed in Maven, special profiles, or a need for OS-specific prerequisites not managed by Maven, then you'll need to use our [advanced usage guide](precommit-advanced.md).

From f22ec7e8bf04fdaf1f3653b6fad7066fe8c6bb53 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Mon, 13 Jul 2015 10:12:35 -0700
Subject: [PATCH 022/130] HADOOP-12187.
Whitespace plugin shows unexpected error messages if gitdiffcontent is empty (Kengo Seki via aw) --- dev-support/test-patch.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 4b6b263f5d691..327e6f1a4b18a 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -625,6 +625,14 @@ function compute_gitdiff IFS=${oldifs} fi done < <("${GIT}" diff --unified=0 --no-color) + + if [[ ! -f ${GITDIFFLINES} ]]; then + touch "${GITDIFFLINES}" + fi + if [[ ! -f ${GITDIFFCONTENT} ]]; then + touch "${GITDIFFCONTENT}" + fi + popd >/dev/null } From 5e19855013c1254b5db66a9b5009fa0458cd8d8e Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 13 Jul 2015 10:53:09 -0700 Subject: [PATCH 023/130] HADOOP-12196. shellcheck plugin is picking up target executables (Kengo Seki via aw) --- dev-support/test-patch.d/shellcheck.sh | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 1c06a5d5e11a0..0ccd1c0fd5044 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -47,7 +47,15 @@ function shellcheck_private_findbash while read line; do value=$(find "${line}" ! -name '*.cmd' -type f \ | ${GREP} -E -v '(.orig$|.rej$)') - list="${list} ${value}" + + for i in ${value}; do + if [[ ! ${i} =~ \.sh(\.|$) + && ! $(head -n 1 "${i}") =~ ^#! ]]; then + yetus_debug "Shellcheck skipped: ${i}" + continue + fi + list="${list} ${i}" + done done < <(find . -type d -name bin -o -type d -name sbin -o -type d -name libexec -o -type d -name shellprofile.d) # shellcheck disable=SC2086 echo ${list} ${SHELLCHECK_SPECIFICFILES} | tr ' ' '\n' | sort -u @@ -137,6 +145,9 @@ function shellcheck_calcdiffs function shellcheck_postapply { local i + local numPrepatch + local numPostpatch + local diffPostpatch verify_needed_test shellcheck if [[ $? == 0 ]]; then From 33f2feb1a5dfaa3882c5208a0f21996217422666 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 13 Jul 2015 10:58:47 -0700 Subject: [PATCH 024/130] HADOOP-12197. smart-apply-patch shouldn't print successful dryrun in apply mode (Kengo Seki via aw) --- dev-support/smart-apply-patch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index bfd2aeb585f29..96bda92a93ade 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -485,7 +485,7 @@ function git_apply echo "Applying the patch:" run_and_redirect "${PATCH_DIR}/apply-patch-git-apply.log" \ "${GIT}" apply --binary -v --stat --apply "-p${PATCHPREFIX}" "${PATCH_FILE}" - cat "${PATCH_DIR}/apply-patch-git-apply.log" + ${GREP} -v "^Checking" "${PATCH_DIR}/apply-patch-git-apply.log" } From 840e0e5f7808790e53cbb67bccf7e216b2f84034 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 16 Jul 2015 10:36:36 -0700 Subject: [PATCH 025/130] HADOOP-12226. 
CHANGED_MODULES is wrong for ant (aw) --- dev-support/test-patch.sh | 224 +++++++++++++++++++++++++------------- 1 file changed, 151 insertions(+), 73 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 327e6f1a4b18a..24dafc34178dd 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -84,8 +84,12 @@ function setup_defaults PATCH_BRANCH="" PATCH_BRANCH_DEFAULT="master" - #shellcheck disable=SC2034 + # shellcheck disable=SC2034 CHANGED_MODULES="" + # shellcheck disable=SC2034 + CHANGED_UNFILTERED_MODULES="" + # shellcheck disable=SC2034 + CHANGED_UNION_MODULES="" USER_MODULE_LIST="" OFFLINE=false CHANGED_FILES="" @@ -656,10 +660,12 @@ function echo_and_redirect find "${BASEDIR}" -type d -exec chmod +x {} \; # to the screen + echo "cd $(pwd)" echo "${*} > ${logfile} 2>&1" # to the log - echo "${*}" > "${logfile}" - # the actual command + echo "cd $(pwd)" > "${logfile}" + echo "${*}" >> "${logfile}" + # run the actual command "${@}" >> "${logfile}" 2>&1 } @@ -723,6 +729,7 @@ function testpatch_usage echo "--project= The short name for project currently using test-patch (default 'yetus')" echo "--resetrepo Forcibly clean the repo" echo "--run-tests Run all relevant tests below the base directory" + echo "--skip-dirs= Skip following directories for module finding" echo "--skip-system-plugins Do not load plugins from ${BINDIR}/test-patch.d" echo "--summarize= Allow tests to summarize results" echo "--testlist= Specify which subsystem tests to use (comma delimited)" @@ -768,6 +775,7 @@ function parse_args { local i local j + local testlist for i in "$@"; do case ${i} in @@ -893,6 +901,11 @@ function parse_args --run-tests) RUN_TESTS=true ;; + --skip-dirs=*) + MODULE_SKIPDIRS=${i#*=} + MODULE_SKIPDIRS=${MODULE_SKIPDIRS//,/ } + yetus_debug "Setting skipdirs to ${MODULE_SKIPDIRS}" + ;; --skip-system-plugins) LOAD_SYSTEM_PLUGINS=false ;; @@ -1007,50 +1020,28 @@ function parse_args GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" } -## @description Locate the pom.xml file for a given directory -## @audience private -## @stability stable -## @replaceable no -## @return directory containing the pom.xml. Nothing returned if not found. -function find_pomxml_dir -{ - local dir=$1 - - yetus_debug "Find pom.xml dir for: ${dir}" - - while builtin true; do - if [[ -f "${dir}/pom.xml" ]];then - echo "${dir}" - yetus_debug "Found: ${dir}" - return - elif [[ ${dir} == "." ]]; then - yetus_error "ERROR: pom.xml is not found. Make sure the target is a Maven-based project." - return - else - dir=$(dirname "${dir}") - fi - done -} - -## @description Locate the build.xml file for a given directory +## @description Locate the build file for a given directory ## @audience private ## @stability stable ## @replaceable no -## @return directory containing the build.xml. Nothing returned if not found. -function find_buildxml_dir +## @return directory containing the buildfile. Nothing returned if not found. +## @params buildfile +## @params directory +function find_buildfile_dir { - local dir=$1 + local buildfile=$1 + local dir=$2 - yetus_debug "Find build.xml dir for: ${dir}" + yetus_debug "Find ${buildfile} dir for: ${dir}" while builtin true; do - if [[ -f "${dir}/build.xml" ]];then + if [[ -f "${dir}/${buildfile}" ]];then echo "${dir}" yetus_debug "Found: ${dir}" - return + return 0 elif [[ ${dir} == "." ]]; then - yetus_error "ERROR: build.xml is not found. Make sure the target is a Ant-based project." - return + yetus_debug "ERROR: ${buildfile} is not found." 
+ return 1 else dir=$(dirname "${dir}") fi @@ -1073,6 +1064,42 @@ function find_changed_files /^(\+\+\+|---) / { p($2) }' "${PATCH_DIR}/patch" | sort -u) } +## @description Check for directories to skip during +## @description changed module calcuation +## @audience private +## @stability stable +## @replaceable no +## @params directory +## @returns 0 for use +## @returns 1 for skip +function module_skipdir +{ + local dir=${1} + local i + + yetus_debug "Checking skipdirs for ${dir}" + + if [[ -z ${MODULE_SKIPDIRS} ]]; then + yetus_debug "Skipping skipdirs" + return 0 + fi + + while builtin true; do + for i in ${MODULE_SKIPDIRS}; do + if [[ ${dir} = "${i}" ]];then + yetus_debug "Found a skip: ${dir}" + return 1 + fi + done + if [[ ${dir} == "." ]]; then + return 0 + else + dir=$(dirname "${dir}") + yetus_debug "Trying to skip: ${dir}" + fi + done +} + ## @description Find the modules of the build that ${PATCH_DIR}/patch modifies ## @audience private ## @stability stable @@ -1082,60 +1109,111 @@ function find_changed_modules { local i local changed_dirs - local pomdirs - local pomdir + local builddirs + local builddir local module - local pommods + local buildmods + local prev_builddir + local i=1 + local dir + local buildfile + + case ${BUILDTOOL} in + maven) + buildfile=pom.xml + ;; + ant) + buildfile=build.xml + ;; + *) + yetus_error "ERROR: Unsupported build tool." + output_to_console 1 + output_to_bugsystem 1 + cleanup_and_exit 1 + ;; + esac changed_dirs=$(for i in ${CHANGED_FILES}; do dirname "${i}"; done | sort -u) # Now find all the modules that were changed for i in ${changed_dirs}; do - case ${BUILDTOOL} in - maven) - #shellcheck disable=SC2086 - pomdir=$(find_pomxml_dir ${i}) - if [[ -z ${pomdir} ]]; then - output_to_console 1 - output_to_bugsystem 1 - cleanup_and_exit 1 - fi - pomdirs="${pomdirs} ${pomdir}" - ;; - ant) - #shellcheck disable=SC2086 - pomdir=$(find_buildxml_dir ${i}) - if [[ -z ${pomdir} ]]; then - output_to_console 1 - output_to_bugsystem 1 - cleanup_and_exit 1 - fi - pomdirs="${pomdirs} ${pomdir}" - ;; - *) - yetus_error "ERROR: Unsupported build tool." - output_to_console 1 - output_to_bugsystem 1 - cleanup_and_exit 1 - ;; - esac + + module_skipdir "${i}" + if [[ $? != 0 ]]; then + continue + fi + + builddir=$(find_buildfile_dir ${buildfile} "${i}") + if [[ -z ${builddir} ]]; then + yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." + output_to_console 1 + output_to_bugsystem 1 + cleanup_and_exit 1 + fi + builddirs="${builddirs} ${builddir}" done #shellcheck disable=SC2086,SC2034 - CHANGED_UNFILTERED_MODULES=$(echo ${pomdirs} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) + CHANGED_UNFILTERED_MODULES=$(echo ${builddirs} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) + #shellcheck disable=SC2086,SC2116 + CHANGED_UNFILTERED_MODULES=$(echo ${CHANGED_UNFILTERED_MODULES}) + - if [[ ${BUILDTOOL} == maven ]]; then + if [[ ${BUILDTOOL} = maven + && ${QETESTMODE} = false ]]; then # Filter out modules without code - for module in ${pomdirs}; do + for module in ${builddirs}; do ${GREP} "pom" "${module}/pom.xml" > /dev/null if [[ "$?" 
!= 0 ]]; then - pommods="${pommods} ${module}" + buildmods="${buildmods} ${module}" fi done + elif [[ ${QETESTMODE} = true ]]; then + buildmods=${builddirs} fi #shellcheck disable=SC2086,SC2034 - CHANGED_MODULES=$(echo ${pommods} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) + CHANGED_MODULES=$(echo ${buildmods} ${USER_MODULE_LIST} | tr ' ' '\n' | sort -u) + + # turn it back into a list so that anyone printing doesn't + # generate multiline output + #shellcheck disable=SC2086,SC2116 + CHANGED_MODULES=$(echo ${CHANGED_MODULES}) + + yetus_debug "Locate the union of ${CHANGED_MODULES}" + # shellcheck disable=SC2086 + count=$(echo ${CHANGED_MODULES} | wc -w) + if [[ ${count} -lt 2 ]]; then + yetus_debug "Only one entry, so keeping it ${CHANGED_MODULES}" + # shellcheck disable=SC2034 + CHANGED_UNION_MODULES=${CHANGED_MODULES} + return + fi + + i=1 + while [[ ${i} -lt 100 ]] + do + module=$(echo "${CHANGED_MODULES}" | tr ' ' '\n' | cut -f1-${i} -d/ | uniq) + count=$(echo "${module}" | wc -w) + if [[ ${count} -eq 1 + && -f ${module}/${buildfile} ]]; then + prev_builddir=${module} + elif [[ ${count} -gt 1 ]]; then + builddir=${prev_builddir} + break + fi + ((i=i+1)) + done + + if [[ -z ${builddir} ]]; then + builddir="." + fi + + yetus_debug "Finding union of ${builddir}" + builddir=$(find_buildfile_dir ${buildfile} "${builddir}" || true) + + #shellcheck disable=SC2034 + CHANGED_UNION_MODULES="${builddir}" } ## @description git checkout the appropriate branch to test. Additionally, this calls From d84da00b18f9f5d7dabe18997352f098b8c93799 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 20 Jul 2015 09:42:01 -0700 Subject: [PATCH 026/130] HADOOP-12157. test-patch should report max memory consumed (Kengo Seki via aw) --- dev-support/test-patch.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 24dafc34178dd..25e86495ae327 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -386,6 +386,23 @@ function finish_docker_stats fi } +## @description Put the max memory consumed by maven at the bottom of the table. +## @audience private +## @stability stable +## @replaceable no +function finish_footer_table +{ + local maxmem + + # shellcheck disable=SC2016,SC2086 + maxmem=$(find "${PATCH_DIR}" -type f -exec ${AWK} 'match($0, /^\[INFO\] Final Memory: [0-9]+/) + { print substr($0, 22, RLENGTH-21) }' {} \; | sort -nr | head -n 1) + + if [[ -n ${maxmem} ]]; then + add_footer_table "Max memory used" "${maxmem}MB" + fi +} + ## @description Put the final elapsed time at the bottom of the table. ## @audience private ## @stability stable @@ -3473,6 +3490,8 @@ runtests finish_vote_table +finish_footer_table + output_to_console ${RESULT} output_to_bugsystem ${RESULT} cleanup_and_exit ${RESULT} From b41fe3111ae37478cbace2a07e6ac35a676ef978 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 20 Jul 2015 09:47:46 -0700 Subject: [PATCH 027/130] HADOOP-12237. 
releasedocmaker.py doesn't work behind a proxy (Tsuyoshi Ozawa via aw) --- dev-support/releasedocmaker.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 409d8e3825c93..d2e5dda9647cb 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -24,6 +24,7 @@ import re import sys import urllib +import urllib2 try: import json except ImportError: @@ -125,7 +126,7 @@ def __init__(self,versions, projects): versions.sort() print "Looking for %s through %s"%(versions[0],versions[-1]) for p in projects: - resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p) + resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p) data = json.loads(resp.read()) for d in data: if d['name'][0].isdigit and versions[0] <= d['name'] and d['name'] <= versions[-1]: @@ -288,7 +289,7 @@ def __init__(self, version, projects): self.projects = projects v=str(version).replace("-SNAPSHOT","") - resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field") + resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field") data = json.loads(resp.read()) self.fieldIdMap = {} @@ -301,7 +302,7 @@ def __init__(self, version, projects): count=100 while (at < end): params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count}) - resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params) + resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params) data = json.loads(resp.read()) if (data.has_key('errorMessages')): raise Exception(data['errorMessages']) @@ -407,6 +408,10 @@ def main(): if (len(options.versions) <= 0): parser.error("At least one version needs to be supplied") + proxy = urllib2.ProxyHandler() + opener = urllib2.build_opener(proxy) + urllib2.install_opener(opener) + projects = options.projects if (options.range is True): From 27a2328c5dea952704a1a8807ae0c7fb44fa84f4 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 21 Jul 2015 16:51:03 -0700 Subject: [PATCH 028/130] HADOOP-12198. hadoop patches that hit multiple modules need to build at the union (aw) --- dev-support/personality/hadoop.sh | 237 +++++++++++++++---------- dev-support/test-patch.d/checkstyle.sh | 2 +- dev-support/test-patch.d/findbugs.sh | 2 +- dev-support/test-patch.sh | 19 +- 4 files changed, 160 insertions(+), 100 deletions(-) diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 7722afb1bf349..3d6e3faeca1a0 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -27,63 +27,107 @@ HADOOP_MODULES="" function hadoop_module_manipulation { - local need_common=0 + local startingmodules=${1:-normal} local module local hdfs_modules local ordered_modules local tools_modules - local passed_modules=${CHANGED_MODULES} + local passed_modules + local flags + + yetus_debug "hmm in: ${startingmodules}" - yetus_debug "hmm: starting list: ${passed_modules}" + if [[ ${startingmodules} = normal ]]; then + startingmodules=${CHANGED_MODULES} + elif [[ ${startingmodules} = union ]]; then + startingmodules=${CHANGED_UNION_MODULES} + fi - # if one of our modules is ., then shortcut: - # ignore the rest and just set it to everything. - if [[ ${CHANGED_MODULES} == ' . ' ]]; then - HADOOP_MODULES='.' 
+ yetus_debug "hmm expanded to: ${startingmodules}" + + if [[ ${startingmodules} = "." ]]; then + yetus_debug "hmm shortcut since ." + HADOOP_MODULES=. return fi - # ${CHANGED_MODULES} is already sorted and uniq'd. + # ${startingmodules} is already sorted and uniq'd. # let's remove child modules if we're going to - # touch their parent - for module in ${CHANGED_MODULES}; do + # touch their parent. + passed_modules=${startingmodules} + for module in ${startingmodules}; do yetus_debug "Stripping ${module}" # shellcheck disable=SC2086 passed_modules=$(echo ${passed_modules} | tr ' ' '\n' | ${GREP} -v ${module}/ ) done + yetus_debug "hmm pre-ordering: ${startingmodules}" + + # yarn will almost always be after common in the sort order + # so really just need to make sure that common comes before + # everything else and tools comes last + for module in ${passed_modules}; do yetus_debug "Personality ordering ${module}" - if [[ ${module} == hadoop-hdfs-project* ]]; then + if [[ ${module} = "." ]]; then + HADOOP_MODULES=. + break + fi + + if [[ ${module} = hadoop-hdfs-project* ]]; then hdfs_modules="${hdfs_modules} ${module}" - need_common=1 - elif [[ ${module} == hadoop-common-project/hadoop-common - || ${module} == hadoop-common-project ]]; then + elif [[ ${module} = hadoop-common-project/hadoop-common + || ${module} = hadoop-common-project ]]; then ordered_modules="${ordered_modules} ${module}" - building_common=1 - elif [[ ${module} == hadoop-tools* ]]; then + elif [[ ${module} = hadoop-tools* ]]; then tools_modules="${tools_modules} ${module}" else ordered_modules="${ordered_modules} ${module}" fi done - ordered_modules="${ordered_modules} ${hdfs_modules} ${tools_modules}" + HADOOP_MODULES="${ordered_modules} ${hdfs_modules} ${tools_modules}" + + yetus_debug "hmm out: ${HADOOP_MODULES}" +} + +function hadoop_unittest_prereqs +{ + local need_common=0 + local building_common=0 + local module + local flags + local fn + + for module in ${HADOOP_MODULES}; do + if [[ ${module} = hadoop-hdfs-project* ]]; then + need_common=1 + elif [[ ${module} = hadoop-common-project/hadoop-common + || ${module} = hadoop-common-project ]]; then + building_common=1 + fi + done if [[ ${need_common} -eq 1 && ${building_common} -eq 0 ]]; then - ordered_modules="hadoop-common-project/hadoop-common ${ordered_modules}" + echo "unit test pre-reqs:" + module="hadoop-common-project/hadoop-common" + fn=$(module_file_fragment "${module}") + flags=$(hadoop_native_flags) + pushd "${BASEDIR}/${module}" >/dev/null + # shellcheck disable=SC2086 + echo_and_redirect "${PATCH_DIR}/maven-unit-prereq-${fn}-install.txt" \ + "${MVN}" "${MAVEN_ARGS[@]}" install -DskipTests ${flags} + popd >/dev/null fi - - yetus_debug "hmm: ${ordered_modules}" - HADOOP_MODULES=${ordered_modules} } -function hadoop_javac_ordering +function hadoop_native_flags { - local special=$1 - local ordered_modules - local module + + if [[ ${BUILD_NATIVE} != true ]]; then + return + fi # Based upon HADOOP-11937 # @@ -97,46 +141,45 @@ function hadoop_javac_ordering # e.g, HADOOP-12027 for OS X. 
so no -Drequire.bzip2 # - for module in ${HADOOP_MODULES}; do - if [[ ${JENKINS} == true - && ${DOCKERSUPPORT} == false ]]; then + # current build servers are pretty limited in + # what they support + if [[ ${JENKINS} = true + && ${DOCKERSUPPORT} = false ]]; then + # shellcheck disable=SC2086 + echo -Pnative \ + -Drequire.snappy -Drequire.openssl -Drequire.fuse \ + -Drequire.test.libhadoop + return + fi + + case ${OSTYPE} in + Linux) # shellcheck disable=SC2086 - personality_enqueue_module "${module}" ${special} \ - -Pnative \ + echo -Pnative -Drequire.libwebhdfs \ -Drequire.snappy -Drequire.openssl -Drequire.fuse \ -Drequire.test.libhadoop - else - case ${OSTYPE} in - Linux) - # shellcheck disable=SC2086 - personality_enqueue_module ${module} ${special} \ - -Pnative -Drequire.libwebhdfs \ - -Drequire.snappy -Drequire.openssl -Drequire.fuse \ - -Drequire.test.libhadoop - ;; - Darwin) - JANSSON_INCLUDE_DIR=/usr/local/opt/jansson/include - JANSSON_LIBRARY=/usr/local/opt/jansson/lib - export JANSSON_LIBRARY JANSSON_INCLUDE_DIR - # shellcheck disable=SC2086 - personality_enqueue_module ${module} ${special} \ - -Pnative -Drequire.snappy \ - -Drequire.openssl \ - -Dopenssl.prefix=/usr/local/opt/openssl/ \ - -Dopenssl.include=/usr/local/opt/openssl/include \ - -Dopenssl.lib=/usr/local/opt/openssl/lib \ - -Drequire.libwebhdfs -Drequire.test.libhadoop - ;; - *) - # shellcheck disable=SC2086 - personality_enqueue_module ${module} ${special} \ - -Pnative \ - -Drequire.snappy -Drequire.openssl \ - -Drequire.libwebhdfs -Drequire.test.libhadoop - ;; - esac - fi - done + ;; + Darwin) + JANSSON_INCLUDE_DIR=/usr/local/opt/jansson/include + JANSSON_LIBRARY=/usr/local/opt/jansson/lib + export JANSSON_LIBRARY JANSSON_INCLUDE_DIR + # shellcheck disable=SC2086 + echo \ + -Pnative -Drequire.snappy \ + -Drequire.openssl \ + -Dopenssl.prefix=/usr/local/opt/openssl/ \ + -Dopenssl.include=/usr/local/opt/openssl/include \ + -Dopenssl.lib=/usr/local/opt/openssl/lib \ + -Drequire.libwebhdfs -Drequire.test.libhadoop + ;; + *) + # shellcheck disable=SC2086 + echo \ + -Pnative \ + -Drequire.snappy -Drequire.openssl \ + -Drequire.libwebhdfs -Drequire.test.libhadoop + ;; + esac } function personality_modules @@ -144,6 +187,9 @@ function personality_modules local repostatus=$1 local testtype=$2 local extra="" + local ordering="normal" + local needflags=false + local flags local fn local i @@ -152,16 +198,29 @@ function personality_modules clear_personality_queue case ${testtype} in + asflicense) + # this is very fast and provides the full path if we do it from + # the root of the source + personality_enqueue_module . + return + ;; + checkstyle) + ordering="union" + extra="-DskipTests" + ;; javac) - if [[ ${BUILD_NATIVE} == true ]]; then - hadoop_module_manipulation - hadoop_javac_ordering -DskipTests - return - fi + ordering="union" extra="-DskipTests" + needflags=true + + # if something in common changed, we build the whole world + if [[ ${CHANGED_MODULES} =~ hadoop-common ]]; then + yetus_debug "hadoop personality: javac + hadoop-common = ordering set to . " + ordering="." + fi ;; javadoc) - if [[ ${repostatus} == patch ]]; then + if [[ ${repostatus} = patch ]]; then echo "javadoc pre-reqs:" for i in hadoop-project \ hadoop-common-project/hadoop-annotations; do @@ -177,38 +236,34 @@ function personality_modules ;; mvninstall) extra="-DskipTests" - if [[ ${repostatus} == branch ]]; then - HADOOP_MODULES=. - hadoop_javac_ordering -DskipTests - return + if [[ ${repostatus} = branch ]]; then + ordering=. 
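+        # descriptive note: ordering "." makes hadoop_module_manipulation
+        # collapse HADOOP_MODULES to the source root, so the branch-side
+        # mvninstall is issued once from the top of the tree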
fi ;; - asflicense) - # this is very fast and provides the full path if we do it from - # the root of the source - personality_enqueue_module . - return - ;; unit) - if [[ ${TEST_PARALLEL} == "true" ]] ; then - extra="-Pparallel-tests" - if [[ -n ${TEST_THREADS:-} ]]; then - extra="${extra} -DtestsThreadCount=${TEST_THREADS}" - fi - fi - if [[ ${BUILD_NATIVE} == true ]]; then - hadoop_module_manipulation - # shellcheck disable=SC2086 - hadoop_javac_ordering ${extra} - return - fi + # As soon as HADOOP-11984 gets committed, + # this code should get uncommented + #if [[ ${TEST_PARALLEL} = "true" ]] ; then + # extra="-Pparallel-tests" + # if [[ -n ${TEST_THREADS:-} ]]; then + # extra="${extra} -DtestsThreadCount=${TEST_THREADS}" + # fi + #fi + needflags=true + hadoop_unittest_prereqs ;; *) extra="-DskipTests" ;; esac - hadoop_module_manipulation + if [[ ${needflags} = true ]]; then + flags=$(hadoop_native_flags) + extra="${extra} ${flags}" + fi + + hadoop_module_manipulation ${ordering} + for module in ${HADOOP_MODULES}; do # shellcheck disable=SC2086 personality_enqueue_module ${module} ${extra} diff --git a/dev-support/test-patch.d/checkstyle.sh b/dev-support/test-patch.d/checkstyle.sh index 1fbe88e920ee7..f4bf79dd48ece 100755 --- a/dev-support/test-patch.d/checkstyle.sh +++ b/dev-support/test-patch.d/checkstyle.sh @@ -64,7 +64,7 @@ function checkstyle_runner case ${BUILDTOOL} in maven) - cmd="${MVN} ${MAVEN_ARGS[*]} clean test \ + cmd="${MVN} ${MAVEN_ARGS[*]} \ checkstyle:checkstyle \ -Dcheckstyle.consoleOutput=true \ ${MODULEEXTRAPARAM[${i}]//@@@MODULEFN@@@/${fn}} -Ptest-patch" diff --git a/dev-support/test-patch.d/findbugs.sh b/dev-support/test-patch.d/findbugs.sh index 2fe23394c9ca7..1d7118b85f63a 100755 --- a/dev-support/test-patch.d/findbugs.sh +++ b/dev-support/test-patch.d/findbugs.sh @@ -90,7 +90,7 @@ function findbugs_runner personality_modules "${name}" findbugs case ${BUILDTOOL} in maven) - modules_workers "${name}" findbugs clean test findbugs:findbugs + modules_workers "${name}" findbugs test-compile findbugs:findbugs ;; ant) modules_workers "${name}" findbugs findbugs diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 25e86495ae327..4dc9cfdca341f 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1175,7 +1175,6 @@ function find_changed_modules #shellcheck disable=SC2086,SC2116 CHANGED_UNFILTERED_MODULES=$(echo ${CHANGED_UNFILTERED_MODULES}) - if [[ ${BUILDTOOL} = maven && ${QETESTMODE} = false ]]; then # Filter out modules without code @@ -2229,7 +2228,7 @@ function precheck_javac personality_modules branch javac case ${BUILDTOOL} in maven) - modules_workers branch javac clean compile + modules_workers branch javac clean test-compile ;; ant) modules_workers branch javac @@ -2530,7 +2529,7 @@ function check_patch_javac case ${BUILDTOOL} in maven) - modules_workers patch javac clean compile + modules_workers patch javac clean test-compile ;; ant) modules_workers patch javac @@ -2842,7 +2841,7 @@ function check_mvninstall fi personality_modules patch mvninstall - modules_workers patch mvninstall install -Dmaven.javadoc.skip=true + modules_workers patch mvninstall clean install -Dmaven.javadoc.skip=true result=$? 
modules_messages patch mvninstall true if [[ ${result} != 0 ]]; then @@ -3276,7 +3275,8 @@ function postapply ((RESULT = RESULT + retval)) - for routine in check_patch_javadoc check_site + # shellcheck disable=SC2043 + for routine in check_site do verify_patchdir_still_exists yetus_debug "Running ${routine}" @@ -3305,8 +3305,13 @@ function postinstall local plugin verify_patchdir_still_exists - check_mvn_eclipse - (( RESULT = RESULT + $? )) + for routine in check_patch_javadoc check_mvn_eclipse + do + verify_patchdir_still_exists + yetus_debug "Running ${routine}" + ${routine} + (( RESULT = RESULT + $? )) + done for plugin in ${PLUGINS}; do verify_patchdir_still_exists From b8750c6854fae945798eb8530b1804669d863644 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 22 Jul 2015 12:55:10 -0700 Subject: [PATCH 029/130] HADOOP-12207. Add support for pylint (Kengo Seki via aw) --- dev-support/test-patch.d/pylint.sh | 186 +++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100755 dev-support/test-patch.d/pylint.sh diff --git a/dev-support/test-patch.d/pylint.sh b/dev-support/test-patch.d/pylint.sh new file mode 100755 index 0000000000000..8542dadc4b5fd --- /dev/null +++ b/dev-support/test-patch.d/pylint.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_plugin pylint + +PYLINT_TIMER=0 + +PYLINT=${PYLINT:-$(which pylint 2>/dev/null)} + +function pylint_usage +{ + echo "Pylint specific:" + echo "--pylint= path to pylint executable" +} + +function pylint_parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --pylint=*) + PYLINT=${i#*=} + ;; + esac + done +} + +function pylint_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.py$ ]]; then + add_test pylint + fi +} + +function pylint_preapply +{ + local i + + verify_needed_test pylint + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "pylint plugin: prepatch" + + if [[ ! -x ${PYLINT} ]]; then + yetus_error "${PYLINT} does not exist." + return 0 + fi + + start_clock + + echo "Running pylint against modified python scripts." 
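+  # pylint's parseable format yields one diagnostic per line, roughly
+  # "file.py:42: [C0103(invalid-name), ] ..." (an illustrative line; the
+  # exact fields vary by pylint version); pylint_calcdiffs later diffs
+  # these lines between the branch and patch runs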
+ pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.py$ && -f ${i} ]]; then + ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + ${AWK} '1> "${PATCH_DIR}/branchpylint-result.txt" + fi + done + popd >/dev/null + # keep track of how much as elapsed for us already + PYLINT_TIMER=$(stop_clock) + return 0 +} + +function pylint_calcdiffs +{ + local orig=$1 + local new=$2 + local diffout=$3 + local tmp=${PATCH_DIR}/pl.$$.${RANDOM} + local count=0 + local j + + # first, pull out just the errors + # shellcheck disable=SC2016 + ${AWK} -F: '{print $NF}' "${orig}" >> "${tmp}.branch" + + # shellcheck disable=SC2016 + ${AWK} -F: '{print $NF}' "${new}" >> "${tmp}.patch" + + # compare the errors, generating a string of line + # numbers. Sorry portability: GNU diff makes this too easy + ${DIFF} --unchanged-line-format="" \ + --old-line-format="" \ + --new-line-format="%dn " \ + "${tmp}.branch" \ + "${tmp}.patch" > "${tmp}.lined" + + # now, pull out those lines of the raw output + # shellcheck disable=SC2013 + for j in $(cat "${tmp}.lined"); do + # shellcheck disable=SC2086 + head -${j} "${new}" | tail -1 >> "${diffout}" + done + + if [[ -f "${diffout}" ]]; then + # shellcheck disable=SC2016 + count=$(${AWK} -F: 'BEGIN {sum=0} 2/dev/null + echo "${count}" +} + +function pylint_postapply +{ + local i + local msg + local numPrepatch + local numPostpatch + local diffPostpatch + + verify_needed_test pylint + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "pylint plugin: postpatch" + + if [[ ! -x ${PYLINT} ]]; then + yetus_error "${PYLINT} is not available." + add_vote_table 0 pylint "Pylint was not available." + return 0 + fi + + start_clock + + # add our previous elapsed to our new timer + # by setting the clock back + offset_clock "${PYLINT_TIMER}" + + echo "Running pylint against modified python scripts." + # we re-check this in case one has been added + pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.py$ && -f ${i} ]]; then + ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + ${AWK} '1> "${PATCH_DIR}/patchpylint-result.txt" + fi + done + popd >/dev/null + + # shellcheck disable=SC2016 + PYLINT_VERSION=$(${PYLINT} --version 2>/dev/null | ${GREP} pylint | ${AWK} '{print $NF}') + PYLINT_VERSION=${PYLINT_VERSION%,} + msg="v${PYLINT_VERSION}" + add_footer_table pylint "${msg}" + + diffPostpatch=$(pylint_calcdiffs \ + "${PATCH_DIR}/branchpylint-result.txt" \ + "${PATCH_DIR}/patchpylint-result.txt" \ + "${PATCH_DIR}/diffpatchpylint.txt") + + if [[ ${diffPostpatch} -gt 0 ]] ; then + # shellcheck disable=SC2016 + numPrepatch=$(${AWK} -F: 'BEGIN {sum=0} 2 Date: Mon, 27 Jul 2015 10:53:50 -0700 Subject: [PATCH 030/130] HADOOP-12226. 
Date: Mon, 27 Jul 2015 10:53:50 -0700
Subject: [PATCH 030/130] HADOOP-12226. CHANGED_MODULES is wrong for ant (addendum patch) (aw)

---
 dev-support/test-patch.sh | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 4dc9cfdca341f..1c9be9c794114 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1175,8 +1175,7 @@ function find_changed_modules
   #shellcheck disable=SC2086,SC2116
   CHANGED_UNFILTERED_MODULES=$(echo ${CHANGED_UNFILTERED_MODULES})
 
-  if [[ ${BUILDTOOL} = maven
-     && ${QETESTMODE} = false ]]; then
+  if [[ ${BUILDTOOL} = maven ]]; then
     # Filter out modules without code
     for module in ${builddirs}; do
       ${GREP} "<packaging>pom</packaging>" "${module}/pom.xml" > /dev/null
@@ -1184,8 +1183,6 @@ function find_changed_modules
         buildmods="${buildmods} ${module}"
       fi
     done
-  elif [[ ${QETESTMODE} = true ]]; then
-    buildmods=${builddirs}
   fi
 
   #shellcheck disable=SC2086,SC2034

From 4d4f288d3037d5a7a2b570ca87a685e4797cc29f Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Mon, 27 Jul 2015 11:05:48 -0700
Subject: [PATCH 031/130] HADOOP-12265. Pylint should be installed in test-patch docker environment (Kengo Seki via aw)

---
 dev-support/docker/Dockerfile       | 2 +-
 dev-support/docs/precommit-basic.md | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index f761f8b4404b2..862819f8f44ff 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -44,7 +44,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
     libjansson-dev \
     fuse libfuse-dev \
     libcurl4-openssl-dev \
-    python python2.7
+    python python2.7 pylint
 
 # Install Forrest
 RUN mkdir -p /usr/local/apache-forrest ; \
diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md
index ee2e063c17c07..a830cdb9c2f46 100644
--- a/dev-support/docs/precommit-basic.md
+++ b/dev-support/docs/precommit-basic.md
@@ -37,6 +37,7 @@ test-patch has the following requirements:
 * bash v3.2 or higher
 * findbugs 3.x installed
 * shellcheck installed
+* pylint installed
 * GNU diff
 * GNU patch
 * POSIX awk

From 7c92f0fe9176c55f7c16bc20211c854cdbfe0141 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Tue, 28 Jul 2015 10:43:22 -0700
Subject: [PATCH 032/130] HADOOP-12273. releasedocmaker.py fails with stacktrace if --project option is not specified (Kengo Seki via aw)

---
 dev-support/releasedocmaker.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index d2e5dda9647cb..c59ae9934f806 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -400,12 +400,6 @@ def main():
   (options, args) = parser.parse_args()
 
   if (options.versions is None):
-    options.versions = []
-
-  if (len(args) > 2):
-    options.versions.append(args[2])
-
-  if (len(options.versions) <= 0):
     parser.error("At least one version needs to be supplied")
 
   proxy = urllib2.ProxyHandler()
@@ -413,6 +407,8 @@ def main():
   urllib2.install_opener(opener)
 
   projects = options.projects
+  if projects is None:
+    parser.error("At least one project needs to be supplied")
 
   if (options.range is True):
     versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ]

From 03335bb4d5a047569519ee6775e6edd0c939cf03 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Tue, 28 Jul 2015 10:44:37 -0700
Subject: [PATCH 033/130] HADOOP-12254.
test-patch.sh should run findbugs if only findbugs-exclude.xml has changed (Kengo Seki via aw) --- dev-support/test-patch.d/findbugs.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/dev-support/test-patch.d/findbugs.sh b/dev-support/test-patch.d/findbugs.sh index 1d7118b85f63a..4fa54288bf6bd 100755 --- a/dev-support/test-patch.d/findbugs.sh +++ b/dev-support/test-patch.d/findbugs.sh @@ -20,13 +20,14 @@ FINDBUGS_WARNINGS_FAIL_PRECHECK=false add_plugin findbugs -function findbugs_file_filter +function findbugs_filefilter { local filename=$1 if [[ ${BUILDTOOL} == maven || ${BUILDTOOL} == ant ]]; then - if [[ ${filename} =~ \.java$ ]]; then + if [[ ${filename} =~ \.java$ + || ${filename} =~ (^|/)findbugs-exclude.xml$ ]]; then add_test findbugs fi fi @@ -139,7 +140,7 @@ function findbugs_runner savestop=$(stop_clock) MODULE_STATUS_TIMER[${i}]=${savestop} module_status ${i} -1 "" "${name}/${module} cannot run setBugDatabaseInfo from findbugs" - ((retval = retval + 1)) + ((result=result+1)) ((i=i+1)) continue fi @@ -151,7 +152,7 @@ function findbugs_runner savestop=$(stop_clock) MODULE_STATUS_TIMER[${i}]=${savestop} module_status ${i} -1 "" "${name}/${module} cannot run convertXmlToText from findbugs" - ((result = result + 1)) + ((result=result+1)) fi if [[ -z ${FINDBUGS_VERSION} @@ -182,7 +183,7 @@ function findbugs_preapply local i=0 local warnings_file local module_findbugs_warnings - local results=0 + local result=0 big_console_header "Pre-patch findbugs detection" @@ -199,7 +200,7 @@ function findbugs_preapply fi findbugs_runner branch - results=$? + result=$? if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" ]]; then until [[ $i -eq ${#MODULE[@]} ]]; do @@ -222,7 +223,7 @@ function findbugs_preapply if [[ ${module_findbugs_warnings} -gt 0 ]] ; then module_status ${i} -1 "branch-findbugs-${fn}.html" "${module} in ${PATCH_BRANCH} cannot run convertXmlToText from findbugs" - ((results=results+1)) + ((result=result+1)) fi savestop=$(stop_clock) MODULE_STATUS_TIMER[${i}]=${savestop} @@ -231,7 +232,7 @@ function findbugs_preapply modules_messages branch findbugs true fi - if [[ ${results} != 0 ]]; then + if [[ ${result} != 0 ]]; then return 1 fi return 0 @@ -256,7 +257,7 @@ function findbugs_postinstall local firstpart local secondpart local i=0 - local results=0 + local result=0 local savestop big_console_header "Patch findbugs detection" From 42d0c0fac1c2b9d1822193762512e86a442ee08e Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 29 Jul 2015 18:27:21 -0700 Subject: [PATCH 034/130] HADOOP-12204. 
releasedocmaker should pass pylint (Kengo Seki via aw) --- dev-support/releasedocmaker.py | 1015 ++++++++++++++++---------------- 1 file changed, 511 insertions(+), 504 deletions(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index c59ae9934f806..37bd58aa69869 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -19,21 +19,20 @@ from glob import glob from optparse import OptionParser from time import gmtime, strftime -import pprint import os import re import sys import urllib import urllib2 try: - import json + import json except ImportError: - import simplejson as json + import simplejson as json -releaseVersion={} -namePattern = re.compile(r' \([0-9]+\)') +RELEASE_VERSION = {} +NAME_PATTERN = re.compile(r' \([0-9]+\)') -asflicense=''' +ASF_LICENSE = ''' ''' -def clean(str): - return tableclean(re.sub(namePattern, "", str)) +def clean(_str): + return tableclean(re.sub(NAME_PATTERN, "", _str)) -def formatComponents(str): - str = re.sub(namePattern, '', str).replace("'", "") - if str != "": - ret = str - else: - # some markdown parsers don't like empty tables - ret = "." - return clean(ret) +def format_components(_str): + _str = re.sub(NAME_PATTERN, '', _str).replace("'", "") + if _str != "": + ret = _str + else: + # some markdown parsers don't like empty tables + ret = "." + return clean(ret) # convert to utf-8 # protect some known md metachars # or chars that screw up doxia -def tableclean(str): - str=str.encode('utf-8') - str=str.replace("_","\_") - str=str.replace("\r","") - str=str.rstrip() - return str +def tableclean(_str): + _str = _str.encode('utf-8') + _str = _str.replace("_", r"\_") + _str = _str.replace("\r", "") + _str = _str.rstrip() + return _str # same thing as tableclean, # except table metachars are also # escaped as well as more # things we don't want doxia to # screw up -def notableclean(str): - str=tableclean(str) - str=str.replace("|","\|") - str=str.replace("<","\<") - str=str.replace(">","\>") - str=str.replace("*","\*") - str=str.rstrip() - return str +def notableclean(_str): + _str = tableclean(_str) + _str = _str.replace("|", r"\|") + _str = _str.replace("<", r"\<") + _str = _str.replace(">", r"\>") + _str = _str.replace("*", r"\*") + _str = _str.rstrip() + return _str # clean output dir -def cleanOutputDir(dir): - files = os.listdir(dir) +def clean_output_dir(directory): + files = os.listdir(directory) for name in files: - os.remove(os.path.join(dir,name)) - os.rmdir(dir) + os.remove(os.path.join(directory, name)) + os.rmdir(directory) def mstr(obj): - if (obj is None): - return "" - return unicode(obj) - -def buildindex(title,license): - versions=reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*"))) - with open("index.md","w") as indexfile: - if license is True: - indexfile.write(asflicense) - for v in versions: - indexfile.write("* %s v%s\n" % (title,v)) - for k in ("Changes","Release Notes"): - indexfile.write(" * %s (%s/%s.%s.html)\n" \ - % (k,v,k.upper().replace(" ",""),v)) - indexfile.close() - -class GetVersions: - """ yo """ - def __init__(self,versions, projects): - versions = versions - projects = projects - self.newversions = [] - pp = pprint.PrettyPrinter(indent=4) - at=0 - end=1 - count=100 - versions.sort() - print "Looking for %s through %s"%(versions[0],versions[-1]) - for p in projects: - resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p) - data = json.loads(resp.read()) - for d in data: - if d['name'][0].isdigit and versions[0] <= d['name'] and 
d['name'] <= versions[-1]: - print "Adding %s to the list" % d['name'] - self.newversions.append(d['name']) - newlist=list(set(self.newversions)) - self.newversions=newlist - - def getlist(self): - pp = pprint.PrettyPrinter(indent=4) - return(self.newversions) - -class Version: - """Represents a version number""" - def __init__(self, data): - self.mod = False - self.data = data - found = re.match('^((\d+)(\.\d+)*).*$', data) - if (found): - self.parts = [ int(p) for p in found.group(1).split('.') ] - else: - self.parts = [] - # backfill version with zeroes if missing parts - self.parts.extend((0,) * (3 - len(self.parts))) - - def __str__(self): - if (self.mod): - return '.'.join([ str(p) for p in self.parts ]) - return self.data - - def __cmp__(self, other): - return cmp(self.parts, other.parts) - -class Jira: - """A single JIRA""" - - def __init__(self, data, parent): - self.key = data['key'] - self.fields = data['fields'] - self.parent = parent - self.notes = None - self.incompat = None - self.reviewed = None - - def getId(self): - return mstr(self.key) - - def getDescription(self): - return mstr(self.fields['description']) - - def getReleaseNote(self): - if (self.notes is None): - field = self.parent.fieldIdMap['Release Note'] - if (self.fields.has_key(field)): - self.notes=mstr(self.fields[field]) - else: - self.notes=self.getDescription() - return self.notes - - def getPriority(self): - ret = "" - pri = self.fields['priority'] - if(pri is not None): - ret = pri['name'] - return mstr(ret) - - def getAssignee(self): - ret = "" - mid = self.fields['assignee'] - if(mid is not None): - ret = mid['displayName'] - return mstr(ret) - - def getComponents(self): - if (len(self.fields['components'])>0): - return ", ".join([ comp['name'] for comp in self.fields['components'] ]) - else: - return "" - - def getSummary(self): - return self.fields['summary'] - - def getType(self): - ret = "" - mid = self.fields['issuetype'] - if(mid is not None): - ret = mid['name'] - return mstr(ret) - - def getReporter(self): - ret = "" - mid = self.fields['reporter'] - if(mid is not None): - ret = mid['displayName'] - return mstr(ret) - - def getProject(self): - ret = "" - mid = self.fields['project'] - if(mid is not None): - ret = mid['key'] - return mstr(ret) - - def __cmp__(self,other): - selfsplit=self.getId().split('-') - othersplit=other.getId().split('-') - v1=cmp(selfsplit[0],othersplit[0]) - if (v1!=0): - return v1 - else: - if selfsplit[1] < othersplit[1]: + if obj is None: + return "" + return unicode(obj) + +def buildindex(title, asf_license): + versions = reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*"))) + with open("index.md", "w") as indexfile: + if asf_license is True: + indexfile.write(ASF_LICENSE) + for version in versions: + indexfile.write("* %s v%s\n" % (title, version)) + for k in ("Changes", "Release Notes"): + indexfile.write(" * %s (%s/%s.%s.html)\n" \ + % (k, version, k.upper().replace(" ", ""), version)) + indexfile.close() + +class GetVersions(object): + """ yo """ + def __init__(self, versions, projects): + versions = versions + projects = projects + self.newversions = [] + versions.sort() + print "Looking for %s through %s"%(versions[0], versions[-1]) + for project in projects: + url = "https://issues.apache.org/jira/rest/api/2/project/%s/versions" % project + resp = urllib2.urlopen(url) + datum = json.loads(resp.read()) + for data in datum: + name = data['name'] + if name[0].isdigit and versions[0] <= name and name <= versions[-1]: + print "Adding %s to the list" % name + 
self.newversions.append(name) + newlist = list(set(self.newversions)) + self.newversions = newlist + + def getlist(self): + return self.newversions + +class Version(object): + """Represents a version number""" + def __init__(self, data): + self.mod = False + self.data = data + found = re.match(r'^((\d+)(\.\d+)*).*$', data) + if found: + self.parts = [int(p) for p in found.group(1).split('.')] + else: + self.parts = [] + # backfill version with zeroes if missing parts + self.parts.extend((0,) * (3 - len(self.parts))) + + def __str__(self): + if self.mod: + return '.'.join([str(p) for p in self.parts]) + return self.data + + def __cmp__(self, other): + return cmp(self.parts, other.parts) + +class Jira(object): + """A single JIRA""" + + def __init__(self, data, parent): + self.key = data['key'] + self.fields = data['fields'] + self.parent = parent + self.notes = None + self.incompat = None + self.reviewed = None + + def get_id(self): + return mstr(self.key) + + def get_description(self): + return mstr(self.fields['description']) + + def get_release_note(self): + if self.notes is None: + field = self.parent.field_id_map['Release Note'] + if self.fields.has_key(field): + self.notes = mstr(self.fields[field]) + else: + self.notes = self.get_description() + return self.notes + + def get_priority(self): + ret = "" + pri = self.fields['priority'] + if pri is not None: + ret = pri['name'] + return mstr(ret) + + def get_assignee(self): + ret = "" + mid = self.fields['assignee'] + if mid is not None: + ret = mid['displayName'] + return mstr(ret) + + def get_components(self): + if len(self.fields['components']) > 0: + return ", ".join([comp['name'] for comp in self.fields['components']]) + else: + return "" + + def get_summary(self): + return self.fields['summary'] + + def get_type(self): + ret = "" + mid = self.fields['issuetype'] + if mid is not None: + ret = mid['name'] + return mstr(ret) + + def get_reporter(self): + ret = "" + mid = self.fields['reporter'] + if mid is not None: + ret = mid['displayName'] + return mstr(ret) + + def get_project(self): + ret = "" + mid = self.fields['project'] + if mid is not None: + ret = mid['key'] + return mstr(ret) + + def __cmp__(self, other): + selfsplit = self.get_id().split('-') + othersplit = other.get_id().split('-') + result = cmp(selfsplit[0], othersplit[0]) + if result != 0: + return result + else: + if selfsplit[1] < othersplit[1]: + return True + elif selfsplit[1] > othersplit[1]: + return False + return False + + def get_incompatible_change(self): + if self.incompat is None: + field = self.parent.field_id_map['Hadoop Flags'] + self.reviewed = False + self.incompat = False + if self.fields.has_key(field): + if self.fields[field]: + for flag in self.fields[field]: + if flag['value'] == "Incompatible change": + self.incompat = True + if flag['value'] == "Reviewed": + self.reviewed = True + return self.incompat + + def check_missing_component(self): + if len(self.fields['components']) > 0: + return False return True - elif selfsplit[1] > othersplit[1]: + + def check_missing_assignee(self): + if self.fields['assignee'] is not None: + return False + return True + + def check_version_string(self): + field = self.parent.field_id_map['Fix Version/s'] + for ver in self.fields[field]: + found = re.match(r'^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', ver['name']) + if not found: + return True return False - return False - - def getIncompatibleChange(self): - if (self.incompat is None): - field = self.parent.fieldIdMap['Hadoop Flags'] - self.reviewed=False - 
self.incompat=False - if (self.fields.has_key(field)): - if self.fields[field]: - for hf in self.fields[field]: - if hf['value'] == "Incompatible change": - self.incompat=True - if hf['value'] == "Reviewed": - self.reviewed=True - return self.incompat - - def checkMissingComponent(self): - if (len(self.fields['components'])>0): - return False - return True - - def checkMissingAssignee(self): - if (self.fields['assignee'] is not None): - return False - return True - - def checkVersionString(self): - field = self.parent.fieldIdMap['Fix Version/s'] - for h in self.fields[field]: - found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name']) - if not found: - return True - return False - - def getReleaseDate(self,version): - for j in range(len(self.fields['fixVersions'])): - if self.fields['fixVersions'][j]==version: - return(self.fields['fixVersions'][j]['releaseDate']) - return None - -class JiraIter: - """An Iterator of JIRAs""" - - def __init__(self, version, projects): - self.version = version - self.projects = projects - v=str(version).replace("-SNAPSHOT","") - - resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field") - data = json.loads(resp.read()) - - self.fieldIdMap = {} - for part in data: - self.fieldIdMap[part['name']] = part['id'] - - self.jiras = [] - at=0 - end=1 - count=100 - while (at < end): - params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count}) - resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params) - data = json.loads(resp.read()) - if (data.has_key('errorMessages')): - raise Exception(data['errorMessages']) - at = data['startAt'] + data['maxResults'] - end = data['total'] - self.jiras.extend(data['issues']) - - needaversion=False - if v not in releaseVersion: - needaversion=True - - if needaversion is True: - for i in range(len(data['issues'])): - for j in range(len(data['issues'][i]['fields']['fixVersions'])): - if 'releaseDate' in data['issues'][i]['fields']['fixVersions'][j]: - releaseVersion[data['issues'][i]['fields']['fixVersions'][j]['name']]=\ - data['issues'][i]['fields']['fixVersions'][j]['releaseDate'] - - self.iter = self.jiras.__iter__() - - def __iter__(self): - return self - - def next(self): - data = self.iter.next() - j = Jira(data, self) - return j - -class Outputs: - """Several different files to output to at the same time""" - - def __init__(self, base_file_name, file_name_pattern, keys, params={}): - self.params = params - self.base = open(base_file_name%params, 'w') - self.others = {} - for key in keys: - both = dict(params) - both['key'] = key - self.others[key] = open(file_name_pattern%both, 'w') - - def writeAll(self, pattern): - both = dict(self.params) - both['key'] = '' - self.base.write(pattern%both) - for key in self.others.keys(): - both = dict(self.params) - both['key'] = key - self.others[key].write(pattern%both) - - def writeKeyRaw(self, key, str): - self.base.write(str) - if (self.others.has_key(key)): - self.others[key].write(str) - - def close(self): - self.base.close() - for fd in self.others.values(): - fd.close() - - def writeList(self, mylist): - for jira in sorted(mylist): - line = '| [%s](https://issues.apache.org/jira/browse/%s) | %s | %s | %s | %s | %s |\n' \ - % (notableclean(jira.getId()), notableclean(jira.getId()), - notableclean(jira.getSummary()), - notableclean(jira.getPriority()), - formatComponents(jira.getComponents()), - notableclean(jira.getReporter()), - 
notableclean(jira.getAssignee())) - self.writeKeyRaw(jira.getProject(), line) + + def get_release_date(self, version): + fix_versions = self.fields['fixVersions'] + for j in range(len(fix_versions)): + if fix_versions[j] == version: + return fix_versions[j]['releaseDate'] + return None + +class JiraIter(object): + """An Iterator of JIRAs""" + + def __init__(self, version, projects): + self.version = version + self.projects = projects + ver = str(version).replace("-SNAPSHOT", "") + + resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field") + data = json.loads(resp.read()) + + self.field_id_map = {} + for part in data: + self.field_id_map[part['name']] = part['id'] + + self.jiras = [] + pos = 0 + end = 1 + count = 100 + while pos < end: + pjs = "','".join(projects) + jql = "project in ('%s') and fixVersion in ('%s') and resolution = Fixed" % (pjs, ver) + params = urllib.urlencode({'jql': jql, 'startAt':pos, 'maxResults':count}) + resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s" % params) + data = json.loads(resp.read()) + if data.has_key('error_messages'): + raise Exception(data['error_messages']) + pos = data['startAt'] + data['maxResults'] + end = data['total'] + self.jiras.extend(data['issues']) + + needaversion = False + if ver not in RELEASE_VERSION: + needaversion = True + + if needaversion is True: + issues = data['issues'] + for i in range(len(issues)): + fix_versions = issues[i]['fields']['fixVersions'] + for j in range(len(fix_versions)): + fields = fix_versions[j] + if 'releaseDate' in fields: + RELEASE_VERSION[fields['name']] = fields['releaseDate'] + + self.iter = self.jiras.__iter__() + + def __iter__(self): + return self + + def next(self): + data = self.iter.next() + j = Jira(data, self) + return j + +class Outputs(object): + """Several different files to output to at the same time""" + + def __init__(self, base_file_name, file_name_pattern, keys, params=None): + if params is None: + params = {} + self.params = params + self.base = open(base_file_name%params, 'w') + self.others = {} + for key in keys: + both = dict(params) + both['key'] = key + self.others[key] = open(file_name_pattern%both, 'w') + + def write_all(self, pattern): + both = dict(self.params) + both['key'] = '' + self.base.write(pattern%both) + for key in self.others.keys(): + both = dict(self.params) + both['key'] = key + self.others[key].write(pattern%both) + + def write_key_raw(self, key, _str): + self.base.write(_str) + if self.others.has_key(key): + self.others[key].write(_str) + + def close(self): + self.base.close() + for value in self.others.values(): + value.close() + + def write_list(self, mylist): + for jira in sorted(mylist): + line = '| [%s](https://issues.apache.org/jira/browse/%s) | %s | %s | %s | %s | %s |\n' + line = line % (notableclean(jira.get_id()), + notableclean(jira.get_id()), + notableclean(jira.get_summary()), + notableclean(jira.get_priority()), + format_components(jira.get_components()), + notableclean(jira.get_reporter()), + notableclean(jira.get_assignee())) + self.write_key_raw(jira.get_project(), line) def main(): - parser = OptionParser(usage="usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]", - epilog= - "Markdown-formatted CHANGES and RELEASENOTES files will be stored in a directory" - " named after the highest version provided.") - parser.add_option("-i","--index", dest="index", action="store_true", - default=False, help="build an index file") - parser.add_option("-l","--license", 
dest="license", action="store_false", - default=True, help="Add an ASF license") - parser.add_option("-n","--lint", dest="lint", action="store_true", - help="use lint flag to exit on failures") - parser.add_option("-p", "--project", dest="projects", - action="append", type="string", - help="projects in JIRA to include in releasenotes", metavar="PROJECT") - parser.add_option("-r", "--range", dest="range", action="store_true", - default=False, help="Given versions are a range") - parser.add_option("-t", "--projecttitle", dest="title", - type="string", - help="Title to use for the project (default is Apache PROJECT)") - parser.add_option("-u","--usetoday", dest="usetoday", action="store_true", - default=False, help="use current date for unreleased versions") - parser.add_option("-v", "--version", dest="versions", - action="append", type="string", - help="versions in JIRA to include in releasenotes", metavar="VERSION") - (options, args) = parser.parse_args() - - if (options.versions is None): - parser.error("At least one version needs to be supplied") - - proxy = urllib2.ProxyHandler() - opener = urllib2.build_opener(proxy) - urllib2.install_opener(opener) - - projects = options.projects - if projects is None: - parser.error("At least one project needs to be supplied") - - if (options.range is True): - versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ] - else: - versions = [ Version(v) for v in options.versions ] - versions.sort(); - - if (options.title is None): - title=projects[0] - else: - title=options.title - - haderrors=False - - for v in versions: - vstr=str(v) - jlist = JiraIter(vstr,projects) - - if vstr in releaseVersion: - reldate=releaseVersion[vstr] - elif options.usetoday: - reldate=strftime("%Y-%m-%d", gmtime()) + usage = "usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]" + parser = OptionParser(usage=usage, + epilog="Markdown-formatted CHANGES and RELEASENOTES files will be stored" + "in a directory named after the highest version provided.") + parser.add_option("-i", "--index", dest="index", action="store_true", + default=False, help="build an index file") + parser.add_option("-l", "--license", dest="license", action="store_false", + default=True, help="Add an ASF license") + parser.add_option("-n", "--lint", dest="lint", action="store_true", + help="use lint flag to exit on failures") + parser.add_option("-p", "--project", dest="projects", + action="append", type="string", + help="projects in JIRA to include in releasenotes", metavar="PROJECT") + parser.add_option("-r", "--range", dest="range", action="store_true", + default=False, help="Given versions are a range") + parser.add_option("-t", "--projecttitle", dest="title", type="string", + help="Title to use for the project (default is Apache PROJECT)") + parser.add_option("-u", "--usetoday", dest="usetoday", action="store_true", + default=False, help="use current date for unreleased versions") + parser.add_option("-v", "--version", dest="versions", action="append", type="string", + help="versions in JIRA to include in releasenotes", metavar="VERSION") + (options, _) = parser.parse_args() + + if options.versions is None: + parser.error("At least one version needs to be supplied") + + proxy = urllib2.ProxyHandler() + opener = urllib2.build_opener(proxy) + urllib2.install_opener(opener) + + projects = options.projects + if projects is None: + parser.error("At least one project needs to be supplied") + + if options.range is True: + versions = 
[Version(v) for v in GetVersions(options.versions, projects).getlist()] + else: + versions = [Version(v) for v in options.versions] + versions.sort() + + if options.title is None: + title = projects[0] else: - reldate="Unreleased" - - if not os.path.exists(vstr): - os.mkdir(vstr) - - reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md", - "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md", - [], {"ver":v, "date":reldate, "title":title}) - choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md", - "%(ver)s/CHANGES.%(key)s.%(ver)s.md", - [], {"ver":v, "date":reldate, "title":title}) - - if (options.license is True): - reloutputs.writeAll(asflicense) - choutputs.writeAll(asflicense) - - relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \ - 'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n' - chhead = '# %(title)s Changelog\n\n' \ - '## Release %(ver)s - %(date)s\n'\ - '\n' - - reloutputs.writeAll(relhead) - choutputs.writeAll(chhead) - errorCount=0 - warningCount=0 - lintMessage="" - incompatlist=[] - buglist=[] - improvementlist=[] - newfeaturelist=[] - subtasklist=[] - tasklist=[] - testlist=[] - otherlist=[] - - for jira in sorted(jlist): - if jira.getIncompatibleChange(): - incompatlist.append(jira) - elif jira.getType() == "Bug": - buglist.append(jira) - elif jira.getType() == "Improvement": - improvementlist.append(jira) - elif jira.getType() == "New Feature": - newfeaturelist.append(jira) - elif jira.getType() == "Sub-task": - subtasklist.append(jira) - elif jira.getType() == "Task": - tasklist.append(jira) - elif jira.getType() == "Test": - testlist.append(jira) - else: - otherlist.append(jira) - - line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \ - % (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()), - notableclean(jira.getSummary())) - - if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0): - warningCount+=1 - reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") - reloutputs.writeKeyRaw(jira.getProject(), line) - line ='\n**WARNING: No release note provided for this incompatible change.**\n\n' - lintMessage += "\nWARNING: incompatible change %s lacks release notes." 
% (notableclean(jira.getId())) - reloutputs.writeKeyRaw(jira.getProject(), line) - - if jira.checkVersionString(): - warningCount+=1 - lintMessage += "\nWARNING: Version string problem for %s " % jira.getId() - - if (jira.checkMissingComponent() or jira.checkMissingAssignee()): - errorCount+=1 - errorMessage=[] - jira.checkMissingComponent() and errorMessage.append("component") - jira.checkMissingAssignee() and errorMessage.append("assignee") - lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId()) - - if (len(jira.getReleaseNote())>0): - reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n") - reloutputs.writeKeyRaw(jira.getProject(), line) - line ='\n%s\n\n' % (tableclean(jira.getReleaseNote())) - reloutputs.writeKeyRaw(jira.getProject(), line) - - if (options.lint is True): - print lintMessage - print "=======================================" - print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount) - if (errorCount>0): - haderrors=True - cleanOutputDir(vstr) - continue - - reloutputs.writeAll("\n\n") - reloutputs.close() - - choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(incompatlist) - - choutputs.writeAll("\n\n### NEW FEATURES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(newfeaturelist) - - choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(improvementlist) - - choutputs.writeAll("\n\n### BUG FIXES:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(buglist) - - choutputs.writeAll("\n\n### TESTS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(testlist) - - choutputs.writeAll("\n\n### SUB-TASKS:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(subtasklist) - - choutputs.writeAll("\n\n### OTHER:\n\n") - choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") - choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n") - choutputs.writeList(otherlist) - choutputs.writeList(tasklist) - - choutputs.writeAll("\n\n") - choutputs.close() - - if options.index: - buildindex(title,options.license) - - if haderrors is True: - sys.exit(1) + title = options.title + + haderrors = False + + for version in versions: + vstr = str(version) + jlist = JiraIter(vstr, projects) + + if vstr in RELEASE_VERSION: + reldate = RELEASE_VERSION[vstr] + elif options.usetoday: + reldate = strftime("%Y-%m-%d", gmtime()) + else: + reldate = "Unreleased" + + if not os.path.exists(vstr): + os.mkdir(vstr) + + reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md", + "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md", + [], {"ver":version, "date":reldate, "title":title}) + choutputs = 
Outputs("%(ver)s/CHANGES.%(ver)s.md", + "%(ver)s/CHANGES.%(key)s.%(ver)s.md", + [], {"ver":version, "date":reldate, "title":title}) + + if options.license is True: + reloutputs.write_all(ASF_LICENSE) + choutputs.write_all(ASF_LICENSE) + + relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \ + 'These release notes cover new developer and user-facing ' \ + 'incompatibilities, features, and major improvements.\n\n' + chhead = '# %(title)s Changelog\n\n' \ + '## Release %(ver)s - %(date)s\n'\ + '\n' + + reloutputs.write_all(relhead) + choutputs.write_all(chhead) + error_count = 0 + warning_count = 0 + lint_message = "" + incompatlist = [] + buglist = [] + improvementlist = [] + newfeaturelist = [] + subtasklist = [] + tasklist = [] + testlist = [] + otherlist = [] + + for jira in sorted(jlist): + if jira.get_incompatible_change(): + incompatlist.append(jira) + elif jira.get_type() == "Bug": + buglist.append(jira) + elif jira.get_type() == "Improvement": + improvementlist.append(jira) + elif jira.get_type() == "New Feature": + newfeaturelist.append(jira) + elif jira.get_type() == "Sub-task": + subtasklist.append(jira) + elif jira.get_type() == "Task": + tasklist.append(jira) + elif jira.get_type() == "Test": + testlist.append(jira) + else: + otherlist.append(jira) + + line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \ + % (notableclean(jira.get_id()), notableclean(jira.get_id()), + notableclean(jira.get_priority()), notableclean(jira.get_summary())) + + if jira.get_incompatible_change() and len(jira.get_release_note()) == 0: + warning_count += 1 + reloutputs.write_key_raw(jira.get_project(), "\n---\n\n") + reloutputs.write_key_raw(jira.get_project(), line) + line = '\n**WARNING: No release note provided for this incompatible change.**\n\n' + lint_message += "\nWARNING: incompatible change %s lacks release notes." 
% \ + (notableclean(jira.get_id())) + reloutputs.write_key_raw(jira.get_project(), line) + + if jira.check_version_string(): + warning_count += 1 + lint_message += "\nWARNING: Version string problem for %s " % jira.get_id() + + if jira.check_missing_component() or jira.check_missing_assignee(): + error_count += 1 + error_message = [] + if jira.check_missing_component(): + error_message.append("component") + if jira.check_missing_assignee(): + error_message.append("assignee") + lint_message += "\nERROR: missing %s for %s " \ + % (" and ".join(error_message), jira.get_id()) + + if len(jira.get_release_note()) > 0: + reloutputs.write_key_raw(jira.get_project(), "\n---\n\n") + reloutputs.write_key_raw(jira.get_project(), line) + line = '\n%s\n\n' % (tableclean(jira.get_release_note())) + reloutputs.write_key_raw(jira.get_project(), line) + + if options.lint is True: + print lint_message + print "=======================================" + print "%s: Error:%d, Warning:%d \n" % (vstr, error_count, warning_count) + if error_count > 0: + haderrors = True + clean_output_dir(vstr) + continue + + reloutputs.write_all("\n\n") + reloutputs.close() + + choutputs.write_all("### INCOMPATIBLE CHANGES:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(incompatlist) + + choutputs.write_all("\n\n### NEW FEATURES:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(newfeaturelist) + + choutputs.write_all("\n\n### IMPROVEMENTS:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(improvementlist) + + choutputs.write_all("\n\n### BUG FIXES:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(buglist) + + choutputs.write_all("\n\n### TESTS:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(testlist) + + choutputs.write_all("\n\n### SUB-TASKS:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(subtasklist) + + choutputs.write_all("\n\n### OTHER:\n\n") + choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n") + choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n") + choutputs.write_list(otherlist) + choutputs.write_list(tasklist) + + choutputs.write_all("\n\n") + choutputs.close() + + if options.index: + buildindex(title, options.license) + + if haderrors is True: + sys.exit(1) if __name__ == "__main__": - main() + main() From bddc16c6174807012b3e9d95624d60f78b6ba6c6 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 29 Jul 2015 18:48:57 -0700 Subject: [PATCH 035/130] HADOOP-12243. 
Rewrite grep -o's to be POSIX compliant (Kengo Seki via aw) --- dev-support/smart-apply-patch.sh | 8 +++++--- dev-support/test-patch.sh | 7 +++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index 96bda92a93ade..00e3a0a2033a6 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -266,7 +266,9 @@ function locate_patch fi fi - relativePatchURL=$(${GREP} -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${GREP} -o '/jira/secure/attachment/[0-9]*/[^"]*') + #shellcheck disable=SC2016 + relativePatchURL=$(${AWK} 'match($0,"\"/jira/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART+1,RLENGTH-1)}' "${PATCH_DIR}/jira" | + ${GREP} -v -e 'htm[l]*$' | sort | tail -1) PATCHURL="http://issues.apache.org${relativePatchURL}" if [[ ! ${PATCHURL} =~ \.patch$ ]]; then notSureIfPatch=true @@ -355,7 +357,7 @@ function verify_zero return 0 fi - dir=$(dirname ${filename} 2>/dev/null) + dir=$(dirname "${filename}" 2>/dev/null) if [[ -n ${dir} && -d ${dir} ]]; then return 0 fi @@ -540,4 +542,4 @@ if [[ ${DRYRUNMODE} == false ]]; then apply fi -cleanup_and_exit ${RESULT} \ No newline at end of file +cleanup_and_exit ${RESULT} diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 1c9be9c794114..c516bebd3f771 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1621,12 +1621,15 @@ function locate_patch fi fi - relativePatchURL=$(${GREP} -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${GREP} -o '/jira/secure/attachment/[0-9]*/[^"]*') + #shellcheck disable=SC2016 + relativePatchURL=$(${AWK} 'match($0,"\"/jira/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART+1,RLENGTH-1)}' "${PATCH_DIR}/jira" | + ${GREP} -v -e 'htm[l]*$' | sort | tail -1) PATCHURL="http://issues.apache.org${relativePatchURL}" if [[ ! ${PATCHURL} =~ \.patch$ ]]; then notSureIfPatch=true fi - patchNum=$(echo "${PATCHURL}" | ${GREP} -o '[0-9]*/' | ${GREP} -o '[0-9]*') + #shellcheck disable=SC2016 + patchNum=$(echo "${PATCHURL}" | ${AWK} 'match($0,"[0-9]*/"){print substr($0,RSTART,RLENGTH-1)}') echo "${ISSUE} patch is being downloaded at $(date) from" fi fi From aa2d6e12dc3c3be848e187da7eb03b077dccac5d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 30 Jul 2015 08:50:58 -0700 Subject: [PATCH 036/130] HADOOP-12266. 
make test-patch plugins more consistent (Kengo Seki via aw) --- dev-support/test-patch.d/checkstyle.sh | 45 +------------- dev-support/test-patch.d/pylint.sh | 62 +++---------------- dev-support/test-patch.d/shellcheck.sh | 85 +++++++------------------- dev-support/test-patch.sh | 38 ++++++++++++ 4 files changed, 70 insertions(+), 160 deletions(-) diff --git a/dev-support/test-patch.d/checkstyle.sh b/dev-support/test-patch.d/checkstyle.sh index f4bf79dd48ece..5538790255cf1 100755 --- a/dev-support/test-patch.d/checkstyle.sh +++ b/dev-support/test-patch.d/checkstyle.sh @@ -141,45 +141,6 @@ function checkstyle_preapply return 0 } -function checkstyle_calcdiffs -{ - local orig=$1 - local new=$2 - local diffout=$3 - local tmp=${PATCH_DIR}/cs.$$.${RANDOM} - local count=0 - local j - - # first, pull out just the errors - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${orig}" >> "${tmp}.branch" - - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${new}" >> "${tmp}.patch" - - # compare the errors, generating a string of line - # numbers. Sorry portability: GNU diff makes this too easy - ${DIFF} --unchanged-line-format="" \ - --old-line-format="" \ - --new-line-format="%dn " \ - "${tmp}.branch" \ - "${tmp}.patch" > "${tmp}.lined" - - # now, pull out those lines of the raw output - # shellcheck disable=SC2013 - for j in $(cat "${tmp}.lined"); do - # shellcheck disable=SC2086 - head -${j} "${new}" | tail -1 >> "${diffout}" - done - - if [[ -f "${diffout}" ]]; then - # shellcheck disable=SC2016 - count=$(wc -l "${diffout}" | ${AWK} '{print $1}' ) - fi - rm "${tmp}.branch" "${tmp}.patch" "${tmp}.lined" 2>/dev/null - echo "${count}" -} - function checkstyle_postapply { local result @@ -222,11 +183,9 @@ function checkstyle_postapply touch "${PATCH_DIR}/branch-checkstyle-${fn}.txt" fi + calcdiffs "${PATCH_DIR}/branch-checkstyle-${fn}.txt" "${PATCH_DIR}/patch-checkstyle-${fn}.txt" > "${PATCH_DIR}/diff-checkstyle-${fn}.txt" #shellcheck disable=SC2016 - diffpostpatch=$(checkstyle_calcdiffs \ - "${PATCH_DIR}/branch-checkstyle-${fn}.txt" \ - "${PATCH_DIR}/patch-checkstyle-${fn}.txt" \ - "${PATCH_DIR}/diff-checkstyle-${fn}.txt" ) + diffpostpatch=$(wc -l "${PATCH_DIR}/diff-checkstyle-${fn}.txt" | ${AWK} '{print $1}') if [[ ${diffpostpatch} -gt 0 ]] ; then ((result = result + 1)) diff --git a/dev-support/test-patch.d/pylint.sh b/dev-support/test-patch.d/pylint.sh index 8542dadc4b5fd..faa8136055df1 100755 --- a/dev-support/test-patch.d/pylint.sh +++ b/dev-support/test-patch.d/pylint.sh @@ -71,7 +71,7 @@ function pylint_preapply for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | - ${AWK} '1> "${PATCH_DIR}/branchpylint-result.txt" + ${AWK} '1> "${PATCH_DIR}/branch-pylint-result.txt" fi done popd >/dev/null @@ -80,49 +80,9 @@ function pylint_preapply return 0 } -function pylint_calcdiffs -{ - local orig=$1 - local new=$2 - local diffout=$3 - local tmp=${PATCH_DIR}/pl.$$.${RANDOM} - local count=0 - local j - - # first, pull out just the errors - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${orig}" >> "${tmp}.branch" - - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${new}" >> "${tmp}.patch" - - # compare the errors, generating a string of line - # numbers. 
Sorry portability: GNU diff makes this too easy - ${DIFF} --unchanged-line-format="" \ - --old-line-format="" \ - --new-line-format="%dn " \ - "${tmp}.branch" \ - "${tmp}.patch" > "${tmp}.lined" - - # now, pull out those lines of the raw output - # shellcheck disable=SC2013 - for j in $(cat "${tmp}.lined"); do - # shellcheck disable=SC2086 - head -${j} "${new}" | tail -1 >> "${diffout}" - done - - if [[ -f "${diffout}" ]]; then - # shellcheck disable=SC2016 - count=$(${AWK} -F: 'BEGIN {sum=0} 2/dev/null - echo "${count}" -} - function pylint_postapply { local i - local msg local numPrepatch local numPostpatch local diffPostpatch @@ -152,32 +112,28 @@ function pylint_postapply for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | - ${AWK} '1> "${PATCH_DIR}/patchpylint-result.txt" + ${AWK} '1> "${PATCH_DIR}/patch-pylint-result.txt" fi done popd >/dev/null # shellcheck disable=SC2016 PYLINT_VERSION=$(${PYLINT} --version 2>/dev/null | ${GREP} pylint | ${AWK} '{print $NF}') - PYLINT_VERSION=${PYLINT_VERSION%,} - msg="v${PYLINT_VERSION}" - add_footer_table pylint "${msg}" + add_footer_table pylint "v${PYLINT_VERSION%,}" - diffPostpatch=$(pylint_calcdiffs \ - "${PATCH_DIR}/branchpylint-result.txt" \ - "${PATCH_DIR}/patchpylint-result.txt" \ - "${PATCH_DIR}/diffpatchpylint.txt") + calcdiffs "${PATCH_DIR}/branch-pylint-result.txt" "${PATCH_DIR}/patch-pylint-result.txt" > "${PATCH_DIR}/diff-patch-pylint.txt" + diffPostpatch=$(${AWK} -F: 'BEGIN {sum=0} 2/dev/null for i in $(shellcheck_private_findbash); do if [[ -f ${i} ]]; then - ${SHELLCHECK} -f gcc "${i}" >> "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" + ${SHELLCHECK} -f gcc "${i}" >> "${PATCH_DIR}/branch-shellcheck-result.txt" fi done popd > /dev/null @@ -103,48 +92,10 @@ function shellcheck_preapply return 0 } -function shellcheck_calcdiffs -{ - local orig=$1 - local new=$2 - local diffout=$3 - local tmp=${PATCH_DIR}/sc.$$.${RANDOM} - local count=0 - local j - - # first, pull out just the errors - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${orig}" >> "${tmp}.branch" - - # shellcheck disable=SC2016 - ${AWK} -F: '{print $NF}' "${new}" >> "${tmp}.patch" - - # compare the errors, generating a string of line - # numbers. Sorry portability: GNU diff makes this too easy - ${DIFF} --unchanged-line-format="" \ - --old-line-format="" \ - --new-line-format="%dn " \ - "${tmp}.branch" \ - "${tmp}.patch" > "${tmp}.lined" - - # now, pull out those lines of the raw output - # shellcheck disable=SC2013 - for j in $(cat "${tmp}.lined"); do - # shellcheck disable=SC2086 - head -${j} "${new}" | tail -1 >> "${diffout}" - done - - if [[ -f "${diffout}" ]]; then - # shellcheck disable=SC2016 - count=$(wc -l "${diffout}" | ${AWK} '{print $1}' ) - fi - rm "${tmp}.branch" "${tmp}.patch" "${tmp}.lined" 2>/dev/null - echo "${count}" -} - function shellcheck_postapply { local i + local msg local numPrepatch local numPostpatch local diffPostpatch @@ -171,29 +122,35 @@ function shellcheck_postapply echo "Running shellcheck against all identifiable shell scripts" # we re-check this in case one has been added for i in $(shellcheck_private_findbash); do - ${SHELLCHECK} -f gcc "${i}" >> "${PATCH_DIR}/patchshellcheck-result.txt" + ${SHELLCHECK} -f gcc "${i}" >> "${PATCH_DIR}/patch-shellcheck-result.txt" done - if [[ ! -f "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" ]]; then - touch "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" + if [[ ! 
-f "${PATCH_DIR}/branch-shellcheck-result.txt" ]]; then + touch "${PATCH_DIR}/branch-shellcheck-result.txt" fi # shellcheck disable=SC2016 - numPrepatch=$(wc -l "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" | ${AWK} '{print $1}') + SHELLCHECK_VERSION=$(${SHELLCHECK} --version | ${GREP} version: | ${AWK} '{print $NF}') + msg="v${SHELLCHECK_VERSION}" + if [[ ${SHELLCHECK_VERSION} =~ 0.[0-3].[0-5] ]]; then + msg="${msg} (This is an old version that has serious bugs. Consider upgrading.)" + fi + add_footer_table shellcheck "${msg}" + calcdiffs "${PATCH_DIR}/branch-shellcheck-result.txt" "${PATCH_DIR}/patch-shellcheck-result.txt" > "${PATCH_DIR}/diff-patch-shellcheck.txt" # shellcheck disable=SC2016 - numPostpatch=$(wc -l "${PATCH_DIR}/patchshellcheck-result.txt" | ${AWK} '{print $1}') - - diffPostpatch=$(shellcheck_calcdiffs \ - "${PATCH_DIR}/${PATCH_BRANCH}shellcheck-result.txt" \ - "${PATCH_DIR}/patchshellcheck-result.txt" \ - "${PATCH_DIR}/diffpatchshellcheck.txt" - ) + diffPostpatch=$(wc -l "${PATCH_DIR}/diff-patch-shellcheck.txt" | ${AWK} '{print $1}') if [[ ${diffPostpatch} -gt 0 ]] ; then + # shellcheck disable=SC2016 + numPrepatch=$(wc -l "${PATCH_DIR}/branch-shellcheck-result.txt" | ${AWK} '{print $1}') + + # shellcheck disable=SC2016 + numPostpatch=$(wc -l "${PATCH_DIR}/patch-shellcheck-result.txt" | ${AWK} '{print $1}') + add_vote_table -1 shellcheck "The applied patch generated "\ - "${diffPostpatch} new shellcheck (v${SHELLCHECK_VERSION}) issues (total was ${numPrepatch}, now ${numPostpatch})." - add_footer_table shellcheck "@@BASE@@/diffpatchshellcheck.txt" + "${diffPostpatch} new shellcheck issues (total was ${numPrepatch}, now ${numPostpatch})." + add_footer_table shellcheck "@@BASE@@/diff-patch-shellcheck.txt" return 1 fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index c516bebd3f771..48f83a062f791 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -3433,6 +3433,44 @@ function add_bugsystem BUGSYSTEMS="${BUGSYSTEMS} $1" } +## @description Calculate the differences between the specified files +## @description and output it to stdout. +## @audience public +## @stability evolving +## @replaceable no +function calcdiffs +{ + local orig=$1 + local new=$2 + local tmp=${PATCH_DIR}/pl.$$.${RANDOM} + local count=0 + local j + + # first, pull out just the errors + # shellcheck disable=SC2016 + ${AWK} -F: '{print $NF}' "${orig}" > "${tmp}.branch" + + # shellcheck disable=SC2016 + ${AWK} -F: '{print $NF}' "${new}" > "${tmp}.patch" + + # compare the errors, generating a string of line + # numbers. Sorry portability: GNU diff makes this too easy + ${DIFF} --unchanged-line-format="" \ + --old-line-format="" \ + --new-line-format="%dn " \ + "${tmp}.branch" \ + "${tmp}.patch" > "${tmp}.lined" + + # now, pull out those lines of the raw output + # shellcheck disable=SC2013 + for j in $(cat "${tmp}.lined"); do + # shellcheck disable=SC2086 + head -${j} "${new}" | tail -1 + done + + rm "${tmp}.branch" "${tmp}.patch" "${tmp}.lined" 2>/dev/null +} + ############################################################################### ############################################################################### ############################################################################### From e4c9d5238a6f09a43a725522dc51a5cf22011b3c Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 30 Jul 2015 10:06:01 -0700 Subject: [PATCH 037/130] HADOOP-12255. 
Add support for rubocop (Kengo Seki via aw) --- dev-support/docker/Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 862819f8f44ff..83341f5e59613 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -44,7 +44,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libjansson-dev \ fuse libfuse-dev \ libcurl4-openssl-dev \ - python python2.7 pylint + python python2.7 pylint \ + ruby # Install Forrest RUN mkdir -p /usr/local/apache-forrest ; \ @@ -63,6 +64,9 @@ ENV FINDBUGS_HOME /opt/findbugs RUN apt-get install -y cabal-install RUN cabal update && cabal install shellcheck --global +# Install rubocop +RUN gem install rubocop + # Fixing the Apache commons / Maven dependency problem under Ubuntu: # See http://wiki.apache.org/commons/VfsProblems RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar . From 8f1f650d826cd73aad4c8c209b236e3314fec20c Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 30 Jul 2015 14:42:27 -0700 Subject: [PATCH 038/130] HADOOP-12298. Move recent yetus docker changes to the yetus dockerfile (aw) --- dev-support/docker/Dockerfile | 6 +----- dev-support/test-patch-docker/Dockerfile-startstub | 8 +++++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 83341f5e59613..f761f8b4404b2 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -44,8 +44,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libjansson-dev \ fuse libfuse-dev \ libcurl4-openssl-dev \ - python python2.7 pylint \ - ruby + python python2.7 # Install Forrest RUN mkdir -p /usr/local/apache-forrest ; \ @@ -64,9 +63,6 @@ ENV FINDBUGS_HOME /opt/findbugs RUN apt-get install -y cabal-install RUN cabal update && cabal install shellcheck --global -# Install rubocop -RUN gem install rubocop - # Fixing the Apache commons / Maven dependency problem under Ubuntu: # See http://wiki.apache.org/commons/VfsProblems RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar . diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index e534f14bc1839..5e5ca78cdfa48 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -32,7 +32,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libjansson-dev \ fuse libfuse-dev \ libcurl4-openssl-dev \ - python python2.7 \ + python python2.7 pylint \ + ruby \ openjdk-7-jdk # Fixing the Apache commons / Maven dependency problem under Ubuntu: @@ -71,6 +72,11 @@ ENV FINDBUGS_HOME /opt/findbugs RUN apt-get install -y cabal-install RUN cabal update && cabal install shellcheck --global +#### +# Install rubocop +### +RUN gem install rubocop + ##### # Install JIRA CLI ##### From d793970290cf0923d97d70ecbacf2b789e6936ce Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 30 Jul 2015 17:06:02 -0700 Subject: [PATCH 039/130] HADOOP-12228. 
Document releasedocmaker (aw) --- dev-support/docs/releasedocmaker.md | 230 ++++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 dev-support/docs/releasedocmaker.md diff --git a/dev-support/docs/releasedocmaker.md b/dev-support/docs/releasedocmaker.md new file mode 100644 index 0000000000000..d8eb13cdaf9cf --- /dev/null +++ b/dev-support/docs/releasedocmaker.md @@ -0,0 +1,230 @@ + + +releasedocmaker +=============== + +* [Purpose](#Purpose) +* [Basic Usage](#Basic_Usage) +* [Changing the Header](#Changing_the_Header) +* [Multiple Versions](#Multiple_Versions) +* [Unreleased Dates](#Unreleased_Dates) +* [Lint Mode](#Lint_Mode) + +# Purpose + +Building changelog information in a form that is human digestible but still containing as much useful information is difficult. Many attempts over the years have resulted in a variety of methods that projects use to solve this problem: + +* JIRA-generated release notes from the "Release Notes" button +* Manually modified CHANGES file +* Processing git log information + +All of these methods have their pros and cons. Some have issues with accuracy. Some have issues with lack of details. None of these methods seem to cover all of the needs of many projects and are full of potential pitfalls. + +In order to solve these problems, releasedocmaker was written to automatically generate a changelog and release notes by querying Apache's JIRA instance. + +# Basic Usage + +Minimally, the name of the JIRA project and a version registered in JIRA must be provided: + +```bash +$ releasedocmaker.py --project (project) --version (version) +``` + +This will query Apache JIRA, generating two files in a directory named after the given version in an extended markdown format which can be processed by both mvn site and GitHub. + +* CHANGES.(version).md + +This is similar to the JIRA "Release Notes" button but is in tabular format and includes the priority, component, reporter, and contributor fields. It also highlights Incompatible Changes so that readers know what to look out for when upgrading. The top of the file also includes the date that the version was marked as released in JIRA. + + +* RELEASENOTES.(version).md + +If your JIRA project supports the release note field, this will contain any JIRA mentioned in the CHANGES log that is either an incompatible change or has a release note associated with it. If your JIRA project does not support the release notes field, this will be the description field. + +For example, to build the release documentation for HBase v1.2.0... + +```bash +$ releasedocmaker.py --project HBASE --version 1.2.0 +``` + +... will create a 1.2.0 directory and inside that directory will be CHANGES.1.2.0.md and RELEASENOTES.1.2.0.md . + + +# Changing the Header + +By default, it will use a header that matches the project name. But that is kind of ugly and the case may be wrong. Luckily, the title can be changed: + +```bash +$ releasedocmaker.py --project HBASE --version 1.2.0 --projecttitle "Apache HBase" +``` + +Now instead of "HBASE", it will use "Apache HBASE" for some titles and headers. + +# Multiple Versions + +The script can also generate multiple versions at once, by + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 +``` + +This will create the files for versions 1.0.0 and versions 1.2.0 in their own directories. + +But what if the version numbers are not known? 
releasedocmaker can also generate version data based upon ranges: + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 --range +``` + +In this form, releasedocmaker will query JIRA, discover all versions that alphabetically appear to be between 1.0.0 and 1.2.0, inclusive, and generate all of the relative release documents. This is especially useful when bootstrapping an existing project. + +# Unreleased Dates + +For released versions, releasedocmaker will pull the date of the release from JIRA. However, for unreleased versions it marks the release as "Unreleased". This can be inconvenient when actually building a release and wanting to include it inside the source package. + +The --usetoday option can be used to signify that instead of using Unreleased, releasedocmaker should use today's date. + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --usetoday +``` + +After using this option and release, don't forget to change JIRA's release date to match! + +# Lint Mode + +In order to ensure proper formatting while using mvn site, releasedocmaker puts in periods (.) for fields that are empty or unassigned. This can be unsightly and not proper for any given project. There are also other things, such as missing release notes for incompatible changes, that are less than desirable. + +In order to help release managers from having to scan through potentially large documents, releasedocmaker features a lint mode, triggered via --lint: + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --lint +``` + +This will do the normal JIRA querying, looking for items it considers problematic. It will print the information to the screen and then exit with either success or failure, depending upon if any issues were discovered. + + +releasedocmaker +=============== + +* [Purpose](#Purpose) +* [Basic Usage](#Basic_Usage) +* [Changing the Header](#Changing_the_Header) +* [Multiple Versions](#Multiple_Versions) +* [Unreleased Dates](#Unreleased_Dates) +* [Lint Mode](#Lint_Mode) + +# Purpose + +Building changelog information in a form that is human digestible but still containing as much useful information is difficult. Many attempts over the years have resulted in a variety of methods that projects use to solve this problem: + +* JIRA-generated release notes from the "Release Notes" button +* Manually modified CHANGES file +* Processing git log information + +All of these methods have their pros and cons. Some have issues with accuracy. Some have issues with lack of details. None of these methods seem to cover all of the needs of many projects and are full of potential pitfalls. + +In order to solve these problems, releasedocmaker was written to automatically generate a changelog and release notes by querying Apache's JIRA instance. + +# Basic Usage + +Minimally, the name of the JIRA project and a version registered in JIRA must be provided: + +```bash +$ releasedocmaker.py --project (project) --version (version) +``` + +This will query Apache JIRA, generating two files in a directory named after the given version in an extended markdown format which can be processed by both mvn site and GitHub. + +* CHANGES.(version).md + +This is similar to the JIRA "Release Notes" button but is in tabular format and includes the priority, component, reporter, and contributor fields. It also highlights Incompatible Changes so that readers know what to look out for when upgrading. The top of the file also includes the date that the version was marked as released in JIRA. 
+ + +* RELEASENOTES.(version).md + +If your JIRA project supports the release note field, this will contain any JIRA mentioned in the CHANGES log that is either an incompatible change or has a release note associated with it. If your JIRA project does not support the release notes field, this will be the description field. + +For example, to build the release documentation for HBase v1.2.0... + +```bash +$ releasedocmaker.py --project HBASE --version 1.2.0 +``` + +... will create a 1.2.0 directory and inside that directory will be CHANGES.1.2.0.md and RELEASENOTES.1.2.0.md . + + +# Changing the Header + +By default, it will use a header that matches the project name. But that is kind of ugly and the case may be wrong. Luckily, the title can be changed: + +```bash +$ releasedocmaker.py --project HBASE --version 1.2.0 --projecttitle "Apache HBase" +``` + +Now instead of "HBASE", it will use "Apache HBASE" for some titles and headers. + +# Multiple Versions + +The script can also generate multiple versions at once, by + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 +``` + +This will create the files for versions 1.0.0 and versions 1.2.0 in their own directories. + +But what if the version numbers are not known? releasedocmaker can also generate version data based upon ranges: + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 --range +``` + +In this form, releasedocmaker will query JIRA, discover all versions that alphabetically appear to be between 1.0.0 and 1.2.0, inclusive, and generate all of the relative release documents. This is especially useful when bootstrapping an existing project. + +# Unreleased Dates + +For released versions, releasedocmaker will pull the date of the release from JIRA. However, for unreleased versions it marks the release as "Unreleased". This can be inconvenient when actually building a release and wanting to include it inside the source package. + +The --usetoday option can be used to signify that instead of using Unreleased, releasedocmaker should use today's date. + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --usetoday +``` + +After using this option and release, don't forget to change JIRA's release date to match! + +# Lint Mode + +In order to ensure proper formatting while using mvn site, releasedocmaker puts in periods (.) for fields that are empty or unassigned. This can be unsightly and not proper for any given project. There are also other things, such as missing release notes for incompatible changes, that are less than desirable. + +In order to help release managers from having to scan through potentially large documents, releasedocmaker features a lint mode, triggered via --lint: + +```bash +$ releasedocmaker.py --project HBASE --version 1.0.0 --lint +``` + +This will do the normal JIRA querying, looking for items it considers problematic. It will print the information to the screen and then exit with either success or failure, depending upon if any issues were discovered. From a38e2f276fc41e5eb6bb316510b03dd65cec6e2c Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 30 Jul 2015 17:06:42 -0700 Subject: [PATCH 040/130] HADOOP-12255. 
Add support for rubocop (missed a file) (Kengo Seki via aw) --- dev-support/test-patch.d/rubocop.sh | 140 ++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100755 dev-support/test-patch.d/rubocop.sh diff --git a/dev-support/test-patch.d/rubocop.sh b/dev-support/test-patch.d/rubocop.sh new file mode 100755 index 0000000000000..ba9810eaaa284 --- /dev/null +++ b/dev-support/test-patch.d/rubocop.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_plugin rubocop + +RUBOCOP_TIMER=0 + +RUBOCOP=${RUBOCOP:-$(which rubocop 2>/dev/null)} + +function rubocop_usage +{ + echo "Rubocop specific:" + echo "--rubocop= path to rubocop executable" +} + +function rubocop_parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --rubocop=*) + RUBOCOP=${i#*=} + ;; + esac + done +} + +function rubocop_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.rb$ ]]; then + add_test rubocop + fi +} + +function rubocop_preapply +{ + local i + + verify_needed_test rubocop + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "rubocop plugin: prepatch" + + if [[ ! -x ${RUBOCOP} ]]; then + yetus_error "${RUBOCOP} does not exist." + return 0 + fi + + start_clock + + echo "Running rubocop against modified ruby scripts." + pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.rb$ && -f ${i} ]]; then + ${RUBOCOP} -f c "${i}" | ${AWK} '!/[0-9]* files? inspected/' >> "${PATCH_DIR}/branch-rubocop-result.txt" + fi + done + popd >/dev/null + # keep track of how much as elapsed for us already + RUBOCOP_TIMER=$(stop_clock) + return 0 +} + +function rubocop_postapply +{ + local i + local numPrepatch + local numPostpatch + local diffPostpatch + + verify_needed_test rubocop + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "rubocop plugin: postpatch" + + if [[ ! -x ${RUBOCOP} ]]; then + yetus_error "${RUBOCOP} is not available." + add_vote_table 0 rubocop "Rubocop was not available." + return 0 + fi + + start_clock + + # add our previous elapsed to our new timer + # by setting the clock back + offset_clock "${RUBOCOP_TIMER}" + + echo "Running rubocop against modified ruby scripts." + # we re-check this in case one has been added + pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.rb$ && -f ${i} ]]; then + ${RUBOCOP} -f c "${i}" | ${AWK} '!/[0-9]* files? 
inspected/' >> "${PATCH_DIR}/patch-rubocop-result.txt" + fi + done + popd >/dev/null + + # shellcheck disable=SC2016 + RUBOCOP_VERSION=$(${RUBOCOP} -v | ${AWK} '{print $NF}') + add_footer_table rubocop "v${RUBOCOP_VERSION}" + + calcdiffs "${PATCH_DIR}/branch-rubocop-result.txt" "${PATCH_DIR}/patch-rubocop-result.txt" > "${PATCH_DIR}/diff-patch-rubocop.txt" + diffPostpatch=$(${AWK} -F: 'BEGIN {sum=0} 4 Date: Thu, 30 Jul 2015 17:10:33 -0700 Subject: [PATCH 041/130] HADOOP-12228. Document releasedocmaker (fix the commit) (aw) --- dev-support/docs/releasedocmaker.md | 115 ---------------------------- 1 file changed, 115 deletions(-) diff --git a/dev-support/docs/releasedocmaker.md b/dev-support/docs/releasedocmaker.md index d8eb13cdaf9cf..b39de2e64c9db 100644 --- a/dev-support/docs/releasedocmaker.md +++ b/dev-support/docs/releasedocmaker.md @@ -62,121 +62,6 @@ $ releasedocmaker.py --project HBASE --version 1.2.0 ... will create a 1.2.0 directory and inside that directory will be CHANGES.1.2.0.md and RELEASENOTES.1.2.0.md . -# Changing the Header - -By default, it will use a header that matches the project name. But that is kind of ugly and the case may be wrong. Luckily, the title can be changed: - -```bash -$ releasedocmaker.py --project HBASE --version 1.2.0 --projecttitle "Apache HBase" -``` - -Now instead of "HBASE", it will use "Apache HBASE" for some titles and headers. - -# Multiple Versions - -The script can also generate multiple versions at once, by - -```bash -$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 -``` - -This will create the files for versions 1.0.0 and versions 1.2.0 in their own directories. - -But what if the version numbers are not known? releasedocmaker can also generate version data based upon ranges: - -```bash -$ releasedocmaker.py --project HBASE --version 1.0.0 --version 1.2.0 --range -``` - -In this form, releasedocmaker will query JIRA, discover all versions that alphabetically appear to be between 1.0.0 and 1.2.0, inclusive, and generate all of the relative release documents. This is especially useful when bootstrapping an existing project. - -# Unreleased Dates - -For released versions, releasedocmaker will pull the date of the release from JIRA. However, for unreleased versions it marks the release as "Unreleased". This can be inconvenient when actually building a release and wanting to include it inside the source package. - -The --usetoday option can be used to signify that instead of using Unreleased, releasedocmaker should use today's date. - -```bash -$ releasedocmaker.py --project HBASE --version 1.0.0 --usetoday -``` - -After using this option and release, don't forget to change JIRA's release date to match! - -# Lint Mode - -In order to ensure proper formatting while using mvn site, releasedocmaker puts in periods (.) for fields that are empty or unassigned. This can be unsightly and not proper for any given project. There are also other things, such as missing release notes for incompatible changes, that are less than desirable. - -In order to help release managers from having to scan through potentially large documents, releasedocmaker features a lint mode, triggered via --lint: - -```bash -$ releasedocmaker.py --project HBASE --version 1.0.0 --lint -``` - -This will do the normal JIRA querying, looking for items it considers problematic. It will print the information to the screen and then exit with either success or failure, depending upon if any issues were discovered. 
- - -releasedocmaker -=============== - -* [Purpose](#Purpose) -* [Basic Usage](#Basic_Usage) -* [Changing the Header](#Changing_the_Header) -* [Multiple Versions](#Multiple_Versions) -* [Unreleased Dates](#Unreleased_Dates) -* [Lint Mode](#Lint_Mode) - -# Purpose - -Building changelog information in a form that is human digestible but still containing as much useful information is difficult. Many attempts over the years have resulted in a variety of methods that projects use to solve this problem: - -* JIRA-generated release notes from the "Release Notes" button -* Manually modified CHANGES file -* Processing git log information - -All of these methods have their pros and cons. Some have issues with accuracy. Some have issues with lack of details. None of these methods seem to cover all of the needs of many projects and are full of potential pitfalls. - -In order to solve these problems, releasedocmaker was written to automatically generate a changelog and release notes by querying Apache's JIRA instance. - -# Basic Usage - -Minimally, the name of the JIRA project and a version registered in JIRA must be provided: - -```bash -$ releasedocmaker.py --project (project) --version (version) -``` - -This will query Apache JIRA, generating two files in a directory named after the given version in an extended markdown format which can be processed by both mvn site and GitHub. - -* CHANGES.(version).md - -This is similar to the JIRA "Release Notes" button but is in tabular format and includes the priority, component, reporter, and contributor fields. It also highlights Incompatible Changes so that readers know what to look out for when upgrading. The top of the file also includes the date that the version was marked as released in JIRA. - - -* RELEASENOTES.(version).md - -If your JIRA project supports the release note field, this will contain any JIRA mentioned in the CHANGES log that is either an incompatible change or has a release note associated with it. If your JIRA project does not support the release notes field, this will be the description field. - -For example, to build the release documentation for HBase v1.2.0... - -```bash -$ releasedocmaker.py --project HBASE --version 1.2.0 -``` - -... will create a 1.2.0 directory and inside that directory will be CHANGES.1.2.0.md and RELEASENOTES.1.2.0.md . - - # Changing the Header By default, it will use a header that matches the project name. But that is kind of ugly and the case may be wrong. Luckily, the title can be changed: From 21e21b990902c99c5bb33464b9b096c28c30edf4 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 31 Jul 2015 14:53:29 -0700 Subject: [PATCH 042/130] HADOOP-12130. 
document features added in 12113 (aw)
---
 dev-support/docs/precommit-advanced.md     | 68 ++++++++++++----
 dev-support/docs/precommit-architecture.md | 31 ++++---
 dev-support/docs/precommit-basic.md        | 94 ++++++++++++++++++----
 3 files changed, 149 insertions(+), 44 deletions(-)

diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md
index 0a7eac5e04034..a424199e7f5a6 100644
--- a/dev-support/docs/precommit-advanced.md
+++ b/dev-support/docs/precommit-advanced.md
@@ -16,22 +16,35 @@ test-patch
 ==========

 * [Docker Support](#Docker_Support)
-* [Maven Profiles](#Maven_Profiles)
+* [Maven Specific](#Maven_Specific)
+* [Ant Specific](#Ant_Specific)
 * [Plug-ins](#Plug-ins)
 * [Configuring for Other Projects](#Configuring_for_Other_Projects)
+* [Important Variables](#Important_Variables)

 # Docker Support

-By default, test-patch runs in the same shell where it was launched. It can alternatively use Docker to launch itself into a container. This is particularly useful if running under a QA environment that does not provide all the necessary binaries. For example, the patch requires a newer version of Java.
+By default, test-patch runs in the same shell where it was launched. It can alternatively use Docker to launch itself into a container. This is particularly useful if running under a QA environment that does not provide all the necessary binaries; for example, if the patch requires a newer version of Java.

-The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile. Be aware that test-patch will copy this file and append its necessary hooks in order to execute.
+The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile. The Dockerfile should contain all of the necessary binaries and tooling needed to run the test. However, be aware that test-patch will copy this file and append its necessary hooks to re-launch itself prior to executing docker.

-test-patch includes code to automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode. In this way, if Docker fails to build the image, the disk space should eventually return.
+Dockerfile images will be named with a test-patch prefix and suffixed with either a date or a git commit hash. By using this information, test-patch will automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode. In this way, if Docker fails to build the image, the disk space should eventually be cleaned up and returned to the system.

-# Maven Profiles
+# Maven Specific

-By default, test-patch will pass -Ptest-patch and -D${PROJECT_NAME}PatchProcess to Maven. This will allow you to configure special actions that should only happen when running underneath test-patch.
+## Command Arguments
+
+test-patch always passes --batch-mode to maven to force it into non-interactive mode. Additionally, some tests will also force -fae in order to get all of the messages/errors during that mode. It *does not* pass -DskipTests. Additional arguments should be handled via the personality.
+
+## Test Profile
+
+By default, test-patch will pass -Ptest-patch to Maven. This will allow you to configure special actions that should only happen when running underneath test-patch.
+
+# Ant Specific
+
+## Command Arguments
+
+test-patch always passes -noinput to Ant. This forces Ant to be non-interactive.
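To make the build-argument handling concrete, here is a minimal, hypothetical personality sketch (the personality mechanism itself is described under Configuring for Other Projects below; the `-Dbuild.profile=precommit` property is made up for illustration and is not part of test-patch):

```bash
function personality_modules
{
  local repostatus=$1
  local testtype=$2

  # start from an empty queue, as personalities should
  clear_personality_queue

  # queue the repository root; the second argument is the extra
  # per-module flag handed to the build tool on top of the defaults
  # test-patch already supplies (--batch-mode/-Ptest-patch for maven,
  # -noinput for ant)
  personality_enqueue_module . -Dbuild.profile=precommit
}
```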
# Plug-ins @@ -58,7 +71,7 @@ Similarly, there are other functions that may be defined during the test-patch r - executed prior to the patch being applied but after the git repository is setup. This is useful for any early error checking that might need to be done before any heavier work. * pluginname_preapply - - executed prior to the patch being applied. This is useful for any "before"-type data collection for later comparisons + - executed prior to the patch being applied. This is useful for any "before"-type data collection for later comparisons. * pluginname_postapply - executed after the patch has been applied. This is useful for any "after"-type data collection. @@ -79,12 +92,12 @@ If the plug-in has some specific options, one can use following functions: - executed prior to any other above functions except for pluginname_usage. This is useful for parsing the arguments passed from the user and setting up the execution environment. - HINT: It is recommend to make the pluginname relatively small, 10 characters at the most. Otherwise the ASCII output table may be skewed. + HINT: It is recommended to make the pluginname relatively small, 10 characters at the most. Otherwise, the ASCII output table may be skewed. # Configuring for Other Projects -It is impossible for any general framework to be predictive about what types of special rules any given project may have, especially when it comes to ordering and Maven profiles. In order to assist non-Hadoop projects, a project `personality` should be added that enacts these custom rules. +It is impossible for any general framework to be predictive about what types of special rules any given project may have, especially when it comes to ordering and Maven profiles. In order to direct test-patch to do the correct action, a project `personality` should be added that enacts these custom rules. A personality consists of two functions. One that determines which test types to run and another that allows a project to dictate ordering rules, flags, and profiles on a per-module, per-test run. @@ -92,7 +105,7 @@ There can be only **one** of each personality function defined. ## Test Determination -The `personality_file_tests` function determines which tests to turn on based upon the file name. It is realtively simple. For example, to turn on a full suite of tests for Java files: +The `personality_file_tests` function determines which tests to turn on based upon the file name. It is relatively simple. For example, to turn on a full suite of tests for Java files: ```bash function personality_file_tests @@ -131,19 +144,19 @@ function personality_modules It takes exactly two parameters `repostatus` and `testtype`. -The `repostatus` parameter tells the `personality` function exactly what state the repository is in. It can only be in one of two states: `branch` or `patch`. `branch` means the patch has not been applied. The `patch` state is after the patch has been applied. +The `repostatus` parameter tells the `personality` function exactly what state the source repository is in. It can only be in one of two states: `branch` or `patch`. `branch` means the patch has not been applied. The `patch` state is after the patch has been applied. The `testtype` state tells the personality exactly which test is about to be executed. In order to communicate back to test-patch, there are two functions for the personality to use. -The first is `clear_personality_queue`. This removes the previous test's configuration so that a new module queue may be built. 
+The first is `clear_personality_queue`. This removes the previous test's configuration so that a new module queue may be built. Custom personality_modules will almost always want to do this as the first action.

 The second is `personality_enqueue_module`. This function takes two parameters. The first parameter is the name of the module to add to this test's queue. The second parameter is an optional list of additional flags to pass to Maven when processing it. `personality_enqueue_module` may be called as many times as necessary for your project.

 NOTE: A module name of . signifies the root of the repository.

-For example, let's say your project uses a special configuration to skip unit tests (-DskipTests). Running unit tests during a javadoc build isn't very interesting. We can write a simple personality check to disable the unit tests:
+For example, let's say your project uses a special configuration to skip unit tests (-DskipTests). Running unit tests during a javadoc build isn't very useful and wastes a lot of time. We can write a simple personality check to disable the unit tests:

 ```bash
@@ -160,5 +173,32 @@ function personality_modules
 ```

-This function will tell test-patch that when the javadoc test is being run, do the documentation test at the base of the repository and make sure the -DskipTests flag is passed to Maven.
+This function will tell test-patch that when the javadoc test is being run, do the documentation build at the base of the source repository and make sure the -DskipTests flag is passed to our build tool.
+
+# Important Variables
+
+There are a handful of extremely important variables that make life easier for personality and plug-in writers:
+
+* BUILD\_NATIVE will be set to true if the system has requested that non-JVM-based code be built (e.g., JNI or other compiled C code). Under Jenkins, this is always true.
+
+* BUILDTOOL specifies which tool is currently being used to drive compilation. Additionally, many build tools define xyz\_ARGS to pass on to the build tool command line (e.g., MAVEN\_ARGS if maven is in use). Projects may set this in their personality. NOTE: today, only one build tool at a time is supported. This may change in the future.
+
+* CHANGED\_FILES is a list of all files that appear to be added, deleted, or modified in the patch.
+
+* CHANGED\_UNFILTERED\_MODULES is a list of all modules that house all of the CHANGED\_FILES. Be aware that the root of the source tree is reported as '.'.
+
+* CHANGED\_MODULES reports which modules appear to have source code in them.
+
+* HOW\_TO\_CONTRIBUTE should be a URL that points to a project's on-boarding documentation for new users. Currently, it is used to suggest a review of patch naming guidelines. Since this should be project-specific information, it is useful to set in a project's personality.
+
+* ISSUE\_RE is used to help test-patch when talking to JIRA. It helps determine if the given project is appropriate for the given JIRA issue.
+
+* MODULE and other MODULE\_\* variables are arrays that contain which modules to operate upon, their status, etc. These should be treated as read-only by plug-ins.
+
+* PATCH\_BRANCH\_DEFAULT is the name of the branch in the git repo that is considered the master. This is useful to set in personalities.
+
+* PATCH\_DIR is the name of the temporary directory that houses test-patch artifacts (such as logs and the patch file itself).
+
+* TEST\_PARALLEL if parallel unit tests have been requested.
Project personalities are responsible for actually enabling or ignoring the request. TEST\_THREADS is the number of threads that have been requested to run in parallel. diff --git a/dev-support/docs/precommit-architecture.md b/dev-support/docs/precommit-architecture.md index c134728cb8eae..cd527ae0e0c59 100644 --- a/dev-support/docs/precommit-architecture.md +++ b/dev-support/docs/precommit-architecture.md @@ -14,17 +14,17 @@ # Some Philosophy -* Everyone's time is valuable. The quicker contributors can get feedback and iterate, the more likely their contribution will get checked in. A committer should be able to focus on the core issues of a contribution rather than details that might be able to be determined automatically. +* Everyone's time is valuable. The quicker contributors can get feedback and iterate, the more likely and faster their contribution will get checked in. A committer should be able to focus on the core issues of a contribution rather than details that can be determined automatically. -* Precommit checks should be fast. There is no value in testing parts of the source tree that are not immediately impacted by a change. Unit testing is the target. They are not a replacement for full builds, which is where integration tests should happen. +* Precommit checks should be fast. There is no value in testing parts of the source tree that are not immediately impacted by a change. Unit testing is the target. They are not a replacement for full builds or integration tests. * Many open source projects have a desire to have this capability. Why not generalize a solution? * In many build systems (especially with maven), a modular design has been picked. Why not leverage that design to make checks faster? -* Projects that use the same language will, with a high degree of certainity, benefit from the same types of checks. +* Projects that use the same language will, with a high degree of certainty, benefit from the same types of checks. -* Portability matters. +* Portability matters. Tooling should be as operating system and language agnostic as possible. # Phases @@ -32,7 +32,7 @@ test-patch works effectively under several different phases: ## Setup -This is where test-patch configures and validates the environemnt. Some things done in this phase: +This is where test-patch configures and validates the environment. Some things done in this phase: * Defaults * Parameter handling @@ -50,12 +50,12 @@ This acts as a verification of all of the setup parts and is the final place to ## Pre-apply -This is where the 'before' work is handled. Some things done in this phase: +This is where the 'before' work is handled. Some things that typically get checked in this phase: * The first pass of files and modules that will get patched -* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc. +* Validation and information gathering of the source tree pre-patch * Author checks -* check for modified unit tests +* Check for modified unit tests ## Patch is Applied @@ -65,15 +65,14 @@ The patch gets applied. Then a second pass to determine which modules and files Now that the patch has been applied, many of the same checks performed in the Pre-apply step are done again to build an 'after' picture. -* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc. - ## Post-install Some tests only work correctly when the repo is up-to-date. So -mvn install is run to update the local repo and we enter this phase. 
Tests performed here:
+mvn install is run to update the local repo and we enter this phase. Some example tests performed here:

-* Verification that maven eclipse integration still works
-* FindBugs
+* javadoc
+* Findbugs
+* Maven eclipse integration still works

 ## Unit Tests

@@ -81,7 +80,7 @@ Since unit tests are generally the slowest part of the precommit process, they a
 ## Reporting

-Finally, the results are reported to the screen and, optionally, to JIRA.
+Finally, the results are reported to the screen and, optionally, to JIRA and/or whatever bug system has been configured.

 # Test Flow

 The basic workflow for many of the sub-items in individual phases is:

 1. print a header, so the end user knows that something is happening
 1. verify if the test is needed. If so, continue on. Otherwise, return success and let the next part of the phase execute.
 1. Ask the personality about what modules and what flags should get used
-1. Execute maven in the given modules with the given flags. Log the output and record the time and result code.
-1. Do any extra work as appropriate (diffs, counts, etc) and either accept the status and message given by the maven run or change the vote, message, log file, etc.
+1. Execute maven (or some other build tool) in the given modules with the given flags. Log the output and record the time and result code.
+1. Do any extra work as appropriate (diffs, counts, etc.) and either accept the status and message given by the maven run or change the vote, message, log file, etc., based upon this extra work.
 1. Add the outcome(s) to the report generator

 As one can see, the modules list is one of the key inputs into what actually gets executed. As a result, projects must have full flexibility in either adding, modifying, or even removing modules from the test list. If a personality removes the entire list of modules, then that test should just be ignored.

diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md
index a830cdb9c2f46..e68ad071361bf 100644
--- a/dev-support/docs/precommit-basic.md
+++ b/dev-support/docs/precommit-basic.md
@@ -18,25 +18,30 @@ test-patch
 * [Purpose](#Purpose)
 * [Pre-requisites](#Pre-requisites)
 * [Basic Usage](#Basic_Usage)
+* [Build Tool](#Build_Tool)
+* [Providing Patch Files](#Providing_Patch_Files)
+* [Project-Specific Capabilities](#Project-Specific_Capabilities)
+* [MultiJDK](#MultiJDK)
+* [Docker](#Docker)

-## Purpose
+# Purpose

-As part of Hadoop's commit process, all patches to the source base go through a precommit test that does some (usually) light checking to make sure the proposed change does not break unit tests and/or passes some other prerequisites. This is meant as a preliminary check for committers so that the basic patch is in a known state. This check, called test-patch, may also be used by individual developers to verify a patch prior to sending to the Hadoop QA systems.
+As part of Hadoop's commit process, all patches to the source base go through a precommit test that does some (relatively) light checking to make sure the proposed change does not break unit tests and/or passes some other prerequisites such as code formatting guidelines. This is meant as a preliminary check for committers so that the basic patch is in a known state and for contributors to know if they have followed the project's guidelines. This check, called test-patch, may also be used by individual developers to verify a patch prior to sending to the Hadoop QA systems.
Other projects have adopted a similar methodology after seeing great success in the Hadoop model. Some have even gone as far as forking Hadoop's precommit code and modifying it to meet their project's needs. This is a modification to Hadoop's version of test-patch so that we may bring together all of these forks under a common code base to help the community as a whole.

-## Pre-requisites
+# Pre-requisites

 test-patch has the following requirements:

 * Ant- or Maven-based project (and ant/maven installed)
-* git-based project (and git installed)
+* git-based project (and git 1.7.3 or higher installed)
 * bash v3.2 or higher
 * findbugs 3.x installed
-* shellcheck installed
+* shellcheck installed, preferably 0.3.6 or higher
 * pylint installed
 * GNU diff
 * GNU patch
@@ -57,21 +62,21 @@ Optional:

 * Apache JIRA-based issue tracking
 * JIRA cli tools

-The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options. For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin.
+The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options. For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin and the default location for the GNU binaries is /usr/gnu/bin.

-## Basic Usage
+# Basic Usage

-This command will execute basic patch testing against a patch file stored in filename:
+This command will execute basic patch testing against a patch file stored in "filename":

 ```bash
 $ cd <your repo>
 $ dev-support/test-patch.sh --dirty-workspace --project=projectname <filename>
 ```

-The `--dirty-workspace` flag tells test-patch that the repository is not clean and it is ok to continue. This version command does not run the unit tests.
+The `--dirty-workspace` flag tells test-patch that the repository is not clean and it is ok to continue. By default, unit tests are not run since they may take a significant amount of time.

-To do that, we need to provide the --run-tests command:
+To turn them on, we need to provide the --run-tests option:

 ```bash
@@ -85,16 +90,34 @@ A typical configuration is to have two repositories. One with the code you are

 ```bash
 $ cd <your repo>
-$ git diff --no-prefix trunk > /tmp/patchfile
+$ git diff master > /tmp/patchfile
 $ cd ../<testing repo>
 $ <testing repo>/dev-support/test-patch.sh --basedir=<testing repo> --resetrepo /tmp/patchfile
 ```

 We used two new options here. --basedir sets the location of the repository to use for testing. --resetrepo tells test-patch that it can go into **destructive** mode. Destructive mode will wipe out any changes made to that repository, so use it with care!

-After the tests have run, there is a directory that contains all of the test-patch related artifacts. This is generally referred to as the patchprocess directory. By default, test-patch tries to make something off of /tmp to contain this content. Using the `--patchdir` command, one can specify exactly which directory to use. This is helpful for automated precommit testing so that the Jenkins or other automated workflow system knows where to look to gather up the output.
+After the tests have run, there is a directory that contains all of the test-patch related artifacts. This is generally referred to as the patchprocess directory. By default, test-patch tries to make something off of /tmp to contain this content. Using the `--patch-dir` option, one can specify exactly which directory to use.
This is helpful for automated precommit testing so that Jenkins or another automated workflow system knows where to look to gather up the output.

-## Providing Patch Files
+For example:
+
+```bash
+$ test-patch.sh --jenkins --patch-dir=${WORKSPACE}/patchprocess --basedir=${WORKSPACE}/source ${WORKSPACE}/patchfile
+```
+
+... will trigger test-patch to run in fully automated Jenkins mode, using ${WORKSPACE}/patchprocess as its scratch space, ${WORKSPACE}/source as the source repository, and ${WORKSPACE}/patchfile as the name of the patch to test against.
+
+# Build Tool
+
+Out of the box, test-patch is built to use maven. But what if the project is built using something else, such as ant?
+
+```bash
+$ test-patch.sh (other options) --build-tool=ant
+```
+
+will tell test-patch to use ant instead of maven to drive the project.
+
+# Providing Patch Files

 It is a fairly common practice within the Apache community to use Apache's JIRA instance to store potential patches. As a result, test-patch supports providing just a JIRA issue number. test-patch will find the *last* attachment, download it, then process it.

@@ -106,7 +129,6 @@ $ test-patch.sh (other options) HADOOP-9905

 ... will process the patch file associated with this JIRA issue.

-
 A new practice is to use a service such as GitHub and its Pull Request (PR) feature. Luckily, test-patch supports URLs and many services like GitHub provide ways to provide unified diffs via URLs.

 For example:

@@ -117,6 +139,50 @@ $ test-patch.sh (other options) https://github.com/apache/flink/pull/773.patch

 ... will grab a unified diff of PR #773 and process it.

+# Project-Specific Capabilities
+
+Due to the extensible nature of the system, test-patch allows projects to define project-specific rules, which we call personalities. (How to build those rules is covered elsewhere.) There are two ways to specify which personality to use:
+
+## Direct Method
+
+```bash
+$ test-patch.sh (other options) --personality=(filename)
+```
+
+This tells test-patch to use the personality in the given file.
+
+## Project Method
+
+Alternatively, test-patch can detect and use a personality stored in its "personality" directory, based upon the project name:
+
+```bash
+$ test-patch.sh (other options) --project=(project)
+```
+
+# MultiJDK
+
+For many projects, it is useful to test Java code against multiple versions of JDKs at the same time. test-patch can do this with the --multijdkdirs option:
+
+```bash
+$ test-patch.sh (other options) --multijdkdirs="/j/d/k/1,/j/d/k/2"
+```
+
+Not all Java tests support this mode, but those that do will now run their tests with all of the given versions of Java consecutively (e.g., javac--the Java compilation test). Tests that do not support MultiJDK mode (e.g., checkstyle, mvn install) will use JAVA\_HOME.
+
+NOTE: JAVA\_HOME is always appended to the list of JDKs in MultiJDK mode. If JAVA\_HOME is in the list, it will be moved to the end.
+
+# Docker
+
+test-patch also has a mode to utilize Docker:
+
+```bash
+$ test-patch.sh (other options) --docker
+```
+
+This will do some preliminary setup and then re-execute itself inside a Docker container. For more information on how to provide a custom Dockerfile, see the advanced guide.
+
 ## In Closing

 test-patch has many other features and command line options for the basic user. Many of these are self-explanatory. To see the list of options, run test-patch.sh without any options or with --help.
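As a closing illustration, several of the documented options can be combined in a single run. The sketch below is hypothetical: the paths, project name, and JDK locations are placeholders, and HADOOP-9905 is reused from the JIRA example above:

```bash
# test a JIRA attachment against a scratch checkout, with unit tests
# enabled (--run-tests) and two JDKs exercised where supported
$ dev-support/test-patch.sh \
    --basedir=/scratch/project-clean \
    --resetrepo \
    --project=projectname \
    --multijdkdirs="/usr/lib/jvm/jdk7,/usr/lib/jvm/jdk8" \
    --run-tests \
    HADOOP-9905
```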
From fd4424b6a7899358ff9584b888d1672920e5636d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:34:01 -0700 Subject: [PATCH 043/130] HADOOP-12286. test-patch pylint plugin should support indent-string option (Kengo Seki via aw) --- dev-support/personality/hadoop.sh | 2 ++ dev-support/test-patch.d/pylint.sh | 19 +++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 3d6e3faeca1a0..60dbb3d7989bc 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -22,6 +22,8 @@ PATCH_BRANCH_DEFAULT=trunk HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute" #shellcheck disable=SC2034 ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' +#shellcheck disable=SC2034 +PYLINT_OPTIONS="--indent-string=' '" HADOOP_MODULES="" diff --git a/dev-support/test-patch.d/pylint.sh b/dev-support/test-patch.d/pylint.sh index faa8136055df1..ebac162156a93 100755 --- a/dev-support/test-patch.d/pylint.sh +++ b/dev-support/test-patch.d/pylint.sh @@ -19,11 +19,13 @@ add_plugin pylint PYLINT_TIMER=0 PYLINT=${PYLINT:-$(which pylint 2>/dev/null)} +PYLINT_OPTIONS=${PYLINT_OPTIONS:-} function pylint_usage { echo "Pylint specific:" - echo "--pylint= path to pylint executable" + echo "--pylint= path to pylint executable" + echo "--pylint-options= pylint options other than output-format and reports" } function pylint_parse_args @@ -35,6 +37,9 @@ function pylint_parse_args --pylint=*) PYLINT=${i#*=} ;; + --pylint-options=*) + PYLINT_OPTIONS=${i#*=} + ;; esac done } @@ -70,7 +75,8 @@ function pylint_preapply pushd "${BASEDIR}" >/dev/null for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then - ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + # shellcheck disable=SC2086 + eval "${PYLINT} ${PYLINT_OPTIONS} --output-format=parseable --reports=n ${i}" 2>/dev/null | ${AWK} '1> "${PATCH_DIR}/branch-pylint-result.txt" fi done @@ -111,7 +117,8 @@ function pylint_postapply pushd "${BASEDIR}" >/dev/null for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then - ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + # shellcheck disable=SC2086 + eval "${PYLINT} ${PYLINT_OPTIONS} --output-format=parseable --reports=n ${i}" 2>/dev/null | ${AWK} '1> "${PATCH_DIR}/patch-pylint-result.txt" fi done @@ -122,14 +129,14 @@ function pylint_postapply add_footer_table pylint "v${PYLINT_VERSION%,}" calcdiffs "${PATCH_DIR}/branch-pylint-result.txt" "${PATCH_DIR}/patch-pylint-result.txt" > "${PATCH_DIR}/diff-patch-pylint.txt" - diffPostpatch=$(${AWK} -F: 'BEGIN {sum=0} 2 Date: Mon, 3 Aug 2015 10:36:24 -0700 Subject: [PATCH 044/130] HADOOP-12287. 
add support for perlcritic (Kengo Seki via aw) --- dev-support/test-patch-docker/Dockerfile-startstub | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index 5e5ca78cdfa48..080f86f22839e 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -34,7 +34,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libcurl4-openssl-dev \ python python2.7 pylint \ ruby \ - openjdk-7-jdk + openjdk-7-jdk \ + libperl-critic-perl # Fixing the Apache commons / Maven dependency problem under Ubuntu: # See http://wiki.apache.org/commons/VfsProblems From 81c937426b23afad088580697d8f3f1e6aca60c8 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:38:55 -0700 Subject: [PATCH 045/130] HADOOP-12256. add support for ruby-lint (Kengo Seki via aw) --- dev-support/test-patch-docker/Dockerfile-startstub | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index 080f86f22839e..fd3e4c5b742b7 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -78,6 +78,11 @@ RUN cabal update && cabal install shellcheck --global ### RUN gem install rubocop +#### +# Install ruby-lint +### +RUN gem install ruby-lint + ##### # Install JIRA CLI ##### From a67aa6c18146673df812d0f69c44b1567395c92f Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:44:38 -0700 Subject: [PATCH 046/130] HADOOP-12121. smarter branch detection (aw) --- dev-support/test-patch.sh | 160 ++++++++++++++++++++++---------------- 1 file changed, 94 insertions(+), 66 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 48f83a062f791..1f1f88e9f82cd 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1281,9 +1281,6 @@ function git_checkout fi determine_branch - if [[ ${PATCH_BRANCH} =~ ^git ]]; then - PATCH_BRANCH=$(echo "${PATCH_BRANCH}" | cut -dt -f2) - fi # we need to explicitly fetch in case the # git ref hasn't been brought in tree yet @@ -1322,9 +1319,6 @@ function git_checkout fi determine_branch - if [[ ${PATCH_BRANCH} =~ ^git ]]; then - PATCH_BRANCH=$(echo "${PATCH_BRANCH}" | cut -dt -f2) - fi currentbranch=$(${GIT} rev-parse --abbrev-ref HEAD) if [[ "${currentbranch}" != "${PATCH_BRANCH}" ]];then @@ -1351,20 +1345,19 @@ function git_checkout return 0 } -## @description Confirm the given branch is a member of the list of space -## @description delimited branches or a git ref +## @description Confirm the given branch is a git reference +## @descriptoin or a valid gitXYZ commit hash ## @audience private ## @stability evolving ## @replaceable no ## @param branch -## @param branchlist -## @return 0 on success +## @return 0 on success, if gitXYZ was passed, PATCH_BRANCH=xyz ## @return 1 on failure function verify_valid_branch { - local branches=$1 - local check=$2 + local check=$1 local i + local hash # shortcut some common # non-resolvable names @@ -1372,26 +1365,22 @@ function verify_valid_branch return 1 fi - if [[ ${check} == patch ]]; then - return 1 - fi - if [[ ${check} =~ ^git ]]; then - ref=$(echo "${check}" | cut -f2 -dt) - count=$(echo "${ref}" | wc -c | tr -d ' ') - - if [[ ${count} == 8 || ${count} == 41 ]]; then - return 0 + hash=$(echo "${check}" | cut -f2- -dt) + if [[ -n ${hash} 
]]; then + ${GIT} cat-file -t "${hash}" >/dev/null 2>&1 + if [[ $? -eq 0 ]]; then + PATCH_BRANCH=${hash} + return 0 + fi + return 1 + else + return 1 fi - return 1 fi - for i in ${branches}; do - if [[ "${i}" == "${check}" ]]; then - return 0 - fi - done - return 1 + ${GIT} show-ref "${check}" >/dev/null 2>&1 + return $? } ## @description Try to guess the branch being tested using a variety of heuristics @@ -1402,10 +1391,9 @@ function verify_valid_branch ## @return 1 on failure, with PATCH_BRANCH updated to PATCH_BRANCH_DEFAULT function determine_branch { - local allbranches local patchnamechunk - - yetus_debug "Determine branch" + local total + local count # something has already set this, so move on if [[ -n ${PATCH_BRANCH} ]]; then @@ -1414,6 +1402,13 @@ function determine_branch pushd "${BASEDIR}" > /dev/null + yetus_debug "Determine branch" + + # something has already set this, so move on + if [[ -n ${PATCH_BRANCH} ]]; then + return + fi + # developer mode, existing checkout, whatever if [[ "${DIRTY_WORKSPACE}" == true ]];then PATCH_BRANCH=$(${GIT} rev-parse --abbrev-ref HEAD) @@ -1421,50 +1416,83 @@ function determine_branch return fi - allbranches=$(${GIT} branch -r | tr -d ' ' | ${SED} -e s,origin/,,g) - for j in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do - yetus_debug "Determine branch: starting with ${j}" - # shellcheck disable=SC2016 - patchnamechunk=$(echo "${j}" | ${AWK} -F/ '{print $NF}') - - # ISSUE.branch.##.patch - yetus_debug "Determine branch: ISSUE.branch.##.patch" - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2 -d. ) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + if [[ -z "${j}" ]]; then + continue fi - - # ISSUE-branch-##.patch - yetus_debug "Determine branch: ISSUE-branch-##.patch" + yetus_debug "Determine branch: starting with ${j}" + patchnamechunk=$(echo "${j}" \ + | ${SED} -e 's,.*/\(.*\)$,\1,' \ + -e 's,\.txt,.,' \ + -e 's,.patch,.,g' \ + -e 's,.diff,.,g' \ + -e 's,\.\.,.,g' \ + -e 's,\.$,,g' ) + + # ISSUE-branch-## PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + yetus_debug "Determine branch: ISSUE-branch-## = ${PATCH_BRANCH}" + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi fi - # ISSUE-##.patch.branch - yetus_debug "Determine branch: ISSUE-##.patch.branch" - # shellcheck disable=SC2016 - PATCH_BRANCH=$(echo "${patchnamechunk}" | ${AWK} -F. '{print $NF}') - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi + # ISSUE-##[.##].branch + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -eq 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.) + yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done + + # ISSUE.branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -eq 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.) 
+ yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done + + # ISSUE-branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 1 )) + until [[ ${total} -eq 1 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1-${total} -d. ) + yetus_debug "Determine branch: ISSUE-branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done - # ISSUE-branch.##.patch - yetus_debug "Determine branch: ISSUE-branch.##.patch" - # shellcheck disable=SC2016 - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | ${AWK} -F. '{print $(NF-2)}' 2>/dev/null) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi done PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" - popd >/dev/null } From 595c724689ca982681dd1d9cb029c2d78b22b447 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:47:11 -0700 Subject: [PATCH 047/130] HADOOP-12121. smarter branch detection (aw) --- dev-support/test-patch.sh | 160 ++++++++++++++++++++++---------------- 1 file changed, 94 insertions(+), 66 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 48f83a062f791..1f1f88e9f82cd 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1281,9 +1281,6 @@ function git_checkout fi determine_branch - if [[ ${PATCH_BRANCH} =~ ^git ]]; then - PATCH_BRANCH=$(echo "${PATCH_BRANCH}" | cut -dt -f2) - fi # we need to explicitly fetch in case the # git ref hasn't been brought in tree yet @@ -1322,9 +1319,6 @@ function git_checkout fi determine_branch - if [[ ${PATCH_BRANCH} =~ ^git ]]; then - PATCH_BRANCH=$(echo "${PATCH_BRANCH}" | cut -dt -f2) - fi currentbranch=$(${GIT} rev-parse --abbrev-ref HEAD) if [[ "${currentbranch}" != "${PATCH_BRANCH}" ]];then @@ -1351,20 +1345,19 @@ function git_checkout return 0 } -## @description Confirm the given branch is a member of the list of space -## @description delimited branches or a git ref +## @description Confirm the given branch is a git reference +## @descriptoin or a valid gitXYZ commit hash ## @audience private ## @stability evolving ## @replaceable no ## @param branch -## @param branchlist -## @return 0 on success +## @return 0 on success, if gitXYZ was passed, PATCH_BRANCH=xyz ## @return 1 on failure function verify_valid_branch { - local branches=$1 - local check=$2 + local check=$1 local i + local hash # shortcut some common # non-resolvable names @@ -1372,26 +1365,22 @@ function verify_valid_branch return 1 fi - if [[ ${check} == patch ]]; then - return 1 - fi - if [[ ${check} =~ ^git ]]; then - ref=$(echo "${check}" | cut -f2 -dt) - count=$(echo "${ref}" | wc -c | tr -d ' ') - - if [[ ${count} == 8 || ${count} == 41 ]]; then - return 0 + hash=$(echo "${check}" | cut -f2- -dt) + if [[ -n ${hash} ]]; then + ${GIT} cat-file -t "${hash}" >/dev/null 2>&1 + if [[ $? -eq 0 ]]; then + PATCH_BRANCH=${hash} + return 0 + fi + return 1 + else + return 1 fi - return 1 fi - for i in ${branches}; do - if [[ "${i}" == "${check}" ]]; then - return 0 - fi - done - return 1 + ${GIT} show-ref "${check}" >/dev/null 2>&1 + return $? 
} ## @description Try to guess the branch being tested using a variety of heuristics @@ -1402,10 +1391,9 @@ function verify_valid_branch ## @return 1 on failure, with PATCH_BRANCH updated to PATCH_BRANCH_DEFAULT function determine_branch { - local allbranches local patchnamechunk - - yetus_debug "Determine branch" + local total + local count # something has already set this, so move on if [[ -n ${PATCH_BRANCH} ]]; then @@ -1414,6 +1402,13 @@ function determine_branch pushd "${BASEDIR}" > /dev/null + yetus_debug "Determine branch" + + # something has already set this, so move on + if [[ -n ${PATCH_BRANCH} ]]; then + return + fi + # developer mode, existing checkout, whatever if [[ "${DIRTY_WORKSPACE}" == true ]];then PATCH_BRANCH=$(${GIT} rev-parse --abbrev-ref HEAD) @@ -1421,50 +1416,83 @@ function determine_branch return fi - allbranches=$(${GIT} branch -r | tr -d ' ' | ${SED} -e s,origin/,,g) - for j in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do - yetus_debug "Determine branch: starting with ${j}" - # shellcheck disable=SC2016 - patchnamechunk=$(echo "${j}" | ${AWK} -F/ '{print $NF}') - - # ISSUE.branch.##.patch - yetus_debug "Determine branch: ISSUE.branch.##.patch" - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2 -d. ) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + if [[ -z "${j}" ]]; then + continue fi - - # ISSUE-branch-##.patch - yetus_debug "Determine branch: ISSUE-branch-##.patch" + yetus_debug "Determine branch: starting with ${j}" + patchnamechunk=$(echo "${j}" \ + | ${SED} -e 's,.*/\(.*\)$,\1,' \ + -e 's,\.txt,.,' \ + -e 's,.patch,.,g' \ + -e 's,.diff,.,g' \ + -e 's,\.\.,.,g' \ + -e 's,\.$,,g' ) + + # ISSUE-branch-## PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + yetus_debug "Determine branch: ISSUE-branch-## = ${PATCH_BRANCH}" + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi fi - # ISSUE-##.patch.branch - yetus_debug "Determine branch: ISSUE-##.patch.branch" - # shellcheck disable=SC2016 - PATCH_BRANCH=$(echo "${patchnamechunk}" | ${AWK} -F. '{print $NF}') - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi + # ISSUE-##[.##].branch + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -eq 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.) + yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done + + # ISSUE.branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -eq 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.) + yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done + + # ISSUE-branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1- -d. 
) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 1 )) + until [[ ${total} -eq 1 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1-${total} -d. ) + yetus_debug "Determine branch: ISSUE-branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return + fi + fi + done - # ISSUE-branch.##.patch - yetus_debug "Determine branch: ISSUE-branch.##.patch" - # shellcheck disable=SC2016 - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | ${AWK} -F. '{print $(NF-2)}' 2>/dev/null) - verify_valid_branch "${allbranches}" "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi done PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" - popd >/dev/null } From 9a3596a26c024b6576fc3ce40d19054bfe12baa2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 11:03:18 -0700 Subject: [PATCH 048/130] add missing files from previous commits because I can never remember to do a git add --- dev-support/test-patch.d/perlcritic.sh | 140 +++++++++++++++++++++++++ dev-support/test-patch.d/ruby-lint.sh | 140 +++++++++++++++++++++++++ 2 files changed, 280 insertions(+) create mode 100755 dev-support/test-patch.d/perlcritic.sh create mode 100755 dev-support/test-patch.d/ruby-lint.sh diff --git a/dev-support/test-patch.d/perlcritic.sh b/dev-support/test-patch.d/perlcritic.sh new file mode 100755 index 0000000000000..1cec3f3630373 --- /dev/null +++ b/dev-support/test-patch.d/perlcritic.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_plugin perlcritic + +PERLCRITIC_TIMER=0 + +PERLCRITIC=${PERLCRITIC:-$(which perlcritic 2>/dev/null)} + +function perlcritic_usage +{ + echo "Perl::Critic specific:" + echo "--perlcritic= path to perlcritic executable" +} + +function perlcritic_parse_args +{ + local i + + for i in "$@"; do + case ${i} in + --perlcritic=*) + PERLCRITIC=${i#*=} + ;; + esac + done +} + +function perlcritic_filefilter +{ + local filename=$1 + + if [[ ${filename} =~ \.p[lm]$ ]]; then + add_test perlcritic + fi +} + +function perlcritic_preapply +{ + local i + + verify_needed_test perlcritic + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "Perl::Critic plugin: prepatch" + + if [[ ! -x ${PERLCRITIC} ]]; then + yetus_error "${PERLCRITIC} does not exist." + return 0 + fi + + start_clock + + echo "Running perlcritic against modified perl scripts/modules." 
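+  # perlcritic -1 applies the severity-1 (strictest) threshold, reporting
+  # every policy violation; --verbose 1 keeps output to one line per
+  # violation so the branch and patch runs can be diffed with calcdiffs below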
+ pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.p[lm]$ && -f ${i} ]]; then + ${PERLCRITIC} -1 --verbose 1 "${i}" 2>/dev/null >> "${PATCH_DIR}/branch-perlcritic-result.txt" + fi + done + popd >/dev/null + # keep track of how much as elapsed for us already + PERLCRITIC_TIMER=$(stop_clock) + return 0 +} + +function perlcritic_postapply +{ + local i + local numPrepatch + local numPostpatch + local diffPostpatch + + verify_needed_test perlcritic + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "Perl::Critic plugin: postpatch" + + if [[ ! -x ${PERLCRITIC} ]]; then + yetus_error "${PERLCRITIC} is not available." + add_vote_table 0 perlcritic "Perl::Critic was not available." + return 0 + fi + + start_clock + + # add our previous elapsed to our new timer + # by setting the clock back + offset_clock "${PERLCRITIC_TIMER}" + + echo "Running perlcritic against modified perl scripts/modules." + # we re-check this in case one has been added + pushd "${BASEDIR}" >/dev/null + for i in ${CHANGED_FILES}; do + if [[ ${i} =~ \.p[lm]$ && -f ${i} ]]; then + ${PERLCRITIC} -1 --verbose 1 "${i}" 2>/dev/null >> "${PATCH_DIR}/patch-perlcritic-result.txt" + fi + done + popd >/dev/null + + PERLCRITIC_VERSION=$(${PERLCRITIC} --version 2>/dev/null) + add_footer_table perlcritic "v${PERLCRITIC_VERSION}" + + calcdiffs "${PATCH_DIR}/branch-perlcritic-result.txt" "${PATCH_DIR}/patch-perlcritic-result.txt" > "${PATCH_DIR}/diff-patch-perlcritic.txt" + # shellcheck disable=SC2016 + diffPostpatch=$(wc -l "${PATCH_DIR}/diff-patch-perlcritic.txt" | ${AWK} '{print $1}') + + if [[ ${diffPostpatch} -gt 0 ]] ; then + # shellcheck disable=SC2016 + numPrepatch=$(wc -l "${PATCH_DIR}/branch-perlcritic-result.txt" | ${AWK} '{print $1}') + + # shellcheck disable=SC2016 + numPostpatch=$(wc -l "${PATCH_DIR}/patch-perlcritic-result.txt" | ${AWK} '{print $1}') + + add_vote_table -1 perlcritic "The applied patch generated "\ + "${diffPostpatch} new Perl::Critic issues (total was ${numPrepatch}, now ${numPostpatch})." + add_footer_table perlcritic "@@BASE@@/diff-patch-perlcritic.txt" + return 1 + fi + + add_vote_table +1 perlcritic "There were no new perlcritic issues." + return 0 +} diff --git a/dev-support/test-patch.d/ruby-lint.sh b/dev-support/test-patch.d/ruby-lint.sh new file mode 100755 index 0000000000000..35d96041f3007 --- /dev/null +++ b/dev-support/test-patch.d/ruby-lint.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+add_plugin ruby_lint
+
+RUBY_LINT_TIMER=0
+
+RUBY_LINT=${RUBY_LINT:-$(which ruby-lint 2>/dev/null)}
+
+function ruby_lint_usage
+{
+  echo "Ruby-lint specific:"
+  echo "--ruby-lint= path to ruby-lint executable"
+}
+
+function ruby_lint_parse_args
+{
+  local i
+
+  for i in "$@"; do
+    case ${i} in
+      --ruby-lint=*)
+        RUBY_LINT=${i#*=}
+      ;;
+    esac
+  done
+}
+
+function ruby_lint_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.rb$ ]]; then
+    add_test ruby_lint
+  fi
+}
+
+function ruby_lint_preapply
+{
+  local i
+
+  verify_needed_test ruby_lint
+  if [[ $? == 0 ]]; then
+    return 0
+  fi
+
+  big_console_header "ruby-lint plugin: prepatch"
+
+  if [[ ! -x ${RUBY_LINT} ]]; then
+    yetus_error "${RUBY_LINT} does not exist."
+    return 0
+  fi
+
+  start_clock
+
+  echo "Running ruby-lint against modified ruby scripts."
+  pushd "${BASEDIR}" >/dev/null
+  for i in ${CHANGED_FILES}; do
+    if [[ ${i} =~ \.rb$ && -f ${i} ]]; then
+      ${RUBY_LINT} -p syntastic "${i}" | sort -t : -k 1,1 -k 3,3n -k 4,4n >> "${PATCH_DIR}/branch-ruby-lint-result.txt"
+    fi
+  done
+  popd >/dev/null
+  # keep track of how much has elapsed for us already
+  RUBY_LINT_TIMER=$(stop_clock)
+  return 0
+}
+
+function ruby_lint_postapply
+{
+  local i
+  local numPrepatch
+  local numPostpatch
+  local diffPostpatch
+
+  verify_needed_test ruby_lint
+  if [[ $? == 0 ]]; then
+    return 0
+  fi
+
+  big_console_header "ruby-lint plugin: postpatch"
+
+  if [[ ! -x ${RUBY_LINT} ]]; then
+    yetus_error "${RUBY_LINT} is not available."
+    add_vote_table 0 ruby-lint "Ruby-lint was not available."
+    return 0
+  fi
+
+  start_clock
+
+  # add our previous elapsed to our new timer
+  # by setting the clock back
+  offset_clock "${RUBY_LINT_TIMER}"
+
+  echo "Running ruby-lint against modified ruby scripts."
+  # we re-check this in case one has been added
+  pushd "${BASEDIR}" >/dev/null
+  for i in ${CHANGED_FILES}; do
+    if [[ ${i} =~ \.rb$ && -f ${i} ]]; then
+      ${RUBY_LINT} -p syntastic "${i}" | sort -t : -k 1,1 -k 3,3n -k 4,4n >> "${PATCH_DIR}/patch-ruby-lint-result.txt"
+    fi
+  done
+  popd >/dev/null
+
+  # shellcheck disable=SC2016
+  RUBY_LINT_VERSION=$(${RUBY_LINT} -v | ${AWK} '{print $2}')
+  add_footer_table ruby-lint "${RUBY_LINT_VERSION}"
+
+  calcdiffs "${PATCH_DIR}/branch-ruby-lint-result.txt" "${PATCH_DIR}/patch-ruby-lint-result.txt" > "${PATCH_DIR}/diff-patch-ruby-lint.txt"
+  diffPostpatch=$(${AWK} -F: 'BEGIN {sum=0} 4
 Date: Mon, 3 Aug 2015 10:38:05 -0500
Subject: [PATCH 049/130] YARN-3978. Configurably turn off the saving of container info in Generic AHS (Eric Payne via jeagles)

---
 hadoop-yarn-project/CHANGES.txt | 3 +
 .../hadoop/yarn/conf/YarnConfiguration.java | 9 +++
 .../hadoop/yarn/server/webapp/AppBlock.java | 12 +--
 .../rmcontainer/RMContainerImpl.java | 43 +++++++++-
 .../resourcemanager/TestClientRMService.java | 1 +
 .../rmcontainer/TestRMContainerImpl.java | 79 ++++++++++++++++++-
 .../capacity/TestChildQueueOrder.java | 1 +
 .../scheduler/capacity/TestLeafQueue.java | 2 +
 .../scheduler/capacity/TestReservations.java | 6 +-
 .../scheduler/fifo/TestFifoScheduler.java | 2 +
 10 files changed, 146 insertions(+), 12 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 61b3cce32c5cb..cd8cbd3b2475b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -733,6 +733,9 @@ Release 2.7.2 - UNRELEASED
     YARN-3967. Fetch the application report from the AHS if the RM does not
     know about it. (Mit Desai via xgong)
 
+    YARN-3978.
Configurably turn off the saving of container info in Generic AHS + (Eric Payne via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 98327296dfd1b..f1baf5c6f3fda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1435,6 +1435,15 @@ private static void addDeprecatedKeys() { public static final String APPLICATION_HISTORY_STORE = APPLICATION_HISTORY_PREFIX + "store-class"; + /** Save container meta-info in the application history store. */ + @Private + public static final String + APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO = + APPLICATION_HISTORY_PREFIX + "save-non-am-container-meta-info"; + @Private + public static final boolean + DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO = true; + /** URI for FileSystemApplicationHistoryStore */ @Private public static final String FS_APPLICATION_HISTORY_STORE_URI = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java index eec32b29d9c06..871bac3596470 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -266,11 +266,13 @@ protected void generateApplicationTable(Block html, @Override public ContainerReport run() throws Exception { ContainerReport report = null; - try { - report = appBaseProt.getContainerReport(request) - .getContainerReport(); - } catch (ContainerNotFoundException ex) { - LOG.warn(ex.getMessage()); + if (request.getContainerId() != null) { + try { + report = appBaseProt.getContainerReport(request) + .getContainerReport(); + } catch (ContainerNotFoundException ex) { + LOG.warn(ex.getMessage()); + } } return report; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index 940f76f1cb73b..a3d8beea5693b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import 
org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -173,6 +174,8 @@ public RMContainerImpl(Container container, .currentTimeMillis(), ""); } + private boolean saveNonAMContainerMetaInfo; + public RMContainerImpl(Container container, ApplicationAttemptId appAttemptId, NodeId nodeId, String user, RMContext rmContext, String nodeLabelExpression) { @@ -201,9 +204,21 @@ public RMContainerImpl(Container container, this.readLock = lock.readLock(); this.writeLock = lock.writeLock(); + saveNonAMContainerMetaInfo = rmContext.getYarnConfiguration().getBoolean( + YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, + YarnConfiguration + .DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO); + rmContext.getRMApplicationHistoryWriter().containerStarted(this); - rmContext.getSystemMetricsPublisher().containerCreated( - this, this.creationTime); + + // If saveNonAMContainerMetaInfo is true, store system metrics for all + // containers. If false, and if this container is marked as the AM, metrics + // will still be published for this container, but that calculation happens + // later. + if (saveNonAMContainerMetaInfo) { + rmContext.getSystemMetricsPublisher().containerCreated( + this, this.creationTime); + } } @Override @@ -376,6 +391,15 @@ public void setAMContainer(boolean isAMContainer) { } finally { writeLock.unlock(); } + + // Even if saveNonAMContainerMetaInfo is not true, the AM container's system + // metrics still need to be saved so that the AM's logs can be accessed. + // This call to getSystemMetricsPublisher().containerCreated() is mutually + // exclusive with the one in the RMContainerImpl constructor. + if (!saveNonAMContainerMetaInfo && this.isAMContainer) { + rmContext.getSystemMetricsPublisher().containerCreated( + this, this.creationTime); + } } @Override @@ -516,8 +540,19 @@ public void transition(RMContainerImpl container, RMContainerEvent event) { container.rmContext.getRMApplicationHistoryWriter().containerFinished( container); - container.rmContext.getSystemMetricsPublisher().containerFinished( - container, container.finishTime); + + boolean saveNonAMContainerMetaInfo = + container.rmContext.getYarnConfiguration().getBoolean( + YarnConfiguration + .APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, + YarnConfiguration + .DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO); + + if (saveNonAMContainerMetaInfo || container.isAMContainer()) { + container.rmContext.getSystemMetricsPublisher().containerFinished( + container, container.finishTime); + } + } private static void updateAttemptMetrics(RMContainerImpl container) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 20343a51856cd..b9e1d811f5562 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -1172,6 +1172,7 @@ private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext) 
when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); + when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration()); ConcurrentHashMap apps = getRMApps(rmContext, yarnScheduler); when(rmContext.getRMApps()).thenReturn(apps); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index 21aba3bfec0fc..e4e2049d4df10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -19,12 +19,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer; import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyLong; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -270,4 +272,77 @@ public void testExistenceOfResourceRequestInRMContainer() throws Exception { Assert.assertNull(scheduler.getRMContainer(containerId2) .getResourceRequests()); } + + @Test (timeout = 180000) + public void testStoreAllContainerMetrics() throws Exception { + Configuration conf = new Configuration(); + conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); + MockRM rm1 = new MockRM(conf); + + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); + rm1.getRMContext().setSystemMetricsPublisher(publisher); + + rm1.start(); + MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000); + RMApp app1 = rm1.submitApp(1024); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.RUNNING); + + // request a container. + am1.allocate("127.0.0.1", 1024, 1, new ArrayList()); + ContainerId containerId2 = ContainerId.newContainerId( + am1.getApplicationAttemptId(), 2); + rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); + am1.allocate(new ArrayList(), new ArrayList()) + .getAllocatedContainers(); + rm1.waitForState(nm1, containerId2, RMContainerState.ACQUIRED); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.COMPLETE); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE); + rm1.waitForState(nm1, containerId2, RMContainerState.COMPLETED); + rm1.stop(); + + // RMContainer should be publishing system metrics for all containers. + // Since there is 1 AM container and 1 non-AM container, there should be 2 + // container created events and 2 container finished events. 
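+    // (That is, one created/finished pair for the AM container and one pair
+    // for the non-AM container requested above.)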
+ verify(publisher, times(2)).containerCreated(any(RMContainer.class), anyLong()); + verify(publisher, times(2)).containerFinished(any(RMContainer.class), anyLong()); + } + + @Test (timeout = 180000) + public void testStoreOnlyAMContainerMetrics() throws Exception { + Configuration conf = new Configuration(); + conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); + conf.setBoolean( + YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, + false); + MockRM rm1 = new MockRM(conf); + + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); + rm1.getRMContext().setSystemMetricsPublisher(publisher); + + rm1.start(); + MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000); + RMApp app1 = rm1.submitApp(1024); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.RUNNING); + + // request a container. + am1.allocate("127.0.0.1", 1024, 1, new ArrayList()); + ContainerId containerId2 = ContainerId.newContainerId( + am1.getApplicationAttemptId(), 2); + rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); + am1.allocate(new ArrayList(), new ArrayList()) + .getAllocatedContainers(); + rm1.waitForState(nm1, containerId2, RMContainerState.ACQUIRED); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.COMPLETE); + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE); + rm1.waitForState(nm1, containerId2, RMContainerState.COMPLETED); + rm1.stop(); + + // RMContainer should be publishing system metrics only for AM container. + verify(publisher, times(1)).containerCreated(any(RMContainer.class), anyLong()); + verify(publisher, times(1)).containerFinished(any(RMContainer.class), anyLong()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index 31661da810816..295a31a0b5874 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -270,6 +270,7 @@ public void testSortedQueues() throws Exception { when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); + when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration()); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( app_0.getApplicationId(), 1); ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d225bd049f53e..d63130088fc71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -168,6 +168,8 @@ public void setUp() throws Exception { cs.start(); when(spyRMContext.getScheduler()).thenReturn(cs); + when(spyRMContext.getYarnConfiguration()) + .thenReturn(new YarnConfiguration()); when(cs.getNumClusterNodes()).thenReturn(3); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index fff4a8645d909..66ad3a82e7100 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -129,7 +129,9 @@ private void setup(CapacitySchedulerConfiguration csConf) throws Exception { spyRMContext = spy(rmContext); when(spyRMContext.getScheduler()).thenReturn(cs); - + when(spyRMContext.getYarnConfiguration()) + .thenReturn(new YarnConfiguration()); + cs.setRMContext(spyRMContext); cs.init(csConf); cs.start(); @@ -642,6 +644,7 @@ public void testGetAppToUnreserve() throws Exception { when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); + when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration()); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( app_0.getApplicationId(), 1); ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); @@ -711,6 +714,7 @@ public void testFindNodeToUnreserve() throws Exception { when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); + when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration()); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( app_0.getApplicationId(), 1); ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index a454801ca3573..5f9030fd66309 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -222,6 +222,7 @@ public void testNodeLocalAssignment() throws Exception { scheduler); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); + ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration()); scheduler.setRMContext(rmContext); scheduler.init(conf); @@ -303,6 +304,7 @@ public Map getNodes(){ scheduler); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); + ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration()); NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager(); nlm.init(new Configuration()); rmContext.setNodeLabelManager(nlm); From 85cadb0933573bb5392b5519d89c4ef897130092 Mon Sep 17 00:00:00 2001 From: Jason Lowe Date: Mon, 3 Aug 2015 15:53:32 +0000 Subject: [PATCH 050/130] YARN-3965. Add startup timestamp to nodemanager UI. Contributed by Hong Zhiguo --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../hadoop/yarn/server/nodemanager/NodeManager.java | 5 +++++ .../hadoop/yarn/server/nodemanager/webapp/NodePage.java | 8 +++++--- .../yarn/server/nodemanager/webapp/dao/NodeInfo.java | 6 ++++++ .../yarn/server/nodemanager/webapp/TestNMWebServices.java | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index cd8cbd3b2475b..14961440a18fc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -361,6 +361,8 @@ Release 2.8.0 - UNRELEASED YARN-3950. Add unique SHELL_ID environment variable to DistributedShell (Robert Kanter via jlowe) + YARN-3965. Add startup timestamp to nodemanager UI (Hong Zhiguo via jlowe) + OPTIMIZATIONS YARN-3339. 
TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 185ba126a4d54..b8889eeded8cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -81,6 +81,7 @@ public class NodeManager extends CompositeService public static final int SHUTDOWN_HOOK_PRIORITY = 30; private static final Log LOG = LogFactory.getLog(NodeManager.class); + private static long nmStartupTime = System.currentTimeMillis(); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); private ApplicationACLsManager aclsManager; private NodeHealthCheckerService nodeHealthChecker; @@ -101,6 +102,10 @@ public NodeManager() { super(NodeManager.class.getName()); } + public static long getNMStartupTime() { + return nmStartupTime; + } + protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java index 92c4187c20130..f51f0c551af6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java @@ -73,15 +73,17 @@ protected void render(Block html) { StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB)) ._("Pmem enforcement enabled", info.isPmemCheckEnabled()) - ._("Total VCores allocated for Containers", - String.valueOf(info.getTotalVCoresAllocated())) + ._("Total VCores allocated for Containers", + String.valueOf(info.getTotalVCoresAllocated())) ._("NodeHealthyStatus", info.getHealthStatus()) ._("LastNodeHealthTime", new Date( info.getLastNodeUpdateTime())) ._("NodeHealthReport", info.getHealthReport()) - ._("Node Manager Version:", info.getNMBuildVersion() + + ._("NodeManager started on", new Date( + info.getNMStartupTime())) + ._("NodeManager Version:", info.getNMBuildVersion() + " on " + info.getNMVersionBuiltOn()) ._("Hadoop Version:", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java index 5d67c9e6d6b3d..32e39cf59fb2f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java @@ -24,6 +24,7 @@ import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.NodeManager; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.util.YarnVersionInfo; @@ -49,6 +50,7 @@ public class NodeInfo { protected String hadoopVersionBuiltOn; protected String id; protected String nodeHostName; + protected long nmStartupTime; public NodeInfo() { } // JAXB needs this @@ -77,6 +79,7 @@ public NodeInfo(final Context context, final ResourceView resourceView) { this.hadoopVersion = VersionInfo.getVersion(); this.hadoopBuildVersion = VersionInfo.getBuildVersion(); this.hadoopVersionBuiltOn = VersionInfo.getDate(); + this.nmStartupTime = NodeManager.getNMStartupTime(); } public String getNodeId() { @@ -143,4 +146,7 @@ public boolean isPmemCheckEnabled() { return this.pmemCheckEnabled; } + public long getNMStartupTime() { + return nmStartupTime; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java index 5a89e742190f5..1f5590ca46f68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java @@ -402,7 +402,7 @@ public void verifyNodesXML(NodeList nodes) throws JSONException, Exception { public void verifyNodeInfo(JSONObject json) throws JSONException, Exception { assertEquals("incorrect number of elements", 1, json.length()); JSONObject info = json.getJSONObject("nodeInfo"); - assertEquals("incorrect number of elements", 16, info.length()); + assertEquals("incorrect number of elements", 17, info.length()); verifyNodeInfoGeneric(info.getString("id"), info.getString("healthReport"), info.getLong("totalVmemAllocatedContainersMB"), info.getLong("totalPmemAllocatedContainersMB"), From 97eb3f5c9a2a091ec1c7ccaadcee5ae78a1ce10b Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:34:01 -0700 Subject: [PATCH 051/130] HADOOP-12286. 
test-patch pylint plugin should support indent-string option (Kengo Seki via aw) --- dev-support/personality/hadoop.sh | 2 ++ dev-support/test-patch.d/pylint.sh | 19 +++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 3d6e3faeca1a0..60dbb3d7989bc 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -22,6 +22,8 @@ PATCH_BRANCH_DEFAULT=trunk HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute" #shellcheck disable=SC2034 ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' +#shellcheck disable=SC2034 +PYLINT_OPTIONS="--indent-string=' '" HADOOP_MODULES="" diff --git a/dev-support/test-patch.d/pylint.sh b/dev-support/test-patch.d/pylint.sh index faa8136055df1..ebac162156a93 100755 --- a/dev-support/test-patch.d/pylint.sh +++ b/dev-support/test-patch.d/pylint.sh @@ -19,11 +19,13 @@ add_plugin pylint PYLINT_TIMER=0 PYLINT=${PYLINT:-$(which pylint 2>/dev/null)} +PYLINT_OPTIONS=${PYLINT_OPTIONS:-} function pylint_usage { echo "Pylint specific:" - echo "--pylint= path to pylint executable" + echo "--pylint= path to pylint executable" + echo "--pylint-options= pylint options other than output-format and reports" } function pylint_parse_args @@ -35,6 +37,9 @@ function pylint_parse_args --pylint=*) PYLINT=${i#*=} ;; + --pylint-options=*) + PYLINT_OPTIONS=${i#*=} + ;; esac done } @@ -70,7 +75,8 @@ function pylint_preapply pushd "${BASEDIR}" >/dev/null for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then - ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + # shellcheck disable=SC2086 + eval "${PYLINT} ${PYLINT_OPTIONS} --output-format=parseable --reports=n ${i}" 2>/dev/null | ${AWK} '1> "${PATCH_DIR}/branch-pylint-result.txt" fi done @@ -111,7 +117,8 @@ function pylint_postapply pushd "${BASEDIR}" >/dev/null for i in ${CHANGED_FILES}; do if [[ ${i} =~ \.py$ && -f ${i} ]]; then - ${PYLINT} --indent-string=" " --output-format=parseable --reports=n "${i}" 2>/dev/null | + # shellcheck disable=SC2086 + eval "${PYLINT} ${PYLINT_OPTIONS} --output-format=parseable --reports=n ${i}" 2>/dev/null | ${AWK} '1> "${PATCH_DIR}/patch-pylint-result.txt" fi done @@ -122,14 +129,14 @@ function pylint_postapply add_footer_table pylint "v${PYLINT_VERSION%,}" calcdiffs "${PATCH_DIR}/branch-pylint-result.txt" "${PATCH_DIR}/patch-pylint-result.txt" > "${PATCH_DIR}/diff-patch-pylint.txt" - diffPostpatch=$(${AWK} -F: 'BEGIN {sum=0} 2 Date: Mon, 3 Aug 2015 10:36:24 -0700 Subject: [PATCH 052/130] HADOOP-12287. add support for perlcritic (Kengo Seki via aw) --- dev-support/test-patch-docker/Dockerfile-startstub | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index 5e5ca78cdfa48..080f86f22839e 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -34,7 +34,8 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ libcurl4-openssl-dev \ python python2.7 pylint \ ruby \ - openjdk-7-jdk + openjdk-7-jdk \ + libperl-critic-perl # Fixing the Apache commons / Maven dependency problem under Ubuntu: # See http://wiki.apache.org/commons/VfsProblems From 530b0d665dc166a168448cf2b2b1eb70ec09fb06 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 10:38:55 -0700 Subject: [PATCH 053/130] HADOOP-12256. 
add support for ruby-lint (Kengo Seki via aw) --- dev-support/test-patch-docker/Dockerfile-startstub | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index 080f86f22839e..fd3e4c5b742b7 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -78,6 +78,11 @@ RUN cabal update && cabal install shellcheck --global ### RUN gem install rubocop +#### +# Install ruby-lint +### +RUN gem install ruby-lint + ##### # Install JIRA CLI ##### From f9819cb5eb9bd1b86cd016329439ff4e66e0ed37 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 19:40:51 -0700 Subject: [PATCH 054/130] Convert 'unit' to 'junit' (aw) --- dev-support/test-patch.d/junit.sh | 68 +++++++++++++++++++++++++++++++ dev-support/test-patch.sh | 53 +++++++++--------------- 2 files changed, 88 insertions(+), 33 deletions(-) create mode 100755 dev-support/test-patch.d/junit.sh diff --git a/dev-support/test-patch.d/junit.sh b/dev-support/test-patch.d/junit.sh new file mode 100755 index 0000000000000..4393511b9c1be --- /dev/null +++ b/dev-support/test-patch.d/junit.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_test_format junit + +JUNIT_TEST_TIMEOUTS="" +JUNIT_FAILED_TESTS="" + +function junit_process_tests +{ + # shellcheck disable=SC2034 + declare module=$1 + declare buildlogfile=$2 + declare result=0 + declare module_test_timeouts + declare module_failed_tests + + # shellcheck disable=SC2016 + module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${buildlogfile}") + if [[ -n "${module_test_timeouts}" ]] ; then + JUNIT_TEST_TIMEOUTS="${JUNIT_TEST_TIMEOUTS} ${module_test_timeouts}" + ((result=result+1)) + fi + + #shellcheck disable=SC2026,SC2038,SC2016 + module_failed_tests=$(find . 
-name 'TEST*.xml'\ + | xargs "${GREP}" -l -E "/dev/null 2>&1; then echo "${plugin}_usage" @@ -2940,10 +2941,7 @@ function populate_test_table function check_unittests { local i - local failed_tests="" - local test_timeouts="" local test_logfile - local module_test_timeouts="" local result=0 local -r savejavahome=${JAVA_HOME} local multijdkmode=false @@ -3001,40 +2999,20 @@ function check_unittests fn="${fn}${jdk}" test_logfile="${PATCH_DIR}/patch-unit-${fn}.txt" - # shellcheck disable=2016 - module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${test_logfile}") - if [[ -n "${module_test_timeouts}" ]] ; then - test_timeouts="${test_timeouts} ${module_test_timeouts}" - ((result=result+1)) - fi - pushd "${MODULE[${i}]}" >/dev/null - #shellcheck disable=SC2026,SC2038,SC2016 - module_failed_tests=$(find . -name 'TEST*.xml'\ - | xargs "${GREP}" -l -E "/dev/null + for j in ${TESTSYSTEMS}; do + if declare -f ${j}_process_tests; then + "${j}_process_tests" "${module}" "${test_logfile}" + ((results=results+$?)) + fi + done - if [[ -n "${module_failed_tests}" ]] ; then - failed_tests="${failed_tests} ${module_failed_tests}" - ((result=result+1)) - fi + popd >/dev/null ((i=i+1)) done - if [[ -n "${failed_tests}" ]] ; then - # shellcheck disable=SC2086 - populate_test_table "${statusjdk}Failed unit tests" ${failed_tests} - failed_tests="" - fi - if [[ -n "${test_timeouts}" ]] ; then - # shellcheck disable=SC2086 - populate_test_table "${statusjdk}Timed out tests" ${test_timeouts} - test_timeouts="" - fi - done JAVA_HOME=${savejavahome} @@ -3433,7 +3411,7 @@ function importplugins ## @replaceable no function parse_args_plugins { - for plugin in ${PLUGINS} ${BUGSYSTEMS}; do + for plugin in ${PLUGINS} ${BUGSYSTEMS} ${TESTFORMATS}; do if declare -f ${plugin}_parse_args >/dev/null 2>&1; then yetus_debug "Running ${plugin}_parse_args" #shellcheck disable=SC2086 @@ -3452,7 +3430,7 @@ function add_plugin PLUGINS="${PLUGINS} $1" } -## @description Register test-patch.d plugins +## @description Register test-patch.d bugsystems ## @audience public ## @stability stable ## @replaceable no @@ -3461,6 +3439,15 @@ function add_bugsystem BUGSYSTEMS="${BUGSYSTEMS} $1" } +## @description Register test-patch.d test output formats +## @audience public +## @stability stable +## @replaceable no +function add_test_format +{ + TESTFORMATS="${TESTFORMATS} $1" +} + ## @description Calculate the differences between the specified files ## @description and output it to stdout. ## @audience public From bd06968f6e80a7a4af443d93c0c9fb8581270d17 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 3 Aug 2015 19:40:51 -0700 Subject: [PATCH 055/130] Convert 'unit' to 'junit' (aw) --- dev-support/test-patch.d/junit.sh | 68 +++++++++++++++++++++++++++++++ dev-support/test-patch.sh | 53 +++++++++--------------- 2 files changed, 88 insertions(+), 33 deletions(-) create mode 100755 dev-support/test-patch.d/junit.sh diff --git a/dev-support/test-patch.d/junit.sh b/dev-support/test-patch.d/junit.sh new file mode 100755 index 0000000000000..4393511b9c1be --- /dev/null +++ b/dev-support/test-patch.d/junit.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_test_format junit + +JUNIT_TEST_TIMEOUTS="" +JUNIT_FAILED_TESTS="" + +function junit_process_tests +{ + # shellcheck disable=SC2034 + declare module=$1 + declare buildlogfile=$2 + declare result=0 + declare module_test_timeouts + declare module_failed_tests + + # shellcheck disable=SC2016 + module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${buildlogfile}") + if [[ -n "${module_test_timeouts}" ]] ; then + JUNIT_TEST_TIMEOUTS="${JUNIT_TEST_TIMEOUTS} ${module_test_timeouts}" + ((result=result+1)) + fi + + #shellcheck disable=SC2026,SC2038,SC2016 + module_failed_tests=$(find . -name 'TEST*.xml'\ + | xargs "${GREP}" -l -E "/dev/null 2>&1; then echo "${plugin}_usage" @@ -2940,10 +2941,7 @@ function populate_test_table function check_unittests { local i - local failed_tests="" - local test_timeouts="" local test_logfile - local module_test_timeouts="" local result=0 local -r savejavahome=${JAVA_HOME} local multijdkmode=false @@ -3001,40 +2999,20 @@ function check_unittests fn="${fn}${jdk}" test_logfile="${PATCH_DIR}/patch-unit-${fn}.txt" - # shellcheck disable=2016 - module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${test_logfile}") - if [[ -n "${module_test_timeouts}" ]] ; then - test_timeouts="${test_timeouts} ${module_test_timeouts}" - ((result=result+1)) - fi - pushd "${MODULE[${i}]}" >/dev/null - #shellcheck disable=SC2026,SC2038,SC2016 - module_failed_tests=$(find . 
-        -name 'TEST*.xml'\
-        | xargs "${GREP}" -l -E "/dev/null
+      for j in ${TESTFORMATS}; do
+        if declare -f ${j}_process_tests >/dev/null 2>&1; then
+          "${j}_process_tests" "${module}" "${test_logfile}"
+          ((result=result+$?))
+        fi
+      done
 
-      if [[ -n "${module_failed_tests}" ]] ; then
-        failed_tests="${failed_tests} ${module_failed_tests}"
-        ((result=result+1))
-      fi
+      popd >/dev/null
 
       ((i=i+1))
     done
 
-    if [[ -n "${failed_tests}" ]] ; then
-      # shellcheck disable=SC2086
-      populate_test_table "${statusjdk}Failed unit tests" ${failed_tests}
-      failed_tests=""
-    fi
-    if [[ -n "${test_timeouts}" ]] ; then
-      # shellcheck disable=SC2086
-      populate_test_table "${statusjdk}Timed out tests" ${test_timeouts}
-      test_timeouts=""
-    fi
-
   done
 
   JAVA_HOME=${savejavahome}
@@ -3433,7 +3411,7 @@ function importplugins
 ## @replaceable  no
 function parse_args_plugins
 {
-  for plugin in ${PLUGINS} ${BUGSYSTEMS}; do
+  for plugin in ${PLUGINS} ${BUGSYSTEMS} ${TESTFORMATS}; do
     if declare -f ${plugin}_parse_args >/dev/null 2>&1; then
       yetus_debug "Running ${plugin}_parse_args"
       #shellcheck disable=SC2086
@@ -3452,7 +3430,7 @@ function add_plugin
   PLUGINS="${PLUGINS} $1"
 }
 
-## @description  Register test-patch.d plugins
+## @description  Register test-patch.d bugsystems
 ## @audience     public
 ## @stability    stable
 ## @replaceable  no
@@ -3461,6 +3439,15 @@ function add_bugsystem
   BUGSYSTEMS="${BUGSYSTEMS} $1"
 }
 
+## @description  Register test-patch.d test output formats
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function add_test_format
+{
+  TESTFORMATS="${TESTFORMATS} $1"
+}
+
 ## @description  Calculate the differences between the specified files
 ## @description  and output it to stdout.
 ## @audience     public
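For reference, a second test-format plugin needs only the same two hooks the
junit plugin registers above: a call to add_test_format and a
<name>_process_tests function that scans the build log and returns the number
of problems found. A minimal sketch under that assumption (the tap plugin
name, its grep pattern, and TAP_FAILED_TESTS are hypothetical, not part of
this series):

  add_test_format tap

  TAP_FAILED_TESTS=""

  function tap_process_tests
  {
    # shellcheck disable=SC2034
    declare module=$1
    declare buildlogfile=$2
    declare result=0
    declare failed

    # TAP marks failing cases with lines that begin with "not ok"
    failed=$(${GREP} '^not ok' "${buildlogfile}")
    if [[ -n "${failed}" ]]; then
      TAP_FAILED_TESTS="${TAP_FAILED_TESTS} ${failed}"
      ((result=result+1))
    fi
    return ${result}
  }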
From 89b9192e5d9b07b57a0362c56f9f69292088b0c1 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Thu, 6 Aug 2015 19:25:08 -0700
Subject: [PATCH 056/130] initial jira

---
 dev-support/personality/flink.sh | 2 +-
 dev-support/personality/hadoop.sh | 2 +-
 dev-support/personality/hbase.sh | 2 +-
 dev-support/personality/pig.sh | 2 +-
 dev-support/personality/tajo.sh | 2 +-
 dev-support/personality/tez.sh | 2 +-
 dev-support/test-patch.d/jira.sh | 120 ++++++++++++++++++++++----
 dev-support/test-patch.sh | 134 +++++++----------------------
 8 files changed, 141 insertions(+), 125 deletions(-)

diff --git a/dev-support/personality/flink.sh b/dev-support/personality/flink.sh
index de2a0f1b0b5df..9f59233ee0302 100755
--- a/dev-support/personality/flink.sh
+++ b/dev-support/personality/flink.sh
@@ -17,7 +17,7 @@
 #shellcheck disable=SC2034
 PATCH_BRANCH_DEFAULT=master
 #shellcheck disable=SC2034
-ISSUE_RE='^FLINK-[0-9]+$'
+JIRA_ISSUE_RE='^FLINK-[0-9]+$'
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE=""
 
diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh
index 60dbb3d7989bc..ff2e18fea7aa1 100755
--- a/dev-support/personality/hadoop.sh
+++ b/dev-support/personality/hadoop.sh
@@ -21,7 +21,7 @@ PATCH_BRANCH_DEFAULT=trunk
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute"
 #shellcheck disable=SC2034
-ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+JIRA_ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
 #shellcheck disable=SC2034
 PYLINT_OPTIONS="--indent-string=' '"
 
diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh
index d8ca9010af5b9..7becfddffefa5 100755
--- a/dev-support/personality/hbase.sh
+++ b/dev-support/personality/hbase.sh
@@ -17,7 +17,7 @@
 #shellcheck disable=SC2034
 PATCH_BRANCH_DEFAULT=master
 #shellcheck disable=SC2034
-ISSUE_RE='^HBASE-[0-9]+$'
+JIRA_ISSUE_RE='^HBASE-[0-9]+$'
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE=""
 
diff --git a/dev-support/personality/pig.sh b/dev-support/personality/pig.sh
index d01a410f30f2f..2d562d326beb4 100755
--- a/dev-support/personality/pig.sh
+++ b/dev-support/personality/pig.sh
@@ -17,7 +17,7 @@
 #shellcheck disable=SC2034
 PATCH_BRANCH_DEFAULT=trunk
 #shellcheck disable=SC2034
-ISSUE_RE='^PIG-[0-9]+$'
+JIRA_ISSUE_RE='^PIG-[0-9]+$'
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE=""
 #shellcheck disable=SC2034
diff --git a/dev-support/personality/tajo.sh b/dev-support/personality/tajo.sh
index 56e544243e3f1..a03efadec04d8 100755
--- a/dev-support/personality/tajo.sh
+++ b/dev-support/personality/tajo.sh
@@ -17,7 +17,7 @@
 #shellcheck disable=SC2034
 PATCH_BRANCH_DEFAULT=master
 #shellcheck disable=SC2034
-ISSUE_RE='^TAJO-[0-9]+$'
+JIRA_ISSUE_RE='^TAJO-[0-9]+$'
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE="https://cwiki.apache.org/confluence/display/TAJO/How+to+Contribute+to+Tajo"
 
diff --git a/dev-support/personality/tez.sh b/dev-support/personality/tez.sh
index 1d6a2278d4a18..d2f2e58fa2f5d 100755
--- a/dev-support/personality/tez.sh
+++ b/dev-support/personality/tez.sh
@@ -17,7 +17,7 @@
 #shellcheck disable=SC2034
 PATCH_BRANCH_DEFAULT=master
 #shellcheck disable=SC2034
-ISSUE_RE='^TEZ-[0-9]+$'
+JIRA_ISSUE_RE='^TEZ-[0-9]+$'
 #shellcheck disable=SC2034
 HOW_TO_CONTRIBUTE="https://cwiki.apache.org/confluence/display/TEZ/How+to+Contribute+to+Tez"
 
diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh
index f95ca6f40fa85..032f697dbe095 100755
--- a/dev-support/test-patch.d/jira.sh
+++ b/dev-support/test-patch.d/jira.sh
@@ -15,26 +15,36 @@
 # limitations under the License.
 
 JIRACLI=${JIRA:-jira}
+JIRAURL=${JIRAURL:-"http://issues.apache.org/jira"}
+JIRA_ISSUE_RE='^(YETUS)-[0-9]+$'
 
 add_bugsystem jira
 
 function jira_usage
 {
   echo "JIRA Options:"
-  echo "--jira-cmd= The 'jira' command to use (default 'jira')"
+  echo "--jira-issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: '${JIRA_ISSUE_RE}')"
+  echo "--jira-cmd= The 'jira' command to use (default '${JIRACLI}')"
   echo "--jira-password= The password for the 'jira' command"
+  echo "--jira-base-url= The URL of the JIRA server (default: '${JIRAURL}')"
   echo "--jira-user= The user for the 'jira' command"
 }
 
 function jira_parse_args
 {
-  local i
+  declare i
 
   for i in "$@"; do
     case ${i} in
       --jira-cmd=*)
         JIRACLI=${i#*=}
       ;;
+      --jira-base-url=*)
+        JIRAURL=${i#*=}
+      ;;
+      --jira-issue-re=*)
+        JIRA_ISSUE_RE=${i#*=}
+      ;;
       --jira-password=*)
         JIRA_PASSWD=${i#*=}
       ;;
@@ -45,6 +55,84 @@
     esac
   done
 }
 
+function jira_determine_issue
+{
+  declare input=$1
+  declare patchnamechunk
+  declare maybeissue
+
+  # shellcheck disable=SC2016
+  patchnamechunk=$(echo "${input}" | ${AWK} -F/ '{print $NF}')
+
+  maybeissue=$(echo "${patchnamechunk}" | cut -f1,2 -d-)
+
+  if [[ ${maybeissue} =~ ${JIRA_ISSUE_RE} ]]; then
+    ISSUE=${maybeissue}
+    return 0
+  fi
+
+  return 1
+}
+
+function jira_http_fetch
+{
+  declare input=$1
+  declare output=$2
+
+  if [[ -n "${JIRA_USER}"
+     && -n "${JIRA_PASSWD}" ]]; then
+    ${WGET} --user="${JIRA_USER}" --password="${JIRA_PASSWD}" -q -O "${output}" "${JIRAURL}/${input}"
+  else
+    ${WGET} -q -O "${output}" "${JIRAURL}/${input}"
+  fi
+}
+
+function jira_locate_patch
+{
+  declare input=$1
+  declare fileloc=$2
+
+  yetus_debug "jira_locate_patch: trying ${JIRAURL}/browse/${input}"
+
+  jira_http_fetch "browse/${input}" "${PATCH_DIR}/jira"
+
+  if [[ $?
!= 0 ]]; then + yetus_debug "jira_locate_patch: not a JIRA." + return 1 + fi + + if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then + if [[ ${JENKINS} == true ]]; then + yetus_error "ERROR: ${input} is not \"Patch Available\"." + cleanup_and_exit 1 + else + yetus_error "WARNING: ${input} is not \"Patch Available\"." + fi + fi + + #shellcheck disable=SC2016 + relativePatchURL=$(${AWK} 'match($0,"/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART,RLENGTH)}' "${PATCH_DIR}/jira" | + ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${SED} -e 's,[ ]*$,,g') + PATCHURL="${JIRAURL}${relativePatchURL}" + if [[ ! ${PATCHURL} =~ \.patch$ ]]; then + guess_patch_file "${PATCH_DIR}/patch" + if [[ $? == 0 ]]; then + yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. proceeding, but issue/branch matching might go awry." + add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions." + fi + fi + #shellcheck disable=SC2016 + patchNum=$(echo "${PATCHURL}" | ${AWK} 'match($0,"[0-9]*/"){print substr($0,RSTART,RLENGTH-1)}') + echo "${ISSUE} patch is being downloaded at $(date) from" + echo "${PATCHURL}" + add_footer_table "JIRA Patch URL" "${PATCHURL}" + jira_http_fetch "${relativePatchURL}" "${fileloc}" + if [[ $? != 0 ]];then + yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." + cleanup_and_exit 1 + fi +} + ## @description Write the contents of a file to JIRA ## @params filename ## @stability stable @@ -52,22 +140,22 @@ function jira_parse_args ## @returns ${JIRACLI} exit code function jira_write_comment { - local -r commentfile=${1} + declare -r commentfile=${1} shift - local retval=0 + declare retval=0 if [[ -n ${JIRA_PASSWD} && -n ${JIRA_USER} ]]; then # shellcheck disable=SC2086 ${JIRACLI} --comment "$(cat ${commentfile})" \ - -s https://issues.apache.org/jira \ + -s "${JIRAURL}" \ -a addcomment -u ${JIRA_USER} \ -p "${JIRA_PASSWD}" \ --issue "${ISSUE}" retval=$? 
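+
+    # close the JIRA CLI session whether or not the comment was accepted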
-    ${JIRACLI} -s https://issues.apache.org/jira \
+    ${JIRACLI} -s "${JIRAURL}" \
       -a logout -u "${JIRA_USER}" \
       -p "${JIRA_PASSWD}"
   fi
 
@@ -81,16 +169,16 @@ function jira_write_comment
 ## @param        runresult
 function jira_finalreport
 {
-  local result=$1
-  local i
-  local commentfile=${PATCH_DIR}/commentfile
-  local comment
-  local vote
-  local ourstring
-  local ela
-  local subs
-  local color
-  local comment
+  declare result=$1
+  declare i
+  declare commentfile=${PATCH_DIR}/commentfile
+  declare comment
+  declare vote
+  declare ourstring
+  declare ela
+  declare subs
+  declare color
 
   rm "${commentfile}" 2>/dev/null
 
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 1faf99f2d4e0f..61f82002278c3 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -96,7 +96,6 @@ function setup_defaults
   REEXECED=false
   RESETREPO=false
   ISSUE=""
-  ISSUE_RE='^(YETUS)-[0-9]+$'
   TIMER=$(date +"%s")
   PATCHURL=""
   OSTYPE=$(uname -s)
@@ -735,7 +734,6 @@ function testpatch_usage
   echo "--dirty-workspace Allow the local git workspace to have uncommitted changes"
   echo "--docker Spawn a docker container"
   echo "--dockerfile= Dockerfile fragment to use as the base"
-  echo "--issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${ISSUE_RE}\')"
   echo "--java-home= Set JAVA_HOME (In Docker mode, this should be local to the image)"
   echo "--multijdkdirs= Comma delimited lists of JDK paths to use for multi-JDK tests"
   echo "--multijdktests= Comma delimited tests to use when multijdkdirs is used. (default: javac,javadoc,unit)"
@@ -861,9 +859,6 @@ function parse_args
       testpatch_usage
       exit 0
     ;;
-    --issue-re=*)
-      ISSUE_RE=${i#*=}
-    ;;
     --java-home=*)
       JAVA_HOME=${i#*=}
    ;;
@@ -1445,7 +1440,7 @@ function determine_branch
     count="${PATCH_BRANCH//[^.]}"
     total=${#count}
     ((total = total + 3 ))
-    until [[ ${total} -eq 2 ]]; do
+    until [[ ${total} -lt 2 ]]; do
       PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.)
       yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}"
       ((total=total-1))
@@ -1462,7 +1457,7 @@
     count="${PATCH_BRANCH//[^.]}"
     total=${#count}
     ((total = total + 3 ))
-    until [[ ${total} -eq 2 ]]; do
+    until [[ ${total} -lt 2 ]]; do
       PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.)
       yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}"
       ((total=total-1))
@@ -1507,26 +1502,21 @@ function determine_issue
 {
   local patchnamechunk
   local maybeissue
+  local bugsys
 
   yetus_debug "Determine issue"
 
-  # we can shortcut jenkins
-  if [[ ${JENKINS} == true ]]; then
-    ISSUE=${PATCH_OR_ISSUE}
-    return 0
-  fi
-
-  # shellcheck disable=SC2016
-  patchnamechunk=$(echo "${PATCH_OR_ISSUE}" | ${AWK} -F/ '{print $NF}')
-
-  maybeissue=$(echo "${patchnamechunk}" | cut -f1,2 -d-)
-
-  if [[ ${maybeissue} =~ ${ISSUE_RE} ]]; then
-    ISSUE=${maybeissue}
-    return 0
-  fi
-
   ISSUE="Unknown"
+
+  for bugsys in ${BUGSYSTEMS}; do
+    if declare -f ${bugsys}_determine_issue >/dev/null; then
+      "${bugsys}_determine_issue" "${PATCH_OR_ISSUE}"
+      if [[ $?
== 0 ]]; then + yetus_debug "${bugsys} says ${ISSUE}" + return 0 + fi + fi + done return 1 } @@ -1599,100 +1589,38 @@ function determine_needed_tests ## @return 1 on failure, may exit function locate_patch { - local notSureIfPatch=false + local bugsys + local patchfile="" + yetus_debug "locate patch" if [[ -f ${PATCH_OR_ISSUE} ]]; then - PATCH_FILE="${PATCH_OR_ISSUE}" + patchfile="${PATCH_OR_ISSUE}" else - if [[ ${PATCH_OR_ISSUE} =~ ^http ]]; then - echo "Patch is being downloaded at $(date) from" - PATCHURL="${PATCH_OR_ISSUE}" - else - ${WGET} -q -O "${PATCH_DIR}/jira" "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" - - case $? in - 0) - ;; - 2) - yetus_error "ERROR: .wgetrc/.netrc parsing error." - cleanup_and_exit 1 - ;; - 3) - yetus_error "ERROR: File IO error." - cleanup_and_exit 1 - ;; - 4) - yetus_error "ERROR: URL ${PATCH_OR_ISSUE} is unreachable." - cleanup_and_exit 1 - ;; - *) - # we want to try and do as much as we can in docker mode, - # but if the patch was passed as a file, then we may not - # be able to continue. - if [[ ${REEXECED} == true - && -f "${PATCH_DIR}/patch" ]]; then - PATCH_FILE="${PATCH_DIR}/patch" - else - yetus_error "ERROR: Unable to fetch ${PATCH_OR_ISSUE}." - cleanup_and_exit 1 - fi - ;; - esac - - if [[ -z "${PATCH_FILE}" ]]; then - if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then - if [[ ${JENKINS} == true ]]; then - yetus_error "ERROR: ${PATCH_OR_ISSUE} is not \"Patch Available\"." - cleanup_and_exit 1 - else - yetus_error "WARNING: ${PATCH_OR_ISSUE} is not \"Patch Available\"." - fi - fi - - #shellcheck disable=SC2016 - relativePatchURL=$(${AWK} 'match($0,"\"/jira/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART+1,RLENGTH-1)}' "${PATCH_DIR}/jira" | - ${GREP} -v -e 'htm[l]*$' | sort | tail -1) - PATCHURL="http://issues.apache.org${relativePatchURL}" - if [[ ! ${PATCHURL} =~ \.patch$ ]]; then - notSureIfPatch=true + for bugsys in ${BUGSYSTEMS}; do + if declare -f ${bugsys}_locate_patch >/dev/null 2>&1; then + "${bugsys}_locate_patch" "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch" + if [[ $? == 0 ]]; then + break; fi - #shellcheck disable=SC2016 - patchNum=$(echo "${PATCHURL}" | ${AWK} 'match($0,"[0-9]*/"){print substr($0,RSTART,RLENGTH-1)}') - echo "${ISSUE} patch is being downloaded at $(date) from" - fi - fi - if [[ -z "${PATCH_FILE}" ]]; then - echo "${PATCHURL}" - add_footer_table "Patch URL" "${PATCHURL}" - ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}" - if [[ $? != 0 ]];then - yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." - cleanup_and_exit 1 fi - PATCH_FILE="${PATCH_DIR}/patch" - fi + done fi - if [[ ! -f "${PATCH_DIR}/patch" ]]; then - cp "${PATCH_FILE}" "${PATCH_DIR}/patch" + if [[ ! -f "${PATCH_DIR}/patch" ]]; then + cp "${patchfile}" "${PATCH_DIR}/patch" if [[ $? == 0 ]] ; then - echo "Patch file ${PATCH_FILE} copied to ${PATCH_DIR}" + echo "Patch file ${patchfile} copied to ${PATCH_DIR}" else - yetus_error "ERROR: Could not copy ${PATCH_FILE} to ${PATCH_DIR}" + yetus_error "ERROR: Could not copy ${patchfile} to ${PATCH_DIR}" cleanup_and_exit 1 fi fi - if [[ ${notSureIfPatch} == "true" ]]; then - guess_patch_file "${PATCH_DIR}/patch" - if [[ $? != 0 ]]; then - yetus_error "ERROR: ${PATCHURL} is not a patch file." - cleanup_and_exit 1 - else - yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. proceeding, but issue/branch matching might go awry." - add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. 
Please see ${HOW_TO_CONTRIBUTE} for instructions." - fi + guess_patch_file "${PATCH_DIR}/patch" + if [[ $? != 0 ]]; then + yetus_error "ERROR: ${PATCHURL} is not a patch file." + cleanup_and_exit 1 fi } From c5cf462cf9e0b85933135912c576b1e9cb856673 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 6 Aug 2015 21:15:35 -0700 Subject: [PATCH 057/130] many fixes --- dev-support/test-patch.d/builtin-bugsystem.sh | 142 ++++++++++++++++++ dev-support/test-patch.d/github.sh | 80 +++++++++- dev-support/test-patch.d/jira.sh | 15 +- dev-support/test-patch.sh | 136 ++--------------- 4 files changed, 240 insertions(+), 133 deletions(-) create mode 100644 dev-support/test-patch.d/builtin-bugsystem.sh diff --git a/dev-support/test-patch.d/builtin-bugsystem.sh b/dev-support/test-patch.d/builtin-bugsystem.sh new file mode 100644 index 0000000000000..85505bd922730 --- /dev/null +++ b/dev-support/test-patch.d/builtin-bugsystem.sh @@ -0,0 +1,142 @@ +add_bugsystem console + +# we always call this one last + +function generic_locate_patch +{ + declare input=$1 + declare output=$2 + + if [[ "${OFFLINE}" == true ]]; then + yetus_debug "generic_locate_patch: offline, skipping" + return 1 + fi + + ${WGET} -q -O "${output}" "${JIRAURL}/${input}" + if [[ $? != 0 ]]; then + yetus_debug "jira_locate_patch: not a JIRA." + return 1 + fi + return 0 +} + +## @description Print out the finished details on the console +## @audience private +## @stability evolving +## @replaceable no +## @param runresult +## @return 0 on success +## @return 1 on failure +function console_finalreport +{ + declare result=$1 + shift + declare i=0 + declare ourstring + declare vote + declare subs + declare ela + declare comment + declare commentfile1="${PATCH_DIR}/comment.1" + declare commentfile2="${PATCH_DIR}/comment.2" + declare normaltop + declare line + declare seccoladj=0 + declare spcfx=${PATCH_DIR}/spcl.txt + + if [[ ${result} == 0 ]]; then + if [[ ${JENKINS} == false ]]; then + { + printf "IF9fX19fX19fX18gCjwgU3VjY2VzcyEgPgogLS0tLS0tLS0tLSAKIFwgICAg"; + printf "IC9cICBfX18gIC9cCiAgXCAgIC8vIFwvICAgXC8gXFwKICAgICAoKCAgICBP"; + printf "IE8gICAgKSkKICAgICAgXFwgLyAgICAgXCAvLwogICAgICAgXC8gIHwgfCAg"; + printf "XC8gCiAgICAgICAgfCAgfCB8ICB8ICAKICAgICAgICB8ICB8IHwgIHwgIAog"; + printf "ICAgICAgIHwgICBvICAgfCAgCiAgICAgICAgfCB8ICAgfCB8ICAKICAgICAg"; + printf "ICB8bXwgICB8bXwgIAo" + } > "${spcfx}" + fi + printf "\n\n+1 overall\n\n" + else + if [[ ${JENKINS} == false ]]; then + { + printf "IF9fX19fICAgICBfIF8gICAgICAgICAgICAgICAgXyAKfCAgX19ffF8gXyhf"; + printf "KSB8XyAgIF8gXyBfXyBfX198IHwKfCB8XyAvIF9gIHwgfCB8IHwgfCB8ICdf"; + printf "Xy8gXyBcIHwKfCAgX3wgKF98IHwgfCB8IHxffCB8IHwgfCAgX18vX3wKfF98"; + printf "ICBcX18sX3xffF98XF9fLF98X3wgIFxfX18oXykKICAgICAgICAgICAgICAg"; + printf "ICAgICAgICAgICAgICAgICAK" + } > "${spcfx}" + fi + printf "\n\n-1 overall\n\n" + fi + + if [[ -f ${spcfx} ]]; then + if which base64 >/dev/null 2>&1; then + base64 --decode "${spcfx}" 2>/dev/null + elif which openssl >/dev/null 2>&1; then + openssl enc -A -d -base64 -in "${spcfx}" 2>/dev/null + fi + echo + echo + rm "${spcfx}" + fi + + seccoladj=$(findlargest 2 "${TP_VOTE_TABLE[@]}") + if [[ ${seccoladj} -lt 10 ]]; then + seccoladj=10 + fi + + seccoladj=$((seccoladj + 2 )) + i=0 + until [[ $i -eq ${#TP_HEADER[@]} ]]; do + printf "%s\n" "${TP_HEADER[${i}]}" + ((i=i+1)) + done + + printf "| %s | %*s | %s | %s\n" "Vote" ${seccoladj} Subsystem Runtime "Comment" + echo "============================================================================" + i=0 + until [[ $i -eq 
${#TP_VOTE_TABLE[@]} ]]; do + ourstring=$(echo "${TP_VOTE_TABLE[${i}]}" | tr -s ' ') + vote=$(echo "${ourstring}" | cut -f2 -d\|) + subs=$(echo "${ourstring}" | cut -f3 -d\|) + ela=$(echo "${ourstring}" | cut -f4 -d\|) + comment=$(echo "${ourstring}" | cut -f5 -d\|) + + echo "${comment}" | fold -s -w $((78-seccoladj-22)) > "${commentfile1}" + normaltop=$(head -1 "${commentfile1}") + ${SED} -e '1d' "${commentfile1}" > "${commentfile2}" + + printf "| %4s | %*s | %-10s |%-s\n" "${vote}" ${seccoladj} \ + "${subs}" "${ela}" "${normaltop}" + while read line; do + printf "| | %*s | | %-s\n" ${seccoladj} " " "${line}" + done < "${commentfile2}" + + ((i=i+1)) + rm "${commentfile2}" "${commentfile1}" 2>/dev/null + done + + if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then + seccoladj=$(findlargest 1 "${TP_TEST_TABLE[@]}") + printf "\n\n%*s | Tests\n" "${seccoladj}" "Reason" + i=0 + until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do + ourstring=$(echo "${TP_TEST_TABLE[${i}]}" | tr -s ' ') + vote=$(echo "${ourstring}" | cut -f2 -d\|) + subs=$(echo "${ourstring}" | cut -f3 -d\|) + printf "%*s | %s\n" "${seccoladj}" "${vote}" "${subs}" + ((i=i+1)) + done + fi + + printf "\n\n|| Subsystem || Report/Notes ||\n" + echo "============================================================================" + i=0 + + until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do + comment=$(echo "${TP_FOOTER_TABLE[${i}]}" | + ${SED} -e "s,@@BASE@@,${PATCH_DIR},g") + printf "%s\n" "${comment}" + ((i=i+1)) + done +} diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 281f15b232880..1ca428af81599 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -16,16 +16,79 @@ add_bugsystem github +GITHUBURL="https://github.com" +GITHUBREPO="apache/hadoop" + +function github_usage +{ + echo "GITHUB Options:" + echo "--github-base-url= The URL of the JIRA server (default:'${GITHUBURL}')" + echo "--github-password= Github password" + echo "--github-repo= github repo to use (default:'${GITHUBREPO}')" + echo "--github-token= The token to use to write to github" + echo "--github-user= Github user" + +} + +function github_parse_args +{ + declare i + + for i in "$@"; do + case ${i} in + --github-base-url=*) + GITHUBURL=${i#*=} + ;; + --github-repo=*) + GITHUBREPO=${i#*=} + ;; + --github-token=*) + GITHUB_TOKEN=${i#*=} + ;; + --github-password=*) + GITHUB_PASSWD=${i#*=} + ;; + --github-user=*) + GITHUB_USER=${i#*=} + ;; + esac + done +} + +function github_locate_patch +{ + declare input=$1 + declare output=$2 + + if [[ "${OFFLINE}" == true ]]; then + yetus_debug "github_locate_patch: offline, skipping" + return 1 + fi + + ${WGET} -q -O "${output}" "${GITHUBURL}/${GITHUBREPO}/pull/${input}.patch" + if [[ $? != 0 ]]; then + yetus_debug "github_locate_patch: not a github pull request." + return 1 + fi + + # https://api.github.com/repos/apache/hadoop/pulls/25 + # base->sha? 
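The trailing TODO names the GitHub v3 API endpoint for a pull request; the commit id it is asking about ("base->sha?") is available in that endpoint's JSON under .head.sha. A hypothetical follow-up sketch, with jq as an assumed dependency and the repo and PR number taken from the comment's example URL:

    # Resolve the PR head commit via the API endpoint named in the TODO.
    prnum=25
    curl --silent --fail \
      "https://api.github.com/repos/apache/hadoop/pulls/${prnum}" \
      | jq -r '.head.sha'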
+ + GITHUBISSUE=${input} + GITHUBCOMMITID="" + return 0 +} + ## @description Write the contents of a file to github ## @params filename ## @stability stable ## @audience public function github_write_comment { - local -r commentfile=${1} + declare -r commentfile=${1} shift - local retval=1 + declare retval=1 return ${retval} } @@ -38,14 +101,17 @@ function github_write_comment ## @param runresult function github_finalreport { - local result=$1 - local i - local commentfile=${PATCH_DIR}/commentfile - local comment + declare result=$1 + declare i + declare commentfile=${PATCH_DIR}/commentfile + declare comment + + # TODO: There really should be a reference to the JIRA issue, as needed rm "${commentfile}" 2>/dev/null - if [[ ${JENKINS} != "true" ]] ; then + if [[ ${JENKINS} != "true" + || -z ${GITHUBISSUE} ]] ; then return 0 fi diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 032f697dbe095..b32b668f18c6d 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -94,6 +94,11 @@ function jira_locate_patch yetus_debug "jira_locate_patch: trying ${JIRAURL}/browse/${input}" + if [[ "${OFFLINE}" == true ]]; then + yetus_debug "jira_locate_patch: offline, skipping" + return 1 + fi + jira_http_fetch "browse/${input}" "${PATCH_DIR}/jira" if [[ $? != 0 ]]; then @@ -101,6 +106,11 @@ function jira_locate_patch return 1 fi + # TODO: we should check for a gitbub-based pull request here + # if we find one, call the github plug-in directly with the + # appropriate bits so that it gets setup to write a comment + # to the PR + if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then if [[ ${JENKINS} == true ]]; then yetus_error "ERROR: ${input} is not \"Patch Available\"." @@ -131,6 +141,7 @@ function jira_locate_patch yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." cleanup_and_exit 1 fi + return 0 } ## @description Write the contents of a file to JIRA @@ -145,7 +156,6 @@ function jira_write_comment declare retval=0 - if [[ -n ${JIRA_PASSWD} && -n ${JIRA_USER} ]]; then # shellcheck disable=SC2086 @@ -182,7 +192,8 @@ function jira_finalreport rm "${commentfile}" 2>/dev/null - if [[ ${JENKINS} != "true" ]] ; then + if [[ ${JENKINS} != "true" + || ${OFFLINE} == true ]] ; then return 0 fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 61f82002278c3..6e307f280b432 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1591,6 +1591,7 @@ function locate_patch { local bugsys local patchfile="" + local gotit=false yetus_debug "locate patch" @@ -1601,13 +1602,22 @@ function locate_patch if declare -f ${bugsys}_locate_patch >/dev/null 2>&1; then "${bugsys}_locate_patch" "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch" if [[ $? == 0 ]]; then - break; + guess_patch_file "${PATCH_DIR}/patch" + if [[ $? == 0 ]]; then + gotit=true + break; + fi fi fi done + + if [[ ${gotit} == false ]]; then + generic_locate_patch "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch" + fi fi - if [[ ! -f "${PATCH_DIR}/patch" ]]; then + if [[ ! -f "${PATCH_DIR}/patch" + && -f "${patchfile}" ]]; then cp "${patchfile}" "${PATCH_DIR}/patch" if [[ $? 
== 0 ]] ; then echo "Patch file ${patchfile} copied to ${PATCH_DIR}" @@ -2954,127 +2964,6 @@ function check_unittests return 0 } -## @description Print out the finished details on the console -## @audience private -## @stability evolving -## @replaceable no -## @param runresult -## @return 0 on success -## @return 1 on failure -function output_to_console -{ - local result=$1 - shift - local i=0 - local ourstring - local vote - local subs - local ela - local comment - local commentfile1="${PATCH_DIR}/comment.1" - local commentfile2="${PATCH_DIR}/comment.2" - local normaltop - local line - local seccoladj=0 - local spcfx=${PATCH_DIR}/spcl.txt - - if [[ ${result} == 0 ]]; then - if [[ ${JENKINS} == false ]]; then - { - printf "IF9fX19fX19fX18gCjwgU3VjY2VzcyEgPgogLS0tLS0tLS0tLSAKIFwgICAg"; - printf "IC9cICBfX18gIC9cCiAgXCAgIC8vIFwvICAgXC8gXFwKICAgICAoKCAgICBP"; - printf "IE8gICAgKSkKICAgICAgXFwgLyAgICAgXCAvLwogICAgICAgXC8gIHwgfCAg"; - printf "XC8gCiAgICAgICAgfCAgfCB8ICB8ICAKICAgICAgICB8ICB8IHwgIHwgIAog"; - printf "ICAgICAgIHwgICBvICAgfCAgCiAgICAgICAgfCB8ICAgfCB8ICAKICAgICAg"; - printf "ICB8bXwgICB8bXwgIAo" - } > "${spcfx}" - fi - printf "\n\n+1 overall\n\n" - else - if [[ ${JENKINS} == false ]]; then - { - printf "IF9fX19fICAgICBfIF8gICAgICAgICAgICAgICAgXyAKfCAgX19ffF8gXyhf"; - printf "KSB8XyAgIF8gXyBfXyBfX198IHwKfCB8XyAvIF9gIHwgfCB8IHwgfCB8ICdf"; - printf "Xy8gXyBcIHwKfCAgX3wgKF98IHwgfCB8IHxffCB8IHwgfCAgX18vX3wKfF98"; - printf "ICBcX18sX3xffF98XF9fLF98X3wgIFxfX18oXykKICAgICAgICAgICAgICAg"; - printf "ICAgICAgICAgICAgICAgICAK" - } > "${spcfx}" - fi - printf "\n\n-1 overall\n\n" - fi - - if [[ -f ${spcfx} ]]; then - if which base64 >/dev/null 2>&1; then - base64 --decode "${spcfx}" 2>/dev/null - elif which openssl >/dev/null 2>&1; then - openssl enc -A -d -base64 -in "${spcfx}" 2>/dev/null - fi - echo - echo - rm "${spcfx}" - fi - - seccoladj=$(findlargest 2 "${TP_VOTE_TABLE[@]}") - if [[ ${seccoladj} -lt 10 ]]; then - seccoladj=10 - fi - - seccoladj=$((seccoladj + 2 )) - i=0 - until [[ $i -eq ${#TP_HEADER[@]} ]]; do - printf "%s\n" "${TP_HEADER[${i}]}" - ((i=i+1)) - done - - printf "| %s | %*s | %s | %s\n" "Vote" ${seccoladj} Subsystem Runtime "Comment" - echo "============================================================================" - i=0 - until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do - ourstring=$(echo "${TP_VOTE_TABLE[${i}]}" | tr -s ' ') - vote=$(echo "${ourstring}" | cut -f2 -d\|) - subs=$(echo "${ourstring}" | cut -f3 -d\|) - ela=$(echo "${ourstring}" | cut -f4 -d\|) - comment=$(echo "${ourstring}" | cut -f5 -d\|) - - echo "${comment}" | fold -s -w $((78-seccoladj-22)) > "${commentfile1}" - normaltop=$(head -1 "${commentfile1}") - ${SED} -e '1d' "${commentfile1}" > "${commentfile2}" - - printf "| %4s | %*s | %-10s |%-s\n" "${vote}" ${seccoladj} \ - "${subs}" "${ela}" "${normaltop}" - while read line; do - printf "| | %*s | | %-s\n" ${seccoladj} " " "${line}" - done < "${commentfile2}" - - ((i=i+1)) - rm "${commentfile2}" "${commentfile1}" 2>/dev/null - done - - if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then - seccoladj=$(findlargest 1 "${TP_TEST_TABLE[@]}") - printf "\n\n%*s | Tests\n" "${seccoladj}" "Reason" - i=0 - until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do - ourstring=$(echo "${TP_TEST_TABLE[${i}]}" | tr -s ' ') - vote=$(echo "${ourstring}" | cut -f2 -d\|) - subs=$(echo "${ourstring}" | cut -f3 -d\|) - printf "%*s | %s\n" "${seccoladj}" "${vote}" "${subs}" - ((i=i+1)) - done - fi - - printf "\n\n|| Subsystem || Report/Notes ||\n" - echo 
"============================================================================" - i=0 - - until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do - comment=$(echo "${TP_FOOTER_TABLE[${i}]}" | - ${SED} -e "s,@@BASE@@,${PATCH_DIR},g") - printf "%s\n" "${comment}" - ((i=i+1)) - done -} - ## @description Write the final output to the selected bug system ## @audience private ## @stability evolving @@ -3128,7 +3017,6 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi From 6c40877813ada43b5498ee6998871f60e9853c64 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 6 Aug 2015 21:52:43 -0700 Subject: [PATCH 058/130] more fixes --- dev-support/test-patch.d/builtin-bugsystem.sh | 16 ++++++ dev-support/test-patch.d/github.sh | 17 +++++-- dev-support/test-patch.d/jira.sh | 22 ++++---- dev-support/test-patch.sh | 51 ++++++++++--------- 4 files changed, 68 insertions(+), 38 deletions(-) mode change 100644 => 100755 dev-support/test-patch.d/builtin-bugsystem.sh diff --git a/dev-support/test-patch.d/builtin-bugsystem.sh b/dev-support/test-patch.d/builtin-bugsystem.sh old mode 100644 new mode 100755 index 85505bd922730..4c2551971d4d5 --- a/dev-support/test-patch.d/builtin-bugsystem.sh +++ b/dev-support/test-patch.d/builtin-bugsystem.sh @@ -1,3 +1,19 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + add_bugsystem console # we always call this one last diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 1ca428af81599..10c3161103595 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -19,6 +19,12 @@ add_bugsystem github GITHUBURL="https://github.com" GITHUBREPO="apache/hadoop" +GITHUB_PASSWD="" +GITHUB_TOKEN="" +GITHUB_USER="" +GITHUB_COMMITID="" +GITHUB_ISSUE="" + function github_usage { echo "GITHUB Options:" @@ -74,8 +80,8 @@ function github_locate_patch # https://api.github.com/repos/apache/hadoop/pulls/25 # base->sha? - GITHUBISSUE=${input} - GITHUBCOMMITID="" + GITHUB_ISSUE=${input} + GITHUB_COMMITID="" return 0 } @@ -90,6 +96,11 @@ function github_write_comment declare retval=1 + if [[ "${OFFLINE}" == true ]]; then + return 0 + fi + + yetus_debug "${GITHUB_USER} ${GITHUB_PASSWD} ${GITHUB_TOKEN} ${GITHUB_COMMITID}" return ${retval} } @@ -111,7 +122,7 @@ function github_finalreport rm "${commentfile}" 2>/dev/null if [[ ${JENKINS} != "true" - || -z ${GITHUBISSUE} ]] ; then + || -z ${GITHUB_ISSUE} ]] ; then return 0 fi diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index b32b668f18c6d..31116e90124d2 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -15,7 +15,7 @@ # limitations under the License. 
JIRACLI=${JIRA:-jira} -JIRAURL=${JIRAURL:-"http://issues.apache.org/jira"} +JIRA_URL=${JIRA_URL:-"http://issues.apache.org/jira"} JIRA_ISSUE_RE='^(YETUS)-[0-9]+$' add_bugsystem jira @@ -26,7 +26,7 @@ function jira_usage echo "--jira-issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${JIRA_ISSUE_RE}\')" echo "--jira-cmd= The 'jira' command to use (default '${JIRACLI}')" echo "--jira-password= The password for the 'jira' command" - echo "--jira-base-url= The URL of the JIRA server (default:'${JIRAURL}')" + echo "--jira-base-url= The URL of the JIRA server (default:'${JIRA_URL}')" echo "--jira-user= The user for the 'jira' command" } @@ -81,9 +81,9 @@ function jira_http_fetch if [[ -n "${JIRA_USER}" && -n "${JIRA_PASSWD}" ]]; then - ${WGET} --user="${JIRA_USER}" --password="${JIRA_PASSWD}" -q -O "${output}" "${JIRAURL}/${input}" + ${WGET} --user="${JIRA_USER}" --password="${JIRA_PASSWD}" -q -O "${output}" "${JIRA_URL}/${input}" else - ${WGET} -q -O "${output}" "${JIRAURL}/${input}" + ${WGET} -q -O "${output}" "${JIRA_URL}/${input}" fi } @@ -92,7 +92,7 @@ function jira_locate_patch declare input=$1 declare fileloc=$2 - yetus_debug "jira_locate_patch: trying ${JIRAURL}/browse/${input}" + yetus_debug "jira_locate_patch: trying ${JIRA_URL}/browse/${input}" if [[ "${OFFLINE}" == true ]]; then yetus_debug "jira_locate_patch: offline, skipping" @@ -123,7 +123,7 @@ function jira_locate_patch #shellcheck disable=SC2016 relativePatchURL=$(${AWK} 'match($0,"/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART,RLENGTH)}' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${SED} -e 's,[ ]*$,,g') - PATCHURL="${JIRAURL}${relativePatchURL}" + PATCHURL="${JIRA_URL}${relativePatchURL}" if [[ ! ${PATCHURL} =~ \.patch$ ]]; then guess_patch_file "${PATCH_DIR}/patch" if [[ $? == 0 ]]; then @@ -131,8 +131,6 @@ function jira_locate_patch add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions." fi fi - #shellcheck disable=SC2016 - patchNum=$(echo "${PATCHURL}" | ${AWK} 'match($0,"[0-9]*/"){print substr($0,RSTART,RLENGTH-1)}') echo "${ISSUE} patch is being downloaded at $(date) from" echo "${PATCHURL}" add_footer_table "JIRA Patch URL" "${PATCHURL}" @@ -156,16 +154,20 @@ function jira_write_comment declare retval=0 + if [[ "${OFFLINE}" == true ]]; then + return 0 + fi + if [[ -n ${JIRA_PASSWD} && -n ${JIRA_USER} ]]; then # shellcheck disable=SC2086 ${JIRACLI} --comment "$(cat ${commentfile})" \ - -s "${JIRAURL}" \ + -s "${JIRA_URL}" \ -a addcomment -u ${JIRA_USER} \ -p "${JIRA_PASSWD}" \ --issue "${ISSUE}" retval=$? 
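One caveat worth flagging in the authentication paths above: both the wget fetch and the JIRACLI call hand the password over as a command-line argument, where any local user can read it from ps output. A hedged alternative once this series finishes its move to curl, which has a real --netrc-file option (the issue id below is only an example):

    # Sketch: keep JIRA credentials out of argv via a private netrc file.
    umask 077
    printf 'machine issues.apache.org login %s password %s\n' \
      "${JIRA_USER}" "${JIRA_PASSWD}" > "${PATCH_DIR}/jira-netrc"
    curl --silent --fail --location --netrc-file "${PATCH_DIR}/jira-netrc" \
      --output "${PATCH_DIR}/jira" "${JIRA_URL}/browse/YETUS-1"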
- ${JIRACLI} -s "${JIRAURL}" \ + ${JIRACLI} -s "${JIRA_URL}" \ -a logout -u "${JIRA_USER}" \ -p "${JIRA_PASSWD}" fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 6e307f280b432..923f334ae6b8f 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -100,7 +100,6 @@ function setup_defaults PATCHURL="" OSTYPE=$(uname -s) BUILDTOOL=maven - BUGSYSTEM=jira TESTFORMATS="" JDK_TEST_LIST="javac javadoc unit" GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" @@ -239,6 +238,7 @@ function offset_clock ## @param string function add_header_line { + # shellcheck disable=SC2034 TP_HEADER[${TP_HEADER_COUNTER}]="$*" ((TP_HEADER_COUNTER=TP_HEADER_COUNTER+1 )) } @@ -419,6 +419,7 @@ function finish_vote_table echo "Total Elapsed time: ${calctime}" echo "" + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | | ${calctime} | |" ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1 )) } @@ -436,6 +437,7 @@ function add_footer_table local subsystem=$1 shift 1 + # shellcheck disable=SC2034 TP_FOOTER_TABLE[${TP_FOOTER_COUNTER}]="| ${subsystem} | $* |" ((TP_FOOTER_COUNTER=TP_FOOTER_COUNTER+1 )) } @@ -451,6 +453,7 @@ function add_test_table local failure=$1 shift 1 + # shellcheck disable=SC2034 TP_TEST_TABLE[${TP_TEST_COUNTER}]="| ${failure} | $* |" ((TP_TEST_COUNTER=TP_TEST_COUNTER+1 )) } @@ -534,24 +537,25 @@ function find_java_home return 0 } -## @description Write the contents of a file to jenkins +## @description Write the contents of a file to all of the bug systems +## @description (so content should avoid special formatting) ## @params filename ## @stability stable ## @audience public -## @returns ${JIRACLI} exit code function write_comment { local -r commentfile=${1} shift + declare bug + declare retval local retval=0 - if [[ ${OFFLINE} == false - && ${JENKINS} == true ]]; then - ${BUGSYSTEM}_write_comment "${commentfile}" - retval=$? - fi - return ${retval} + for bug in ${BUGSYSTEMS}; do + if declare -f ${bug}_write_comment >/dev/null; then + "${bug}_write_comment" "${commentfile}" + fi + done } ## @description Verify that the patch directory is still in working order @@ -725,8 +729,6 @@ function testpatch_usage echo "--basedir= The directory to apply the patch to (default current directory)" echo "--branch= Forcibly set the branch" echo "--branch-default= If the branch isn't forced and we don't detect one in the patch name, use this branch (default 'master')" - #not quite working yet - #echo "--bugsystem= The bug system in use ('jira', the default, or 'github')" echo "--build-native= If true, then build native components (default 'true')" echo "--build-tool= Pick which build tool to focus around (maven, ant)" echo "--contrib-guide= URL to point new users towards project conventions. (default: ${HOW_TO_CONTRIBUTE} )" @@ -810,9 +812,6 @@ function parse_args --branch-default=*) PATCH_BRANCH_DEFAULT=${i#*=} ;; - --bugsystem=*) - BUGSYSTEM=${i#*=} - ;; --build-native=*) BUILD_NATIVE=${i#*=} ;; @@ -1140,7 +1139,7 @@ function find_changed_modules ;; *) yetus_error "ERROR: Unsupported build tool." - output_to_console 1 + # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 ;; @@ -1159,7 +1158,7 @@ function find_changed_modules builddir=$(find_buildfile_dir ${buildfile} "${i}") if [[ -z ${builddir} ]]; then yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." 
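The write_comment rewrite earlier in this diff rests on a small bash idiom: declare -f NAME exits zero only when NAME is a defined function, so every registered bug system gets an optional hook that is skipped silently when unimplemented. A self-contained demonstration of just that mechanism:

    # Only plugins that define the hook get called; the rest are no-ops.
    jira_write_comment() { echo "jira got: $1"; }
    BUGSYSTEMS="jira github console"
    for bug in ${BUGSYSTEMS}; do
      if declare -f "${bug}_write_comment" >/dev/null; then
        "${bug}_write_comment" "report.txt"
      fi
    done
    # prints only: jira got: report.txt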
- output_to_console 1 + # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -1327,8 +1326,6 @@ function git_checkout determine_issue GIT_REVISION=$(${GIT} rev-parse --verify --short HEAD) - # shellcheck disable=SC2034 - VERSION=${GIT_REVISION}_${ISSUE}_PATCH-${patchNum} if [[ "${ISSUE}" == 'Unknown' ]]; then echo "Testing patch on ${PATCH_BRANCH}." @@ -1500,8 +1497,6 @@ function determine_branch ## @return 1 on failure, with ISSUE updated to "Unknown" function determine_issue { - local patchnamechunk - local maybeissue local bugsys yetus_debug "Determine issue" @@ -1697,7 +1692,7 @@ function apply_patch_file echo "PATCH APPLICATION FAILED" ((RESULT = RESULT + 1)) add_vote_table -1 patch "The patch command could not apply the patch." - output_to_console 1 + # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -2970,7 +2965,13 @@ function check_unittests ## @replaceable no function output_to_bugsystem { - "${BUGSYSTEM}_finalreport" "${@}" + declare bugs + + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_finalreport >/dev/null;then + "${bugs}_finalreport" "${@}" + fi + done } ## @description Clean the filesystem as appropriate and then exit @@ -3033,7 +3034,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_console 1 + # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -3090,7 +3091,7 @@ function postapply check_patch_javac retval=$? if [[ ${retval} -gt 1 ]] ; then - output_to_console 1 + # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -3366,6 +3367,6 @@ finish_vote_table finish_footer_table -output_to_console ${RESULT} +# output_to_console ${RESULT} output_to_bugsystem ${RESULT} cleanup_and_exit ${RESULT} From c225ef1b2574df9c664d815209eabcd7539ce585 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 6 Aug 2015 22:04:24 -0700 Subject: [PATCH 059/130] remove the comments --- dev-support/test-patch.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 923f334ae6b8f..924241adc7aeb 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1139,7 +1139,6 @@ function find_changed_modules ;; *) yetus_error "ERROR: Unsupported build tool." - # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 ;; @@ -1158,7 +1157,6 @@ function find_changed_modules builddir=$(find_buildfile_dir ${buildfile} "${i}") if [[ -z ${builddir} ]]; then yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." - # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -1692,7 +1690,6 @@ function apply_patch_file echo "PATCH APPLICATION FAILED" ((RESULT = RESULT + 1)) add_vote_table -1 patch "The patch command could not apply the patch." - # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -3034,7 +3031,6 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -3091,7 +3087,6 @@ function postapply check_patch_javac retval=$? 
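With output_to_bugsystem (rewritten above) fanning results out to every loaded plugin, the reporting destination is now decided by plugin configuration rather than the removed --bugsystem flag. A hypothetical end-to-end invocation built only from options this series defines, where the PR number is made up:

    # Fetch GitHub PR #25, test it, and let each plugin that has enough
    # configuration (jira, github, console) report the outcome.
    dev-support/test-patch.sh --github-repo=apache/hadoop \
      --github-token="${GITHUB_TOKEN}" 25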
if [[ ${retval} -gt 1 ]] ; then - # output_to_console 1 output_to_bugsystem 1 cleanup_and_exit 1 fi @@ -3367,6 +3362,5 @@ finish_vote_table finish_footer_table -# output_to_console ${RESULT} output_to_bugsystem ${RESULT} cleanup_and_exit ${RESULT} From 596e71b7ff75daaf72a3bab37cdad0415c3ac1b7 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 7 Aug 2015 21:07:02 -0700 Subject: [PATCH 060/130] switch to curl from wget and jira-cli --- dev-support/smart-apply-patch.sh | 19 +++---- .../test-patch-docker/Dockerfile-startstub | 15 +----- dev-support/test-patch.d/builtin-bugsystem.sh | 4 +- dev-support/test-patch.d/github.sh | 11 ++-- dev-support/test-patch.d/jira.sh | 50 ++++++++++++------- dev-support/test-patch.sh | 16 +++--- 6 files changed, 64 insertions(+), 51 deletions(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index 00e3a0a2033a6..c9221b932ecc8 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -76,7 +76,7 @@ function setup_defaults SunOS) AWK=${AWK:-/usr/xpg4/bin/awk} SED=${SED:-/usr/xpg4/bin/sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-/usr/xpg4/bin/grep} PATCH=${PATCH:-/usr/gnu/bin/patch} @@ -86,7 +86,7 @@ function setup_defaults *) AWK=${AWK:-awk} SED=${SED:-sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-grep} PATCH=${PATCH:-patch} @@ -122,7 +122,7 @@ function yetus_usage echo "--grep-cmd= The 'grep' command to use (default 'grep')" echo "--git-cmd= The 'git' command to use (default 'git')" echo "--patch-cmd= The GNU-compatible 'patch' command to use (default 'patch')" - echo "--wget-cmd= The 'wget' command to use (default 'wget')" + echo "--curl-cmd= The 'curl' command to use (default 'curl')" } ## @description Interpret the command line parameters @@ -162,8 +162,8 @@ function parse_args --patch-dir=*) PATCH_DIR=${i#*=} ;; - --wget-cmd=*) - WGET=${i#*=} + --curl-cmd=*) + CURL=${i#*=} ;; --*) ## PATCH_OR_ISSUE can't be a --. So this is probably @@ -233,13 +233,14 @@ function locate_patch echo "Patch is being downloaded at $(date) from" PATCHURL="${PATCH_OR_ISSUE}" else - ${WGET} -q -O "${PATCH_DIR}/jira" "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" - + ${CURL} --silent \ + --output "${PATCH_DIR}/jira" \ + "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" case $? in 0) ;; 2) - yetus_error "ERROR: .wgetrc/.netrc parsing error." + yetus_error "ERROR: .curlrc/.netrc parsing error." cleanup_and_exit 1 ;; 3) @@ -277,7 +278,7 @@ function locate_patch fi fi if [[ -z "${PATCH_FILE}" ]]; then - ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}" + ${CURL} -q -O "${PATCH_DIR}/patch" "${PATCHURL}" if [[ $? != 0 ]];then yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." 
cleanup_and_exit 1 diff --git a/dev-support/test-patch-docker/Dockerfile-startstub b/dev-support/test-patch-docker/Dockerfile-startstub index fd3e4c5b742b7..c49b5891c902a 100644 --- a/dev-support/test-patch-docker/Dockerfile-startstub +++ b/dev-support/test-patch-docker/Dockerfile-startstub @@ -62,8 +62,8 @@ RUN apt-get install -y oracle-java8-installer # Install findbugs ###### RUN mkdir -p /opt/findbugs && \ - wget https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download \ - -O /opt/findbugs.tar.gz && \ + curl https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download \ + -o /opt/findbugs.tar.gz && \ tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs ENV FINDBUGS_HOME /opt/findbugs @@ -83,14 +83,3 @@ RUN gem install rubocop ### RUN gem install ruby-lint -##### -# Install JIRA CLI -##### - -RUN mkdir -p /opt/jiracli && \ - wget https://bobswift.atlassian.net/wiki/download/attachments/16285777/jira-cli-2.2.0-distribution.zip \ - -O /tmp/jiracli.zip && \ - unzip -qq -d /opt/jiracli /tmp/jiracli.zip && \ - ln -s /opt/jiracli/jira-cli-2.2.0 /opt/jiracli/latest && \ - chmod -R a+rx /opt/jiracli/jira-cli-2.2.0 -ENV JIRACLI_HOME /opt/jiracli/latest diff --git a/dev-support/test-patch.d/builtin-bugsystem.sh b/dev-support/test-patch.d/builtin-bugsystem.sh index 4c2551971d4d5..118d7212f525c 100755 --- a/dev-support/test-patch.d/builtin-bugsystem.sh +++ b/dev-support/test-patch.d/builtin-bugsystem.sh @@ -28,7 +28,9 @@ function generic_locate_patch return 1 fi - ${WGET} -q -O "${output}" "${JIRAURL}/${input}" + ${CURL} --silent \ + --output "${output}" \ + "${input}" if [[ $? != 0 ]]; then yetus_debug "jira_locate_patch: not a JIRA." return 1 diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 10c3161103595..ef238c42e6cab 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -71,7 +71,10 @@ function github_locate_patch return 1 fi - ${WGET} -q -O "${output}" "${GITHUBURL}/${GITHUBREPO}/pull/${input}.patch" + ${CURL} --silent --fail \ + --output "${output}" \ + "${GITHUBURL}/${GITHUBREPO}/pull/${input}.patch" + if [[ $? != 0 ]]; then yetus_debug "github_locate_patch: not a github pull request." return 1 @@ -114,7 +117,7 @@ function github_finalreport { declare result=$1 declare i - declare commentfile=${PATCH_DIR}/commentfile + declare commentfile=${PATCH_DIR}/gitcommentfile.$$ declare comment # TODO: There really should be a reference to the JIRA issue, as needed @@ -131,9 +134,9 @@ function github_finalreport add_footer_table "Console output" "${BUILD_URL}console" if [[ ${result} == 0 ]]; then - add_header_line ":confetti_ball: **+1 overall**" + echo ":confetti_ball: **+1 overall**" >> ${commentfile} else - add_header_line ":broken_heart: **-1 overall**" + echo ":broken_heart: **-1 overall**" >> ${commentfile} fi printf "\n\n\n\n" >> "${commentfile}" diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 31116e90124d2..33262d544a698 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -15,7 +15,7 @@ # limitations under the License. 
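Two curl details in this patch deserve a second look. First, smart-apply-patch's download keeps wget's flags literally, but in curl -q means "skip .curlrc" and -O means "name the file after the remote", so "${CURL} -q -O "${PATCH_DIR}/patch" "${PATCHURL}"" never writes the named file. Second, github_locate_patch passes --fail while generic_locate_patch does not; without --fail curl exits 0 on an HTTP 404 and saves the error page as if it were a patch. A sketch of the intended download, plus a check of the --fail behavior:

    # wget -q -O "${PATCH_DIR}/patch" "${PATCHURL}"             # old behavior
    curl --silent --location --fail \
      --output "${PATCH_DIR}/patch" "${PATCHURL}"               # curl equivalent

    curl --silent        --output /dev/null https://api.github.com/no-such; echo $?   # 0
    curl --silent --fail --output /dev/null https://api.github.com/no-such; echo $?   # 22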
JIRACLI=${JIRA:-jira} -JIRA_URL=${JIRA_URL:-"http://issues.apache.org/jira"} +JIRA_URL=${JIRA_URL:-"https://issues.apache.org/jira"} JIRA_ISSUE_RE='^(YETUS)-[0-9]+$' add_bugsystem jira @@ -68,6 +68,7 @@ function jira_determine_issue if [[ ${maybeissue} =~ ${JIRA_ISSUE_RE} ]]; then ISSUE=${maybeissue} + add_footer_table "JIRA Issue" "${ISSUE}" return 0 fi @@ -81,9 +82,16 @@ function jira_http_fetch if [[ -n "${JIRA_USER}" && -n "${JIRA_PASSWD}" ]]; then - ${WGET} --user="${JIRA_USER}" --password="${JIRA_PASSWD}" -q -O "${output}" "${JIRA_URL}/${input}" + ${CURL} --silent --fail \ + --user "${JIRA_USER}:${JIRA_PASSWD}" \ + --output "${output}" \ + --location \ + "${JIRA_URL}/${input}" else - ${WGET} -q -O "${output}" "${JIRA_URL}/${input}" + ${CURL} --silent --fail \ + --output "${output}" \ + --location \ + "${JIRA_URL}/${input}" fi } @@ -160,16 +168,24 @@ function jira_write_comment if [[ -n ${JIRA_PASSWD} && -n ${JIRA_USER} ]]; then - # shellcheck disable=SC2086 - ${JIRACLI} --comment "$(cat ${commentfile})" \ - -s "${JIRA_URL}" \ - -a addcomment -u ${JIRA_USER} \ - -p "${JIRA_PASSWD}" \ - --issue "${ISSUE}" + + echo "{\"body\":\"" > "${PATCH_DIR}/jiracomment.$$" + sed -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + -e 's,$,\\r\\n,g' "${commentfile}" \ + | tr -d '\n'>> "${PATCH_DIR}/jiracomment.$$" + echo "\"}" >> "${PATCH_DIR}/jiracomment.$$" + + ${CURL} -X POST \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -u "${JIRA_USER}:${JIRA_PASSWD}" \ + -d @"${PATCH_DIR}/jiracomment.$$" \ + --silent --location \ + "${JIRA_URL}/rest/api/2/issue/${ISSUE}/comment" \ + >/dev/null retval=$? - ${JIRACLI} -s "${JIRA_URL}" \ - -a logout -u "${JIRA_USER}" \ - -p "${JIRA_PASSWD}" + #rm "${PATCH_DIR}/jiracomment.$$" fi return ${retval} } @@ -183,7 +199,7 @@ function jira_finalreport { declare result=$1 declare i - declare commentfile=${PATCH_DIR}/commentfile + declare commentfile=${PATCH_DIR}/jiracommentfile declare comment declare vote declare ourstring @@ -204,12 +220,12 @@ function jira_finalreport add_footer_table "Console output" "${BUILD_URL}console" if [[ ${result} == 0 ]]; then - add_header_line "| (/) *{color:green}+1 overall{color}* |" + echo "| (/) *{color:green}+1 overall{color}* |" >> ${commentfile} else - add_header_line "| (x) *{color:red}-1 overall{color}* |" + echo "| (x) *{color:red}-1 overall{color}* |" >> ${commentfile} fi - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + echo "\\\\" >> "${commentfile}" i=0 until [[ $i -eq ${#TP_HEADER[@]} ]]; do @@ -217,7 +233,7 @@ function jira_finalreport ((i=i+1)) done - { echo "\\\\" ; echo "\\\\"; } >> "${commentfile}" + echo "\\\\" >> "${commentfile}" echo "|| Vote || Subsystem || Runtime || Comment ||" >> "${commentfile}" diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 924241adc7aeb..59fa20690e285 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -23,6 +23,8 @@ if [[ -z "${BASH_VERSINFO}" ]] \ exit 1 fi +set -x + ### BUILD_URL is set by Hudson if it is run by patch process this="${BASH_SOURCE-$0}" @@ -110,7 +112,7 @@ function setup_defaults SunOS) AWK=${AWK:-/usr/xpg4/bin/awk} SED=${SED:-/usr/xpg4/bin/sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-/usr/xpg4/bin/grep} PATCH=${PATCH:-/usr/gnu/bin/patch} @@ -120,7 +122,7 @@ function setup_defaults *) AWK=${AWK:-awk} SED=${SED:-sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-grep} PATCH=${PATCH:-patch} @@ -757,6 +759,7 @@ function testpatch_usage echo "Shell binary 
overrides:" echo "--ant-cmd= The 'ant' command to use (default \${ANT_HOME}/bin/ant, or 'ant')" echo "--awk-cmd= The 'awk' command to use (default 'awk')" + echo "--curl-cmd= The 'wget' command to use (default 'curl')" echo "--diff-cmd= The GNU-compatible 'diff' command to use (default 'diff')" echo "--file-cmd= The 'file' command to use (default 'file')" echo "--git-cmd= The 'git' command to use (default 'git')" @@ -771,7 +774,6 @@ function testpatch_usage echo "--build-url Set the build location web page" echo "--eclipse-home= Eclipse home directory (default ECLIPSE_HOME environment variable)" echo "--mv-patch-dir Move the patch-dir into the basedir during cleanup." - echo "--wget-cmd= The 'wget' command to use (default 'wget')" importplugins @@ -824,6 +826,9 @@ function parse_args --contrib-guide=*) HOW_TO_CONTRIBUTE=${i#*=} ;; + --curl-cmd=*) + CURL=${i#*=} + ;; --debug) TP_SHELL_SCRIPT_DEBUG=true ;; @@ -945,9 +950,6 @@ function parse_args --tpreexectimer=*) REEXECLAUNCHTIMER=${i#*=} ;; - --wget-cmd=*) - WGET=${i#*=} - ;; --*) ## PATCH_OR_ISSUE can't be a --. So this is probably ## a plugin thing. @@ -1503,7 +1505,7 @@ function determine_issue for bugsys in ${BUGSYSTEMS}; do if declare -f ${bugsys}_determine_issue >/dev/null; then - "${bugsys}_determine_issue" "${PATCH_OR_URL}" + "${bugsys}_determine_issue" "${PATCH_OR_ISSUE}" if [[ $? == 0 ]]; then yetus_debug "${bugsys} says ${ISSUE}" return 0 From 90c34c667028f3f2da55767cfcf714a89ef00f92 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 7 Aug 2015 21:08:41 -0700 Subject: [PATCH 061/130] update docs for switch --- dev-support/docs/precommit-basic.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md index e68ad071361bf..e8121056036df 100644 --- a/dev-support/docs/precommit-basic.md +++ b/dev-support/docs/precommit-basic.md @@ -48,7 +48,7 @@ test-patch has the following requirements: * POSIX awk * POSIX grep * POSIX sed -* wget +* curl * file command * smart-apply-patch.sh @@ -59,8 +59,8 @@ Maven plugins requirements: Optional: -* Apache JIRA-based issue tracking -* JIRA cli tools +* JIRA-based issue tracking +* GitHub-based issue tracking The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options. For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin and the default location for the GNU binaries is /usr/gnu/bin. From dddec72f9839769de520d366ef84580cfa1a6e66 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 09:44:47 -0700 Subject: [PATCH 062/130] HADOOP-12275. releasedocmaker: unreleased should still be dated (Kengo Seki via aw) --- dev-support/releasedocmaker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 37bd58aa69869..3c398befbc93d 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -434,7 +434,7 @@ def main(): elif options.usetoday: reldate = strftime("%Y-%m-%d", gmtime()) else: - reldate = "Unreleased" + reldate = "Unreleased (as of %s)" % strftime("%Y-%m-%d", gmtime()) if not os.path.exists(vstr): os.mkdir(vstr) From 3d888ecc54cce0eefe77700cab2ac2759f4c030f Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 09:46:06 -0700 Subject: [PATCH 063/130] HADOOP-12310. 
final memory report sometimes generates spurious errors (Kengo Seki via aw) --- dev-support/test-patch.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 1faf99f2d4e0f..446d5cf70ea6e 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -395,9 +395,10 @@ function finish_footer_table { local maxmem + # `sort | head` can cause a broken pipe error, but we can ignore it just like compute_gitdiff. # shellcheck disable=SC2016,SC2086 maxmem=$(find "${PATCH_DIR}" -type f -exec ${AWK} 'match($0, /^\[INFO\] Final Memory: [0-9]+/) - { print substr($0, 22, RLENGTH-21) }' {} \; | sort -nr | head -n 1) + { print substr($0, 22, RLENGTH-21) }' {} \; | sort -nr 2>/dev/null | head -n 1) if [[ -n ${maxmem} ]]; then add_footer_table "Max memory used" "${maxmem}MB" From 872606974bdfa494fb4f165d682718db2a85bca6 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 11:10:36 -0700 Subject: [PATCH 064/130] HADOOP-12248. Add native support for TAP (aw) --- dev-support/personality/hadoop.sh | 74 +++++++++++++++++++ .../test-patch.d/builtin-personality.sh | 22 +++--- dev-support/test-patch.d/shellcheck.sh | 2 +- dev-support/test-patch.sh | 40 ++++++++-- 4 files changed, 122 insertions(+), 16 deletions(-) diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 60dbb3d7989bc..1243a1786e35b 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -253,6 +253,26 @@ function personality_modules #fi needflags=true hadoop_unittest_prereqs + + verify_needed_test javac + if [[ $? == 0 ]]; then + yetus_debug "hadoop: javac not requested" + verify_needed_test native + if [[ $? == 0 ]]; then + yetus_debug "hadoop: native not requested" + yetus_debug "hadoop: adding -DskipTests to unit test" + extra="-DskipTests" + fi + fi + + verify_needed_test shellcheck + if [[ $? == 0 + && ! 
${CHANGED_FILES} =~ \.bats ]]; then + yetus_debug "hadoop: NO shell code change detected; disabling shelltest profile" + extra="${extra} -P!shelltest" + else + extra="${extra} -Pshelltest" + fi ;; *) extra="-DskipTests" @@ -272,3 +292,57 @@ function personality_modules done } +function personality_file_tests +{ + local filename=$1 + + yetus_debug "Using Hadoop-specific personality_file_tests" + + if [[ ${filename} =~ src/main/webapp ]]; then + yetus_debug "tests/webapp: ${filename}" + elif [[ ${filename} =~ \.sh + || ${filename} =~ \.cmd + || ${filename} =~ src/scripts + || ${filename} =~ src/test/scripts + ]]; then + yetus_debug "tests/shell: ${filename}" + add_test unit + elif [[ ${filename} =~ \.md$ + || ${filename} =~ \.md\.vm$ + || ${filename} =~ src/site + ]]; then + yetus_debug "tests/site: ${filename}" + add_test site + elif [[ ${filename} =~ \.c$ + || ${filename} =~ \.cc$ + || ${filename} =~ \.h$ + || ${filename} =~ \.hh$ + || ${filename} =~ \.proto$ + || ${filename} =~ \.cmake$ + || ${filename} =~ CMakeLists.txt + ]]; then + yetus_debug "tests/units: ${filename}" + add_test cc + add_test unit + add_test javac + elif [[ ${filename} =~ build.xml$ + || ${filename} =~ pom.xml$ + || ${filename} =~ \.java$ + || ${filename} =~ src/main + ]]; then + yetus_debug "tests/javadoc+units: ${filename}" + add_test javac + add_test javadoc + add_test mvninstall + add_test unit + fi + + if [[ ${filename} =~ src/test ]]; then + yetus_debug "tests" + add_test unit + fi + + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + fi +} diff --git a/dev-support/test-patch.d/builtin-personality.sh b/dev-support/test-patch.d/builtin-personality.sh index dc944e485f1e1..4be3bfa3c2b2d 100755 --- a/dev-support/test-patch.d/builtin-personality.sh +++ b/dev-support/test-patch.d/builtin-personality.sh @@ -55,6 +55,8 @@ function builtin_mvn_personality_file_tests yetus_debug "tests/webapp: ${filename}" elif [[ ${filename} =~ \.sh || ${filename} =~ \.cmd + || ${filename} =~ src/main/scripts + || ${filename} =~ src/test/scripts ]]; then yetus_debug "tests/shell: ${filename}" elif [[ ${filename} =~ \.md$ @@ -69,29 +71,31 @@ function builtin_mvn_personality_file_tests || ${filename} =~ \.h$ || ${filename} =~ \.hh$ || ${filename} =~ \.proto$ - || ${filename} =~ src/test || ${filename} =~ \.cmake$ || ${filename} =~ CMakeLists.txt ]]; then yetus_debug "tests/units: ${filename}" + add_test cc + add_test unit + elif [[ ${filename} =~ \.scala$ ]]; then add_test javac - add_test mvninstall add_test unit - elif [[ ${filename} =~ pom.xml$ + add_test mvninstall + elif [[ ${filename} =~ build.xml$ + || ${filename} =~ pom.xml$ || ${filename} =~ \.java$ - || ${filename} =~ \.scala$ || ${filename} =~ src/main ]]; then - if [[ ${filename} =~ src/main/bin - || ${filename} =~ src/main/sbin ]]; then - yetus_debug "tests/shell: ${filename}" - else yetus_debug "tests/javadoc+units: ${filename}" add_test javac add_test javadoc add_test mvninstall add_test unit - fi + fi + + if [[ ${filename} =~ src/test ]]; then + yetus_debug "tests" + add_test unit fi if [[ ${filename} =~ \.java$ ]]; then diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 14d1d1837637f..4d177685fe344 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -56,7 +56,7 @@ function shellcheck_private_findbash fi list="${list} ${i}" done - done < <(find . -type d -name bin -o -type d -name sbin -o -type d -name libexec -o -type d -name shellprofile.d) + done < <(find . 
-type d -name bin -o -type d -name sbin -o -type d -name scripts -o -type d -name libexec -o -type d -name shellprofile.d) # shellcheck disable=SC2086 echo ${list} ${SHELLCHECK_SPECIFICFILES} | tr ' ' '\n' | sort -u } diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 446d5cf70ea6e..4dd15e69e1b6b 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -30,6 +30,8 @@ BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) STARTINGDIR=$(pwd) USER_PARAMS=("$@") GLOBALTIMER=$(date +"%s") +#shellcheck disable=SC2034 +QATESTMODE=false # global arrays declare -a MAVEN_ARGS=("--batch-mode") @@ -1177,6 +1179,7 @@ function find_changed_modules #shellcheck disable=SC2086,SC2116 CHANGED_UNFILTERED_MODULES=$(echo ${CHANGED_UNFILTERED_MODULES}) + if [[ ${BUILDTOOL} = maven ]]; then # Filter out modules without code for module in ${builddirs}; do @@ -1786,7 +1789,7 @@ function copytpbits # if we've already copied, then don't bother doing it again if [[ ${STARTDIR} == ${PATCH_DIR}/precommit ]]; then - hadoop_debug "Skipping copytpbits; already copied once" + yetus_debug "Skipping copytpbits; already copied once" return fi @@ -2942,6 +2945,7 @@ function populate_test_table function check_unittests { local i + local testsys local test_logfile local result=0 local -r savejavahome=${JAVA_HOME} @@ -2949,6 +2953,9 @@ function check_unittests local jdk="" local jdkindex=0 local statusjdk + local formatresult=0 + local needlog + local unitlogs big_console_header "Running unit tests" @@ -2976,7 +2983,7 @@ function check_unittests personality_modules patch unit case ${BUILDTOOL} in maven) - modules_workers patch unit clean install -fae + modules_workers patch unit clean test -fae ;; ant) modules_workers patch unit @@ -3002,13 +3009,23 @@ function check_unittests pushd "${MODULE[${i}]}" >/dev/null - for j in ${TESTSYSTEMS}; do - if declare -f ${j}_process_tests; then - "${j}_process_tests" "${module}" "${test_logfile}" - ((results=results+$?)) + needlog=0 + for testsys in ${TESTFORMATS}; do + if declare -f ${testsys}_process_tests >/dev/null; then + yetus_debug "Calling ${testsys}_process_tests" + "${testsys}_process_tests" "${module}" "${test_logfile}" "${fn}" + formatresult=$? 
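The loop above gives test formats the same plug-in treatment bug systems received: anything that defines a <format>_process_tests hook (and optionally <format>_finalize_results) gets invoked per module. Toward HADOOP-12248's TAP goal, a hypothetical minimal hook might look like the following; the bare "not ok" grep is a placeholder of mine, not code from this series:

    # Hypothetical TAP hook; flags modules whose logs contain raw TAP failures.
    function tap_process_tests
    {
      declare module=$1
      declare logfile=$2
      declare failures

      failures=$(${GREP} -c '^not ok' "${logfile}")
      if [[ ${failures} -gt 0 ]]; then
        add_test_table -1 TAP "${module}: ${failures} failing TAP test(s)"
        return 1
      fi
      return 0
    }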
+ ((results=results+formatresult)) + if [[ "${formatresult}" != 0 ]]; then + needlog=1 + fi fi done + if [[ ${needlog} == 1 ]]; then + unitlogs="${unitlogs} @@BASE@@/patch-unit-${fn}.txt" + fi + popd >/dev/null ((i=i+1)) @@ -3017,10 +3034,21 @@ function check_unittests done JAVA_HOME=${savejavahome} + if [[ -n "${unitlogs}" ]]; then + add_footer_table "unit test logs" "${unitlogs}" + fi + if [[ ${JENKINS} == true ]]; then add_footer_table "${statusjdk} Test Results" "${BUILD_URL}testReport/" fi + for testsys in ${TESTFORMATS}; do + if declare -f ${testsys}_finalize_results >/dev/null; then + yetus_debug "Calling ${testsys}_finalize_results" + "${testsys}_finalize_results" "${statusjdk}" + fi + done + if [[ ${result} -gt 0 ]]; then return 1 fi From cd9ae01bc1e76a295d5d79536b6ef6df80987748 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 11:12:03 -0700 Subject: [PATCH 065/130] more fixes --- dev-support/test-patch.d/github.sh | 8 ++++++++ dev-support/test-patch.d/jira.sh | 16 ++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index ef238c42e6cab..d1b16f8763302 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -61,6 +61,14 @@ function github_parse_args done } +## @description this gets called when JIRA thinks this +## @description issue is just a pointer to github +function github_jira_bridge +{ + + +} + function github_locate_patch { declare input=$1 diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 33262d544a698..6ab7697c00259 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -23,11 +23,11 @@ add_bugsystem jira function jira_usage { echo "JIRA Options:" - echo "--jira-issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${JIRA_ISSUE_RE}\')" - echo "--jira-cmd= The 'jira' command to use (default '${JIRACLI}')" - echo "--jira-password= The password for the 'jira' command" - echo "--jira-base-url= The URL of the JIRA server (default:'${JIRA_URL}')" - echo "--jira-user= The user for the 'jira' command" + echo "--jira-issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${JIRA_ISSUE_RE}\')" + echo "--jira-cmd= The 'jira' command to use (default '${JIRACLI}')" + echo "--jira-password= The password for the 'jira' command" + echo "--jira-base-url= The URL of the JIRA server (default:'${JIRA_URL}')" + echo "--jira-user= The user for the 'jira' command" } function jira_parse_args @@ -99,6 +99,7 @@ function jira_locate_patch { declare input=$1 declare fileloc=$2 + declare githuburl yetus_debug "jira_locate_patch: trying ${JIRA_URL}/browse/${input}" @@ -119,13 +120,16 @@ function jira_locate_patch # appropriate bits so that it gets setup to write a comment # to the PR - if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then + if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]]; then if [[ ${JENKINS} == true ]]; then yetus_error "ERROR: ${input} is not \"Patch Available\"." cleanup_and_exit 1 else yetus_error "WARNING: ${input} is not \"Patch Available\"." fi + elif [[ ${GREP} -c '^.*[https://github.com/.*patch].*$' "${PATCH_DIR}/jira" ]]; then + github_jira_bridge + return $? 
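As written, the elif above never actually runs grep: inside [[ ]] the expansion of ${GREP} is treated as a plain word, so the expression is malformed rather than a command whose output gets tested. The shape the very next patch in this series settles on wraps the call in command substitution:

    # Corrected form (as adopted one patch later): run grep, test its count.
    if [[ $(${GREP} -c 'https://github.com/.*patch' "${PATCH_DIR}/jira") != 0 ]]; then
      github_jira_bridge "${fileloc}"
      return $?
    fi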
fi #shellcheck disable=SC2016 From a4c0debf833bf9498e7ea5fe22d1a6515de00e7d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 15:33:01 -0700 Subject: [PATCH 066/130] more github fixes --- dev-support/test-patch.d/github.sh | 119 ++++++++++++++++++++++++++--- dev-support/test-patch.d/jira.sh | 40 ++++------ 2 files changed, 126 insertions(+), 33 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index d1b16f8763302..72afee70f5372 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -16,8 +16,8 @@ add_bugsystem github -GITHUBURL="https://github.com" -GITHUBREPO="apache/hadoop" +GITHUB_URL="https://github.com" +GITHUB_REPO="apache/hadoop" GITHUB_PASSWD="" GITHUB_TOKEN="" @@ -28,9 +28,9 @@ GITHUB_ISSUE="" function github_usage { echo "GITHUB Options:" - echo "--github-base-url= The URL of the JIRA server (default:'${GITHUBURL}')" + echo "--github-base-url= The URL of the JIRA server (default:'${GITHUB_URL}')" echo "--github-password= Github password" - echo "--github-repo= github repo to use (default:'${GITHUBREPO}')" + echo "--github-repo= github repo to use (default:'${GITHUB_REPO}')" echo "--github-token= The token to use to write to github" echo "--github-user= Github user" @@ -43,10 +43,10 @@ function github_parse_args for i in "$@"; do case ${i} in --github-base-url=*) - GITHUBURL=${i#*=} + GITHUB_URL=${i#*=} ;; --github-repo=*) - GITHUBREPO=${i#*=} + GITHUB_REPO=${i#*=} ;; --github-token=*) GITHUB_TOKEN=${i#*=} @@ -65,8 +65,52 @@ function github_parse_args ## @description issue is just a pointer to github function github_jira_bridge { + declare fileloc=$1 + declare urlfromjira + declare count + declare pos1 + declare pos2 + # the JIRA issue has already been downloaded. So let's + # find the URL. This is currently hard-coded to github.com + # Sorry Github Enterprise users. :( + # shellcheck disable=SC2016 + urlfromjira=$(${AWK} 'match($0,"https://github.com/.*patch"){print $1}' "${PATCH_DIR}/jira" | tail -1) + count=${urlfromjira//[^\/]} + count=${#count} + ((pos2=count-3)) + ((pos1=pos2)) + + GITHUB_URL=$(echo "${urlfromjira}" | cut -f1-${pos2} -d/) + + ((pos1=pos1+1)) + ((pos2=pos1+1)) + + GITHUB_REPO=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/) + + ((pos1=pos2+2)) + unset pos2 + + GITHUB_ISSUE=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) + + github_locate_patch "${GITHUB_ISSUE}" "${fileloc}" +} + +function github_determine_issue +{ + declare input=$1 + declare patchnamechunk + declare maybeissue + + + if [[ ${input} =~ ^[0-9]+$ + && -n ${GITHUB_REPO} ]]; then + ISSUE=${input} + return 0 + fi + + return 1 } function github_locate_patch @@ -79,9 +123,19 @@ function github_locate_patch return 1 fi + if [[ ! ${input} =~ ^[0-9]+$ ]]; then + yetus_debug "github: ${input} is not a pull request #" + return 1 + fi + + PATCHURL="${GITHUB_URL}/${GITHUB_REPO}/pull/${input}.patch" + echo "GITHUB PR #${input} is being downloaded at $(date) from" + echo "${PATCHURL}" + ${CURL} --silent --fail \ --output "${output}" \ - "${GITHUBURL}/${GITHUBREPO}/pull/${input}.patch" + --location \ + "${PATCHURL}" if [[ $? != 0 ]]; then yetus_debug "github_locate_patch: not a github pull request." 
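github_jira_bridge above reconstructs GITHUB_URL, GITHUB_REPO, and GITHUB_ISSUE by counting slashes and slicing with cut, which is fragile if the URL shape shifts. An equivalent sketch using one bash regex over the same input (the URL is only an example):

    urlfromjira="https://github.com/apache/hadoop/pull/25.patch"
    if [[ ${urlfromjira} =~ ^(https?://[^/]+)/([^/]+/[^/]+)/pull/([0-9]+) ]]; then
      GITHUB_URL=${BASH_REMATCH[1]}      # https://github.com
      GITHUB_REPO=${BASH_REMATCH[2]}     # apache/hadoop
      GITHUB_ISSUE=${BASH_REMATCH[3]}    # 25
    fi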
@@ -93,6 +147,9 @@ function github_locate_patch GITHUB_ISSUE=${input} GITHUB_COMMITID="" + + add_footer_table "GITHUB PR" "${GITHUB_URL}/${GITHUB_REPO}/pull/${input}" + return 0 } @@ -100,6 +157,48 @@ function github_locate_patch ## @params filename ## @stability stable ## @audience public +function github_write_comment +{ + declare -r commentfile=${1} + shift + + declare retval=0 + + if [[ "${OFFLINE}" == true ]]; then + return 0 + fi + + echo "{\"body\":\"" > "${PATCH_DIR}/ghcomment.$$" + sed -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + | tr -d '\n'>> "${PATCH_DIR}/ghcomment.$$" + echo "\"}" >> "${PATCH_DIR}/ghcomment.$$" + + if [[ -n ${GITHUB_USER} + && -n ${GITHUB_PASSWD} ]]; then + githubauth="-u \"${GITHUB_USER}:${GITHUB_PASSWD}\"" + elif [[ -n ${GITHUB_TOKEN} ]]; then + githubauth="-H \"Authorization: token ${GITHUB_TOKEN}\"" + else + return 0 + fi + + ${CURL} -X POST \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -u "${GITHUB_USER}:${GITHUB_PASSWD}" \ + -d @"${PATCH_DIR}/jiracomment.$$" \ + --silent --location \ + "${JIRA_URL}/rest/api/2/issue/${ISSUE}/comment" \ + >/dev/null + + retval=$? + rm "${PATCH_DIR}/jiracomment.$$" + fi + return ${retval} +} + + function github_write_comment { declare -r commentfile=${1} @@ -111,6 +210,8 @@ function github_write_comment return 0 fi + + yetus_debug "${GITHUB_USER} ${GITHUB_PASSWD} ${GITHUB_TOKEN} ${GITHUB_COMMITID}" return ${retval} } @@ -142,9 +243,9 @@ function github_finalreport add_footer_table "Console output" "${BUILD_URL}console" if [[ ${result} == 0 ]]; then - echo ":confetti_ball: **+1 overall**" >> ${commentfile} + echo ":confetti_ball: **+1 overall**" >> "${commentfile}" else - echo ":broken_heart: **-1 overall**" >> ${commentfile} + echo ":broken_heart: **-1 overall**" >> "${commentfile}" fi printf "\n\n\n\n" >> "${commentfile}" diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 6ab7697c00259..066cca5da67b9 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -JIRACLI=${JIRA:-jira} JIRA_URL=${JIRA_URL:-"https://issues.apache.org/jira"} JIRA_ISSUE_RE='^(YETUS)-[0-9]+$' @@ -24,7 +23,6 @@ function jira_usage { echo "JIRA Options:" echo "--jira-issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${JIRA_ISSUE_RE}\')" - echo "--jira-cmd= The 'jira' command to use (default '${JIRACLI}')" echo "--jira-password= The password for the 'jira' command" echo "--jira-base-url= The URL of the JIRA server (default:'${JIRA_URL}')" echo "--jira-user= The user for the 'jira' command" @@ -36,9 +34,6 @@ function jira_parse_args for i in "$@"; do case ${i} in - --jira-cmd=*) - JIRACLI=${i#*=} - ;; --jira-base-url=*) JIRA_URL=${i#*=} ;; @@ -99,7 +94,7 @@ function jira_locate_patch { declare input=$1 declare fileloc=$2 - declare githuburl + declare relativeurl yetus_debug "jira_locate_patch: trying ${JIRA_URL}/browse/${input}" @@ -115,27 +110,24 @@ function jira_locate_patch return 1 fi - # TODO: we should check for a gitbub-based pull request here - # if we find one, call the github plug-in directly with the - # appropriate bits so that it gets setup to write a comment - # to the PR - - if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]]; then + # Sorry enterprise github users. 
Currently hard-coded to github.com + if [[ $(${GREP} -c 'https://github.com/.*patch' "${PATCH_DIR}/jira") != 0 ]]; then + echo "${input} appears to be a Github PR. Switching Modes." + github_jira_bridge "${fileloc}" + return $? + elif [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]]; then if [[ ${JENKINS} == true ]]; then yetus_error "ERROR: ${input} is not \"Patch Available\"." cleanup_and_exit 1 else yetus_error "WARNING: ${input} is not \"Patch Available\"." fi - elif [[ ${GREP} -c '^.*[https://github.com/.*patch].*$' "${PATCH_DIR}/jira" ]]; then - github_jira_bridge - return $? fi #shellcheck disable=SC2016 - relativePatchURL=$(${AWK} 'match($0,"/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART,RLENGTH)}' "${PATCH_DIR}/jira" | + relativeurl=$(${AWK} 'match($0,"/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART,RLENGTH)}' "${PATCH_DIR}/jira" | ${GREP} -v -e 'htm[l]*$' | sort | tail -1 | ${SED} -e 's,[ ]*$,,g') - PATCHURL="${JIRA_URL}${relativePatchURL}" + PATCHURL="${JIRA_URL}${relativeurl}" if [[ ! ${PATCHURL} =~ \.patch$ ]]; then guess_patch_file "${PATCH_DIR}/patch" if [[ $? == 0 ]]; then @@ -143,12 +135,12 @@ function jira_locate_patch add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions." fi fi - echo "${ISSUE} patch is being downloaded at $(date) from" + echo "${input} patch is being downloaded at $(date) from" echo "${PATCHURL}" add_footer_table "JIRA Patch URL" "${PATCHURL}" - jira_http_fetch "${relativePatchURL}" "${fileloc}" + jira_http_fetch "${relativeurl}" "${fileloc}" if [[ $? != 0 ]];then - yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." + yetus_error "ERROR: ${input}/${PATCHURL} could not be downloaded." cleanup_and_exit 1 fi return 0 @@ -158,7 +150,7 @@ function jira_locate_patch ## @params filename ## @stability stable ## @audience public -## @returns ${JIRACLI} exit code +## @returns ${CURL} exit code function jira_write_comment { declare -r commentfile=${1} @@ -189,7 +181,7 @@ function jira_write_comment "${JIRA_URL}/rest/api/2/issue/${ISSUE}/comment" \ >/dev/null retval=$? - #rm "${PATCH_DIR}/jiracomment.$$" + rm "${PATCH_DIR}/jiracomment.$$" fi return ${retval} } @@ -224,9 +216,9 @@ function jira_finalreport add_footer_table "Console output" "${BUILD_URL}console" if [[ ${result} == 0 ]]; then - echo "| (/) *{color:green}+1 overall{color}* |" >> ${commentfile} + echo "| (/) *{color:green}+1 overall{color}* |" >> "${commentfile}" else - echo "| (x) *{color:red}-1 overall{color}* |" >> ${commentfile} + echo "| (x) *{color:red}-1 overall{color}* |" >> "${commentfile}" fi echo "\\\\" >> "${commentfile}" From 54b8439983b3c0f2fde13bd69ad72085ddc942bb Mon Sep 17 00:00:00 2001 From: Jian He Date: Wed, 5 Aug 2015 16:12:45 -0700 Subject: [PATCH 067/130] HADOOP-11932. MetricsSinkAdapter may hang when being stopped. Contributed by Brahma Reddy Battula --- .../hadoop-common/CHANGES.txt | 3 + .../metrics2/impl/MetricsSinkAdapter.java | 6 +- .../metrics2/impl/TestMetricsSystemImpl.java | 60 +++++++++++++++++++ 3 files changed, 66 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7571cc501c08a..7d7982f61a00a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -1079,6 +1079,9 @@ Release 2.7.2 - UNRELEASED HADOOP-12304. 
Applications using FileContext fail with the default file
    system configured to be wasb/s3/etc. (cnauroth)

+    HADOOP-11932. MetricsSinkAdapter may hang when being stopped.
+    (Brahma Reddy Battula via jianhe)
+
 Release 2.7.1 - 2015-07-06

   INCOMPATIBLE CHANGES

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
index ed52317538e16..b2f3c4aa21b65 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
@@ -206,14 +206,14 @@ void start() {
   void stop() {
     stopping = true;
     sinkThread.interrupt();
+    if (sink instanceof Closeable) {
+      IOUtils.cleanup(LOG, (Closeable)sink);
+    }
     try {
       sinkThread.join();
     } catch (InterruptedException e) {
       LOG.warn("Stop interrupted", e);
     }
-    if (sink instanceof Closeable) {
-      IOUtils.cleanup(LOG, (Closeable)sink);
-    }
   }

   String name() {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index 0f7b15f2ef9c9..6238d7915a2e9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -18,6 +18,8 @@

 package org.apache.hadoop.metrics2.impl;

+import java.io.Closeable;
+import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.*;
@@ -484,6 +486,64 @@ public boolean apply(@Nullable AbstractMetric input) {
     }
   }

+  /**
+   * Sink used to verify HADOOP-11932. Instead of blocking on an HTTP read,
+   * putMetrics spins in a loop until the sink is closed.
+ */ + private static class TestClosableSink implements MetricsSink, Closeable { + + boolean closed = false; + CountDownLatch collectingLatch; + + public TestClosableSink(CountDownLatch collectingLatch) { + this.collectingLatch = collectingLatch; + } + + @Override + public void init(SubsetConfiguration conf) { + } + + @Override + public void close() throws IOException { + closed = true; + } + + @Override + public void putMetrics(MetricsRecord record) { + while (!closed) { + collectingLatch.countDown(); + } + } + + @Override + public void flush() { + } + } + + /** + * HADOOP-11932 + */ + @Test(timeout = 5000) + public void testHangOnSinkRead() throws Exception { + new ConfigBuilder().add("*.period", 8) + .add("test.sink.test.class", TestSink.class.getName()) + .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); + MetricsSystemImpl ms = new MetricsSystemImpl("Test"); + ms.start(); + try { + CountDownLatch collectingLatch = new CountDownLatch(1); + MetricsSink sink = new TestClosableSink(collectingLatch); + ms.registerSink("closeableSink", + "The sink will be used to test closeability", sink); + // trigger metric collection first time + ms.onTimerEvent(); + // Make sure that sink is collecting metrics + assertTrue(collectingLatch.await(1, TimeUnit.SECONDS)); + } finally { + ms.stop(); + } + } + @Metrics(context="test") private static class TestSource { @Metric("C1 desc") MutableCounterLong c1; From 5e518f4ece7991ece11c4ac6f875f63ef92112d8 Mon Sep 17 00:00:00 2001 From: rohithsharmaks Date: Thu, 6 Aug 2015 10:43:37 +0530 Subject: [PATCH 068/130] YARN-3992. TestApplicationPriority.testApplicationPriorityAllocation fails intermittently. (Contributed by Sunil G) --- hadoop-yarn-project/CHANGES.txt | 3 + .../yarn/server/resourcemanager/MockAM.java | 14 ++-- .../capacity/TestApplicationPriority.java | 69 +++++-------------- 3 files changed, 29 insertions(+), 57 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index cff7f6bfae84c..29a7630448e0b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -732,6 +732,9 @@ Release 2.8.0 - UNRELEASED YARN-433. When RM is catching up with node updates then it should not expire acquired containers. (Xuan Gong via zxu) + YARN-3992. TestApplicationPriority.testApplicationPriorityAllocation fails + intermittently. 
(Contributed by Sunil G)
+
 Release 2.7.2 - UNRELEASED

   INCOMPATIBLE CHANGES

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 0e25360016627..5660b785f7290 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -309,16 +309,20 @@ public Object run() throws Exception {
   public ApplicationAttemptId getApplicationAttemptId() {
     return this.attemptId;
   }
-
+
   public List<Container> allocateAndWaitForContainers(int nContainer,
       int memory, MockNM nm) throws Exception {
+    return allocateAndWaitForContainers("ANY", nContainer, memory, nm);
+  }
+
+  public List<Container> allocateAndWaitForContainers(String host,
+      int nContainer, int memory, MockNM nm) throws Exception {
     // AM request for containers
-    allocate("ANY", memory, nContainer, null);
+    allocate(host, memory, nContainer, null);
     // kick the scheduler
     nm.nodeHeartbeat(true);
-    List<Container> conts =
-        allocate(new ArrayList<ResourceRequest>(), null)
-            .getAllocatedContainers();
+    List<Container> conts = allocate(new ArrayList<ResourceRequest>(), null)
+        .getAllocatedContainers();
     while (conts.size() < nContainer) {
       nm.nodeHeartbeat(true);
       conts.addAll(allocate(new ArrayList<ResourceRequest>(),
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
index 80eff064cc038..db094e3df16f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -22,20 +22,16 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

-import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
@@ -44,7 +40,6 @@ import
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; @@ -59,8 +54,6 @@ import org.junit.Test; public class TestApplicationPriority { - private static final Log LOG = LogFactory - .getLog(TestApplicationPriority.class); private final int GB = 1024; private YarnConfiguration conf; @@ -166,19 +159,10 @@ public void testApplicationPriorityAllocation() throws Exception { MockAM am1 = MockRM.launchAM(app1, rm, nm1); am1.registerAppAttempt(); - // add request for containers - am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 7); - AllocateResponse alloc1Response = am1.schedule(); // send the request + // allocate 7 containers for App1 + List allocated1 = am1.allocateAndWaitForContainers("127.0.0.1", + 7, 2 * GB, nm1); - // kick the scheduler, 7 containers will be allocated for App1 - nm1.nodeHeartbeat(true); - while (alloc1Response.getAllocatedContainers().size() < 1) { - LOG.info("Waiting for containers to be created for app 1..."); - Thread.sleep(100); - alloc1Response = am1.schedule(); - } - - List allocated1 = alloc1Response.getAllocatedContainers(); Assert.assertEquals(7, allocated1.size()); Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); @@ -193,9 +177,7 @@ public void testApplicationPriorityAllocation() throws Exception { RMApp app2 = rm.submitApp(1 * GB, appPriority2); // kick the scheduler, 1 GB which was free is given to AM of App2 - nm1.nodeHeartbeat(true); - MockAM am2 = rm.sendAMLaunched(app2.getCurrentAppAttempt() - .getAppAttemptId()); + MockAM am2 = MockRM.launchAM(app2, rm, nm1); am2.registerAppAttempt(); // check node report, 16 GB used and 0 GB available @@ -210,7 +192,7 @@ public void testApplicationPriorityAllocation() throws Exception { FiCaSchedulerApp schedulerAppAttempt = cs.getSchedulerApplications() .get(app1.getApplicationId()).getCurrentAppAttempt(); - // kill 2 containers to free up some space + // kill 2 containers of App1 to free up some space int counter = 0; for (Container c : allocated1) { if (++counter > 2) { @@ -224,22 +206,16 @@ public void testApplicationPriorityAllocation() throws Exception { Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory()); Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory()); - // add request for containers App1 - am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 10); - am1.schedule(); // send the request for App1 - - // add request for containers App2 - am2.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 3); - AllocateResponse alloc1Response4 = am2.schedule(); // send the request + // send updated request for App1 + am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList()); // kick the scheduler, since App2 priority is more than App1, it will get // remaining cluster space. 
- nm1.nodeHeartbeat(true); - while (alloc1Response4.getAllocatedContainers().size() < 1) { - LOG.info("Waiting for containers to be created for app 2..."); - Thread.sleep(100); - alloc1Response4 = am2.schedule(); - } + List allocated2 = am2.allocateAndWaitForContainers("127.0.0.1", + 2, 2 * GB, nm1); + + // App2 has got 2 containers now. + Assert.assertEquals(2, allocated2.size()); // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); @@ -268,19 +244,10 @@ public void testPriorityWithPendingApplications() throws Exception { MockAM am1 = MockRM.launchAM(app1, rm, nm1); am1.registerAppAttempt(); - // add request for containers - am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 1 * GB, 1, 7); - AllocateResponse alloc1Response = am1.schedule(); // send the request - // kick the scheduler, 7 containers will be allocated for App1 - nm1.nodeHeartbeat(true); - while (alloc1Response.getAllocatedContainers().size() < 1) { - LOG.info("Waiting for containers to be created for app 1..."); - Thread.sleep(100); - alloc1Response = am1.schedule(); - } + List allocated1 = am1.allocateAndWaitForContainers("127.0.0.1", + 7, 1 * GB, nm1); - List allocated1 = alloc1Response.getAllocatedContainers(); Assert.assertEquals(7, allocated1.size()); Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory()); @@ -308,9 +275,7 @@ public void testPriorityWithPendingApplications() throws Exception { rm.killApp(app1.getApplicationId()); // kick the scheduler, app3 (high among pending) gets free space - nm1.nodeHeartbeat(true); - MockAM am3 = rm.sendAMLaunched(app3.getCurrentAppAttempt() - .getAppAttemptId()); + MockAM am3 = MockRM.launchAM(app3, rm, nm1); am3.registerAppAttempt(); // check node report, 1 GB used and 7 GB available From f363ea680977db0a4d253d567d0ca5082d4817d6 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Thu, 6 Aug 2015 11:10:48 +0530 Subject: [PATCH 069/130] HDFS-8815. DFS getStoragePolicy implementation using single RPC call (Contributed by Surendra Singh Lilhore) --- .../hadoop/hdfs/protocol/ClientProtocol.java | 14 +++++++++++ .../main/proto/ClientNamenodeProtocol.proto | 10 ++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/DFSClient.java | 23 ++++++++++--------- ...amenodeProtocolServerSideTranslatorPB.java | 17 ++++++++++++++ .../ClientNamenodeProtocolTranslatorPB.java | 13 +++++++++++ .../hdfs/server/namenode/FSDirAttrOp.java | 23 +++++++++++++++++++ .../hdfs/server/namenode/FSNamesystem.java | 19 +++++++++++++++ .../server/namenode/NameNodeRpcServer.java | 6 +++++ .../hadoop/hdfs/TestBlockStoragePolicy.java | 23 +++++++++++++++++++ 10 files changed, 140 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 713c23cc58d5d..852899973536a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -279,6 +279,20 @@ boolean setReplication(String src, short replication) void setStoragePolicy(String src, String policyName) throws IOException; + /** + * Get the storage policy for a file/directory. + * @param path + * Path of an existing file/directory. 
+ * @throws AccessControlException + * If access is denied + * @throws org.apache.hadoop.fs.UnresolvedLinkException + * if src contains a symlink + * @throws java.io.FileNotFoundException + * If file/dir src is not found + */ + @Idempotent + BlockStoragePolicy getStoragePolicy(String path) throws IOException; + /** * Set permissions for an existing file/directory. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index b44c556bbaa68..7d3256887db4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -112,6 +112,14 @@ message SetStoragePolicyRequestProto { message SetStoragePolicyResponseProto { // void response } +message GetStoragePolicyRequestProto { + required string path = 1; +} + +message GetStoragePolicyResponseProto { + required BlockStoragePolicyProto storagePolicy = 1; +} + message GetStoragePoliciesRequestProto { // void request } @@ -725,6 +733,8 @@ service ClientNamenodeProtocol { returns(SetReplicationResponseProto); rpc setStoragePolicy(SetStoragePolicyRequestProto) returns(SetStoragePolicyResponseProto); + rpc getStoragePolicy(GetStoragePolicyRequestProto) + returns(GetStoragePolicyResponseProto); rpc getStoragePolicies(GetStoragePoliciesRequestProto) returns(GetStoragePoliciesResponseProto); rpc setPermission(SetPermissionRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f093eeaff3399..40f91f9f9698d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -770,6 +770,9 @@ Release 2.8.0 - UNRELEASED HDFS-6860. BlockStateChange logs are too noisy. (Chang Li and xyao via xyao) + HDFS-8815. DFS getStoragePolicy implementation using single RPC call + (Surendra Singh Lilhore via vinayakumarb) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 44713a455fec2..3f4621eb114db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1574,21 +1574,22 @@ public void setStoragePolicy(String src, String policyName) } /** + * @param path file/directory name * @return Get the storage policy for specified path */ public BlockStoragePolicy getStoragePolicy(String path) throws IOException { - HdfsFileStatus status = getFileInfo(path); - if (status == null) { - throw new FileNotFoundException("File does not exist: " + path); - } - byte storagePolicyId = status.getStoragePolicy(); - BlockStoragePolicy[] policies = getStoragePolicies(); - for (BlockStoragePolicy policy : policies) { - if (policy.getId() == storagePolicyId) { - return policy; - } + checkOpen(); + TraceScope scope = getPathTraceScope("getStoragePolicy", path); + try { + return namenode.getStoragePolicy(path); + } catch (RemoteException e) { + throw e.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + SafeModeException.class, + UnresolvedPathException.class); + } finally { + scope.close(); } - return null; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 480b3d9bf8b11..8e81fdc14a6b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -128,6 +128,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto; @@ -198,6 +200,7 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; @@ -1457,6 +1460,20 @@ public 
SetStoragePolicyResponseProto setStoragePolicy( return VOID_SET_STORAGE_POLICY_RESPONSE; } + @Override + public GetStoragePolicyResponseProto getStoragePolicy( + RpcController controller, GetStoragePolicyRequestProto request) + throws ServiceException { + try { + BlockStoragePolicyProto policy = PBHelper.convert(server + .getStoragePolicy(request.getPath())); + return GetStoragePolicyResponseProto.newBuilder() + .setStoragePolicy(policy).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public GetStoragePoliciesResponseProto getStoragePolicies( RpcController controller, GetStoragePoliciesRequestProto request) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 566d54f01da64..d6afa6ed6a426 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -124,6 +124,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto; @@ -1484,6 +1485,18 @@ public void setStoragePolicy(String src, String policyName) } } + @Override + public BlockStoragePolicy getStoragePolicy(String path) throws IOException { + GetStoragePolicyRequestProto request = GetStoragePolicyRequestProto + .newBuilder().setPath(path).build(); + try { + return PBHelper.convert(rpcProxy.getStoragePolicy(null, request) + .getStoragePolicy()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public BlockStoragePolicy[] getStoragePolicies() throws IOException { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index b322b698e6f4e..d624f84ae289b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -200,6 +200,29 @@ static BlockStoragePolicy[] getStoragePolicies(BlockManager bm) return bm.getStoragePolicies(); } + static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm, + String path) throws IOException { + FSPermissionChecker pc = fsd.getPermissionChecker(); + byte[][] pathComponents = FSDirectory + .getPathComponentsForReservedPath(path); + fsd.readLock(); + try { + path = fsd.resolvePath(pc, path, pathComponents); + final INodesInPath iip = fsd.getINodesInPath(path, false); + if 
(fsd.isPermissionEnabled()) { + fsd.checkPathAccess(pc, iip, FsAction.READ); + } + INode inode = iip.getLastINode(); + if (inode == null) { + throw new FileNotFoundException("File/Directory does not exist: " + + iip.getPath()); + } + return bm.getStoragePolicy(inode.getStoragePolicyID()); + } finally { + fsd.readUnlock(); + } + } + static long getPreferredBlockSize(FSDirectory fsd, String src) throws IOException { FSPermissionChecker pc = fsd.getPermissionChecker(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a259070ff3213..e3717abbe6c43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1956,6 +1956,25 @@ void setStoragePolicy(String src, String policyName) throws IOException { logAuditEvent(true, "setStoragePolicy", src, null, auditStat); } + /** + * Get the storage policy for a file or a directory. + * + * @param src + * file/directory path + * @return storage policy object + */ + BlockStoragePolicy getStoragePolicy(String src) throws IOException { + checkOperation(OperationCategory.READ); + waitForLoadingFSImage(); + readLock(); + try { + checkOperation(OperationCategory.READ); + return FSDirAttrOp.getStoragePolicy(dir, blockManager, src); + } finally { + readUnlock(); + } + } + /** * @return All the existing block storage policies */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 52aaabd13e6d5..6b7e8cfa6c3f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -690,6 +690,12 @@ public void setStoragePolicy(String src, String policyName) namesystem.setStoragePolicy(src, policyName); } + @Override + public BlockStoragePolicy getStoragePolicy(String path) throws IOException { + checkNNStartup(); + return namesystem.getStoragePolicy(path); + } + @Override public BlockStoragePolicy[] getStoragePolicies() throws IOException { checkNNStartup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 631d9f787c729..689a1d187e29f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -979,6 +979,29 @@ public void testSetStoragePolicy() throws Exception { } } + @Test + public void testGetStoragePolicy() throws Exception { + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(REPLICATION).build(); + cluster.waitActive(); + final DistributedFileSystem fs = cluster.getFileSystem(); + try { + final Path dir = new Path("/testGetStoragePolicy"); + final Path fooFile = new Path(dir, "foo"); + DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L); + DFSClient client = new DFSClient(cluster.getNameNode(0) + .getNameNodeAddress(), 
conf); + client.setStoragePolicy("/testGetStoragePolicy/foo", + HdfsConstants.COLD_STORAGE_POLICY_NAME); + String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo") + .getName(); + Assert.assertEquals("File storage policy should be COLD", + HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName); + } finally { + cluster.shutdown(); + } + } + @Test public void testSetStoragePolicyWithSnapshot() throws Exception { final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) From 58e1e842e659536fabf21759c04e8262e240020d Mon Sep 17 00:00:00 2001 From: Arun Suresh Date: Wed, 5 Aug 2015 23:14:14 -0700 Subject: [PATCH 070/130] YARN-3961. Expose pending, running and reserved containers of a queue in REST api and yarn top (adhoot via asuresh) --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../yarn/api/records/QueueStatistics.java | 36 +++++++++++++++++++ .../src/main/proto/yarn_protos.proto | 3 ++ .../apache/hadoop/yarn/client/cli/TopCLI.java | 24 +++++++++---- .../impl/pb/QueueStatisticsPBImpl.java | 36 +++++++++++++++++++ .../scheduler/capacity/AbstractCSQueue.java | 3 ++ .../dao/CapacitySchedulerQueueInfo.java | 18 ++++++++++ .../webapp/dao/FairSchedulerQueueInfo.java | 24 +++++++++++-- .../TestRMWebServicesCapacitySched.java | 4 +-- 9 files changed, 141 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 29a7630448e0b..6b409ddd5c639 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -371,6 +371,9 @@ Release 2.8.0 - UNRELEASED YARN-4004. container-executor should print output of docker logs if the docker container exits with non-0 exit status. (Varun Vasudev via xgong) + YARN-3961. Expose pending, running and reserved containers of a queue in REST + api and yarn top (adhoot via asuresh) + OPTIMIZATIONS YARN-3339. TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueStatistics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueStatistics.java index a93047e725df8..808766364f679 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueStatistics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueStatistics.java @@ -262,6 +262,42 @@ public static QueueStatistics newInstance(long submitted, long running, */ public abstract void setPendingVCores(long pendingVCores); + /** + * Get the number of pending containers. + * @return the number of pending containers. + */ + public abstract long getPendingContainers(); + + /** + * Set the number of pending containers. + * @param pendingContainers the pending containers. + */ + public abstract void setPendingContainers(long pendingContainers); + + /** + * Get the number of allocated containers. + * @return the number of allocated containers. + */ + public abstract long getAllocatedContainers(); + + /** + * Set the number of allocated containers. + * @param allocatedContainers the allocated containers. + */ + public abstract void setAllocatedContainers(long allocatedContainers); + + /** + * Get the number of reserved containers. + * @return the number of reserved containers. + */ + public abstract long getReservedContainers(); + + /** + * Set the number of reserved containers. + * @param reservedContainers the reserved containers. 
+ */ + public abstract void setReservedContainers(long reservedContainers); + /** * Get the reserved vcores * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 7bd03975a1a1d..fdacab3edf0fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -384,6 +384,9 @@ message QueueStatisticsProto { optional int64 allocatedVCores = 13; optional int64 pendingVCores = 14; optional int64 reservedVCores = 15; + optional int64 allocatedContainers = 16; + optional int64 pendingContainers = 17; + optional int64 reservedContainers = 18; } message QueueInfoProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index 2b7d7f3ec67ed..423fb3c8733d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -71,7 +71,7 @@ public class TopCLI extends YarnCLI { private String SET_CURSOR_HOME = "\u001b[H"; private String CHANGE_BACKGROUND = "\u001b[7m"; private String RESET_BACKGROUND = "\u001b[0m"; - private String SET_CURSOR_LINE_6_COLUMN_0 = "\u001b[6;0f"; + private String SET_CURSOR_LINE_7_COLUMN_0 = "\u001b[7;0f"; // guava cache for getapplications call protected Cache> @@ -331,6 +331,9 @@ private static class QueueMetrics { long allocatedVCores; long pendingVCores; long reservedVCores; + long allocatedContainers; + long reservedContainers; + long pendingContainers; } private class KeyboardMonitor extends Thread { @@ -596,14 +599,14 @@ protected void setTerminalSequences() throws IOException, String[] tput_cursor_home = { "tput", "cup", "0", "0" }; String[] tput_clear = { "tput", "clear" }; String[] tput_clear_line = { "tput", "el" }; - String[] tput_set_cursor_line_6_column_0 = { "tput", "cup", "5", "0" }; + String[] tput_set_cursor_line_7_column_0 = { "tput", "cup", "6", "0" }; String[] tput_change_background = { "tput", "smso" }; String[] tput_reset_background = { "tput", "rmso" }; SET_CURSOR_HOME = getCommandOutput(tput_cursor_home); CLEAR = getCommandOutput(tput_clear); CLEAR_LINE = getCommandOutput(tput_clear_line); - SET_CURSOR_LINE_6_COLUMN_0 = - getCommandOutput(tput_set_cursor_line_6_column_0); + SET_CURSOR_LINE_7_COLUMN_0 = + getCommandOutput(tput_set_cursor_line_7_column_0); CHANGE_BACKGROUND = getCommandOutput(tput_change_background); RESET_BACKGROUND = getCommandOutput(tput_reset_background); } @@ -712,6 +715,9 @@ protected QueueMetrics getQueueMetrics() { queueMetrics.allocatedVCores += stats.getAllocatedVCores(); queueMetrics.pendingVCores += stats.getPendingVCores(); queueMetrics.reservedVCores += stats.getReservedVCores(); + queueMetrics.allocatedContainers += stats.getAllocatedContainers(); + queueMetrics.pendingContainers += stats.getPendingContainers(); + queueMetrics.reservedContainers += stats.getReservedContainers(); } } queueMetrics.availableMemoryGB = queueMetrics.availableMemoryGB / 1024; @@ -793,12 +799,18 @@ String getHeader(QueueMetrics queueMetrics, NodesInformation nodes) { queueMetrics.availableVCores, queueMetrics.allocatedVCores, 
queueMetrics.pendingVCores, queueMetrics.reservedVCores), terminalWidth, true)); + + ret.append(CLEAR_LINE); + ret.append(limitLineLength(String.format( + "Queue(s) Containers: %d allocated, %d pending, %d reserved%n", + queueMetrics.allocatedContainers, queueMetrics.pendingContainers, + queueMetrics.reservedContainers), terminalWidth, true)); return ret.toString(); } String getPrintableAppInformation(List appsInfo) { StringBuilder ret = new StringBuilder(); - int limit = terminalHeight - 8; + int limit = terminalHeight - 9; List columns = new ArrayList<>(); for (int i = 0; i < limit; ++i) { ret.append(CLEAR_LINE); @@ -944,7 +956,7 @@ protected void showTopScreen() { synchronized (lock) { printHeader(header); printApps(appsStr); - System.out.print(SET_CURSOR_LINE_6_COLUMN_0); + System.out.print(SET_CURSOR_LINE_7_COLUMN_0); System.out.print(CLEAR_LINE); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueStatisticsPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueStatisticsPBImpl.java index 9506a5fa64ed4..ba394dc6adb14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueStatisticsPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueStatisticsPBImpl.java @@ -254,4 +254,40 @@ public void setReservedVCores(long reservedVCores) { maybeInitBuilder(); builder.setReservedVCores(reservedVCores); } + + @Override + public long getPendingContainers() { + QueueStatisticsProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasPendingContainers()) ? p.getPendingContainers() : -1; + } + + @Override + public void setPendingContainers(long pendingContainers) { + maybeInitBuilder(); + builder.setPendingContainers(pendingContainers); + } + + @Override + public long getAllocatedContainers() { + QueueStatisticsProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasAllocatedContainers()) ? p.getAllocatedContainers() : -1; + } + + @Override + public void setAllocatedContainers(long allocatedContainers) { + maybeInitBuilder(); + builder.setAllocatedContainers(allocatedContainers); + } + + @Override + public long getReservedContainers() { + QueueStatisticsProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasReservedContainers()) ? 
p.getReservedContainers() : -1; + } + + @Override + public void setReservedContainers(long reservedContainers) { + maybeInitBuilder(); + builder.setReservedContainers(reservedContainers); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 134b9414648f2..792c25c332baf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -323,6 +323,9 @@ public QueueStatistics getQueueStatistics() { stats.setAllocatedVCores(getMetrics().getAllocatedVirtualCores()); stats.setPendingVCores(getMetrics().getPendingVirtualCores()); stats.setReservedVCores(getMetrics().getReservedVirtualCores()); + stats.setPendingContainers(getMetrics().getPendingContainers()); + stats.setAllocatedContainers(getMetrics().getAllocatedContainers()); + stats.setReservedContainers(getMetrics().getReservedContainers()); return stats; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index 81b28fd5ec995..d85687116f3f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -57,6 +57,9 @@ public class CapacitySchedulerQueueInfo { protected ResourceInfo resourcesUsed; private boolean hideReservationQueues = false; protected ArrayList nodeLabels = new ArrayList(); + protected long allocatedContainers; + protected long reservedContainers; + protected long pendingContainers; CapacitySchedulerQueueInfo() { }; @@ -81,6 +84,9 @@ public class CapacitySchedulerQueueInfo { absoluteUsedCapacity = cap(qCapacities.getAbsoluteUsedCapacity(nodeLabel), 0f, 1f) * 100; numApplications = q.getNumApplications(); + allocatedContainers = q.getMetrics().getAllocatedContainers(); + pendingContainers = q.getMetrics().getPendingContainers(); + reservedContainers = q.getMetrics().getReservedContainers(); queueName = q.getQueueName(); state = q.getState(); resourcesUsed = new ResourceInfo(queueResourceUsage.getUsed(nodeLabel)); @@ -124,6 +130,18 @@ public int getNumApplications() { return numApplications; } + public long getAllocatedContainers() { + return allocatedContainers; + } + + public long getReservedContainers() { + return reservedContainers; + } + + public long getPendingContainers() { + return pendingContainers; + } + public String getQueueName() { return this.queueName; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java index 7ba0988be80e9..ee37f184edacd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java @@ -57,7 +57,11 @@ public class FairSchedulerQueueInfo { private ResourceInfo steadyFairResources; private ResourceInfo fairResources; private ResourceInfo clusterResources; - + + private long pendingContainers; + private long allocatedContainers; + private long reservedContainers; + private String queueName; private String schedulingPolicy; @@ -95,6 +99,10 @@ public FairSchedulerQueueInfo(FSQueue queue, FairScheduler scheduler) { maxApps = allocConf.getQueueMaxApps(queueName); + pendingContainers = queue.getMetrics().getPendingContainers(); + allocatedContainers = queue.getMetrics().getAllocatedContainers(); + reservedContainers = queue.getMetrics().getReservedContainers(); + if (allocConf.isReservable(queueName) && !allocConf.getShowReservationAsQueues(queueName)) { return; @@ -103,6 +111,18 @@ public FairSchedulerQueueInfo(FSQueue queue, FairScheduler scheduler) { childQueues = getChildQueues(queue, scheduler); } + public long getPendingContainers() { + return pendingContainers; + } + + public long getAllocatedContainers() { + return allocatedContainers; + } + + public long getReservedContainers() { + return reservedContainers; + } + protected FairSchedulerQueueInfoList getChildQueues(FSQueue queue, FairScheduler scheduler) { // Return null to omit 'childQueues' field from the return value of @@ -172,7 +192,7 @@ public String getQueueName() { public ResourceInfo getUsedResources() { return usedResources; } - + /** * Returns the queue's min share in as a fraction of the entire * cluster capacity. 
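
Because the DAO changes above only re-expose counters that QueueMetrics already maintains, the same totals can be pulled programmatically instead of scraping the REST response. A minimal sketch that aggregates the three new counters the way yarn top's getQueueMetrics loop does, assuming a reachable ResourceManager and eliding error handling:

import java.util.List;

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueStatistics;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class QueueContainerTotals {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    long allocated = 0, pending = 0, reserved = 0;
    // top-level queues only, so children are not double counted
    List<QueueInfo> queues = client.getRootQueueInfos();
    for (QueueInfo queue : queues) {
      QueueStatistics stats = queue.getQueueStatistics();
      if (stats == null) {
        continue; // RMs without these statistics simply report nothing
      }
      // the three counters added by YARN-3961; -1 means "not reported"
      allocated += Math.max(0, stats.getAllocatedContainers());
      pending += Math.max(0, stats.getPendingContainers());
      reserved += Math.max(0, stats.getReservedContainers());
    }
    System.out.printf(
        "Queue(s) Containers: %d allocated, %d pending, %d reserved%n",
        allocated, pending, reserved);
    client.stop();
  }
}

The printf format intentionally matches the header row TopCLI now emits, and the extra fields are also why TestRMWebServicesCapacitySched, in the diff that follows, expects 16 and 28 JSON elements instead of 13 and 25.
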
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index 456595de23412..dad720e74c856 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -349,10 +349,10 @@ private void verifyClusterSchedulerGeneric(String type, float usedCapacity, private void verifySubQueue(JSONObject info, String q, float parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { - int numExpectedElements = 13; + int numExpectedElements = 16; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 25; + numExpectedElements = 28; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); From e1c229699667a10c8311e59026c7fcf8c1bec4b0 Mon Sep 17 00:00:00 2001 From: Junping Du Date: Thu, 6 Aug 2015 06:49:45 -0700 Subject: [PATCH 071/130] YARN-4019. Add JvmPauseMonitor to ResourceManager and NodeManager. Contributed by Robert Kanter. --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../hadoop/yarn/server/nodemanager/NodeManager.java | 13 +++++++++++-- .../nodemanager/metrics/NodeManagerMetrics.java | 13 +++++++++++-- .../server/resourcemanager/ResourceManager.java | 11 ++++++++++- 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6b409ddd5c639..1840b1bdec3e2 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -374,6 +374,9 @@ Release 2.8.0 - UNRELEASED YARN-3961. Expose pending, running and reserved containers of a queue in REST api and yarn top (adhoot via asuresh) + YARN-4019. Add JvmPauseMonitor to ResourceManager and NodeManager. (Robert Kanter + via junping_du) + OPTIMIZATIONS YARN-3339. 
TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index b8889eeded8cd..a06293dc8bb6d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.NodeHealthScriptRunner; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; @@ -83,6 +84,7 @@ public class NodeManager extends CompositeService private static final Log LOG = LogFactory.getLog(NodeManager.class); private static long nmStartupTime = System.currentTimeMillis(); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); + private JvmPauseMonitor pauseMonitor; private ApplicationACLsManager aclsManager; private NodeHealthCheckerService nodeHealthChecker; private NodeLabelsProvider nodeLabelsProvider; @@ -307,13 +309,16 @@ protected void serviceInit(Configuration conf) throws Exception { dispatcher.register(ContainerManagerEventType.class, containerManager); dispatcher.register(NodeManagerEventType.class, this); addService(dispatcher); - + + pauseMonitor = new JvmPauseMonitor(conf); + metrics.getJvmMetrics().setPauseMonitor(pauseMonitor); + DefaultMetricsSystem.initialize("NodeManager"); // StatusUpdater should be added last so that it get started last // so that we make sure everything is up before registering with RM. addService(nodeStatusUpdater); - + super.serviceInit(conf); // TODO add local dirs to del } @@ -325,6 +330,7 @@ protected void serviceStart() throws Exception { } catch (IOException e) { throw new YarnRuntimeException("Failed NodeManager login", e); } + pauseMonitor.start(); super.serviceStart(); } @@ -336,6 +342,9 @@ protected void serviceStop() throws Exception { try { super.serviceStop(); DefaultMetricsSystem.shutdown(); + if (pauseMonitor != null) { + pauseMonitor.stop(); + } } finally { // YARN-3641: NM's services stop get failed shouldn't block the // release of NMLevelDBStore. 
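
The NodeManager wiring above is the same three-step recipe this patch applies to the ResourceManager further down, and that MAPREDUCE-6443 applies to the JobHistoryServer in the next commit: construct the monitor from the daemon's configuration during init, start it once the service comes up, and stop it defensively on shutdown. Stripped of the service framework, the lifecycle reduces to the following sketch (the wrapper class is hypothetical; only JvmPauseMonitor and its start/stop calls come from these patches):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.JvmPauseMonitor;

public class PauseMonitorLifecycle {
  public static void main(String[] args) throws Exception {
    // serviceInit(): build the monitor against the daemon's Configuration
    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
    // serviceStart(): spawns the background thread that watches for long
    // JVM pauses (GC stalls and the like)
    pauseMonitor.start();
    try {
      Thread.sleep(10000L); // stand-in for the daemon's working lifetime
    } finally {
      // serviceStop(): null-guarded in the daemons, since stop can run
      // even when an earlier init failure left the monitor unset
      if (pauseMonitor != null) {
        pauseMonitor.stop();
      }
    }
  }
}

Registering the monitor with the daemon's JvmMetrics, as NodeManager does above via metrics.getJvmMetrics().setPauseMonitor(pauseMonitor), is what turns detected pauses into reported metrics rather than just log lines.
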
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index 400f14bfcc17e..56797d11c8e2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -57,17 +57,26 @@ public class NodeManagerMetrics { @Metric("Disk utilization % on good log dirs") MutableGaugeInt goodLogDirsDiskUtilizationPerc; + private JvmMetrics jvmMetrics = null; private long allocatedMB; private long availableMB; + public NodeManagerMetrics(JvmMetrics jvmMetrics) { + this.jvmMetrics = jvmMetrics; + } + public static NodeManagerMetrics create() { return create(DefaultMetricsSystem.instance()); } static NodeManagerMetrics create(MetricsSystem ms) { - JvmMetrics.create("NodeManager", null, ms); - return ms.register(new NodeManagerMetrics()); + JvmMetrics jm = JvmMetrics.create("NodeManager", null, ms); + return ms.register(new NodeManagerMetrics(jm)); + } + + public JvmMetrics getJvmMetrics() { + return jvmMetrics; } // Potential instrumentation interface methods diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 1b606b4654a90..817565b572be9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; @@ -157,6 +158,7 @@ public class ResourceManager extends CompositeService implements Recoverable { private WebApp webApp; private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; + private JvmPauseMonitor pauseMonitor; @VisibleForTesting protected String webAppAddress; @@ -511,7 +513,9 @@ protected void serviceInit(Configuration configuration) throws Exception { rmContext.setResourceTrackerService(resourceTracker); DefaultMetricsSystem.initialize("ResourceManager"); - JvmMetrics.initSingleton("ResourceManager", null); + JvmMetrics jm = JvmMetrics.initSingleton("ResourceManager", null); + pauseMonitor = new JvmPauseMonitor(conf); + jm.setPauseMonitor(pauseMonitor); // Initialize the Reservation system if (conf.getBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, @@ -566,6 +570,8 @@ protected void serviceStart() throws Exception { // need events to move to further states. 
rmStore.start(); + pauseMonitor.start(); + if(recoveryEnabled) { try { LOG.info("Recovery started"); @@ -591,6 +597,9 @@ protected void serviceStart() throws Exception { protected void serviceStop() throws Exception { DefaultMetricsSystem.shutdown(); + if (pauseMonitor != null) { + pauseMonitor.stop(); + } if (rmContext != null) { RMStateStore store = rmContext.getStateStore(); From 5f6168ba389b861de405c5729c914d6a24e339db Mon Sep 17 00:00:00 2001 From: Junping Du Date: Thu, 6 Aug 2015 07:00:42 -0700 Subject: [PATCH 072/130] MAPREDUCE-6443. Add JvmPauseMonitor to JobHistoryServer. Contributed by Robert Kanter. (cherry picked from commit c8469357bad481ca8e341050553a5ae6d98bf8e5) Conflicts: hadoop-mapreduce-project/CHANGES.txt --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop/mapreduce/v2/hs/JobHistoryServer.java | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 2001c5764d9c0..e910cddf199dd 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED MAPREDUCE-6384. Add the last reporting reducer info for too many fetch failure diagnostics (Chang Li via jlowe) + MAPREDUCE-6443. Add JvmPauseMonitor to JobHistoryServer. (Robert Kanter + via junping_du) + OPTIMIZATIONS MAPREDUCE-6376. Add avro binary support for jhist files (Ray Chiang via diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java index 252ac55d4d3c8..b5ac91a9376fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java @@ -38,6 +38,7 @@ import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; @@ -70,6 +71,7 @@ public class JobHistoryServer extends CompositeService { private AggregatedLogDeletionService aggLogDelService; private HSAdminServer hsAdminServer; private HistoryServerStateStoreService stateStore; + private JvmPauseMonitor pauseMonitor; // utility class to start and stop secret manager as part of service // framework and implement state recovery for secret manager on startup @@ -140,6 +142,12 @@ protected void serviceInit(Configuration conf) throws Exception { addService(clientService); addService(aggLogDelService); addService(hsAdminServer); + + DefaultMetricsSystem.initialize("JobHistoryServer"); + JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null); + pauseMonitor = new JvmPauseMonitor(getConfig()); + jm.setPauseMonitor(pauseMonitor); + super.serviceInit(config); } @@ -190,14 +198,16 @@ public static InetSocketAddress getBindAddress(Configuration conf) { @Override protected void serviceStart() throws Exception { - DefaultMetricsSystem.initialize("JobHistoryServer"); - JvmMetrics.initSingleton("JobHistoryServer", null); + 
pauseMonitor.start();
     super.serviceStart();
   }
 
   @Override
   protected void serviceStop() throws Exception {
     DefaultMetricsSystem.shutdown();
+    if (pauseMonitor != null) {
+      pauseMonitor.stop();
+    }
     super.serviceStop();
   }
 
From 87352924a4203af3eea824c9cd6d0269af2af8e1 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Thu, 6 Aug 2015 10:13:18 -0700
Subject: [PATCH 073/130] MAPREDUCE-6257. Document encrypted spills (Bibin A Chundatt via aw)

---
 hadoop-mapreduce-project/CHANGES.txt              |  2 ++
 .../src/main/resources/mapred-default.xml         | 20 +++++++++++++++++++
 .../src/site/markdown/EncryptedShuffle.md         |  8 +++++++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e910cddf199dd..c424667132a16 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -206,6 +206,8 @@ Trunk (Unreleased)
     MAPREDUCE-6406. Update FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT
     to match mapred-default.xml. (Ray Chiang via devaraj)
 
+    MAPREDUCE-6257. Document encrypted spills (Bibin A Chundatt via aw)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
     MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index ddcd2dfdee0ff..6d205c58618e6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1760,4 +1760,24 @@
+<property>
+  <name>mapreduce.job.encrypted-intermediate-data</name>
+  <value>false</value>
+  <description>Encrypt intermediate MapReduce spill files or not
+  default is false</description>
+</property>
+
+<property>
+  <name>mapreduce.job.encrypted-intermediate-data-key-size-bits</name>
+  <value>128</value>
+  <description>Mapreduce encrypt data key size default is 128</description>
+</property>
+
+<property>
+  <name>mapreduce.job.encrypted-intermediate-data.buffer.kb</name>
+  <value>128</value>
+  <description>Buffer size for intermediate encrypt data in kb
+  default is 128</description>
+</property>
+
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
index c23be7a992907..fddd84f82362f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
@@ -260,4 +260,10 @@ Encrypted Intermediate Data Spill files
 
 This capability allows encryption of the intermediate files generated during the merge and shuffle phases. It can be enabled by setting the `mapreduce.job.encrypted-intermediate-data` job property to `true`.
 
-**NOTE:** Currently, enabling encrypted intermediate data spills would restrict the number of attempts of the job to 1.
\ No newline at end of file
+| Name | Type | Description |
+|:---- |:---- |:---- |
+| mapreduce.job.encrypted-intermediate-data | boolean | Enable or disable encryption of intermediate MapReduce spill files. Default is false. |
+| mapreduce.job.encrypted-intermediate-data-key-size-bits | int | The key length used by keygenerator to encrypt data spilled to disk.
| +| mapreduce.job.encrypted-intermediate-data.buffer.kb | int | The buffer size in kb for stream written to disk after encryption. | + +**NOTE:** Currently, enabling encrypted intermediate data spills would restrict the number of attempts of the job to 1. From 2d07300a4f72d97115e2d59d809e99aabbb350f0 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Wed, 5 Aug 2015 17:41:54 -0700 Subject: [PATCH 074/130] Revert "HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery blocks. Contributed by Zhe Zhang." This reverts commit de480d6c8945bd8b5b00e8657b7a72ce8dd9b6b5. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 - .../server/blockmanagement/BlockInfo.java | 12 +- .../blockmanagement/BlockInfoContiguous.java | 2 +- .../BlockInfoUnderConstructionContiguous.java | 2 +- .../server/blockmanagement/BlockManager.java | 576 ++++++++---------- .../blockmanagement/DatanodeStorageInfo.java | 15 +- .../hdfs/server/namenode/FSNamesystem.java | 18 +- .../hdfs/server/namenode/NamenodeFsck.java | 2 +- .../server/blockmanagement/TestBlockInfo.java | 2 +- .../blockmanagement/TestBlockManager.java | 4 +- .../server/blockmanagement/TestNodeCount.java | 2 +- .../TestOverReplicatedBlocks.java | 4 +- .../TestReplicationPolicy.java | 2 +- 13 files changed, 284 insertions(+), 360 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 40f91f9f9698d..59623850f561e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -689,9 +689,6 @@ Release 2.8.0 - UNRELEASED HDFS-8651. Make hadoop-hdfs-project Native code -Wall-clean (Alan Burlison via Colin P. McCabe) - HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery - blocks. (Zhe Zhang via jing9) - HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. (Zhe Zhang via wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 5ad992b5c729c..4cc2791e75456 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -172,23 +172,19 @@ public int getCapacity() { public abstract int numNodes(); /** - * Add a {@link DatanodeStorageInfo} location for a block - * @param storage The storage to add - * @param reportedBlock The block reported from the datanode. This is only - * used by erasure coded blocks, this block's id contains - * information indicating the index of the block in the - * corresponding block group. + * Add a {@link DatanodeStorageInfo} location for a block. */ - abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock); + abstract boolean addStorage(DatanodeStorageInfo storage); /** * Remove {@link DatanodeStorageInfo} location for a block */ abstract boolean removeStorage(DatanodeStorageInfo storage); + /** * Replace the current BlockInfo with the new one in corresponding - * DatanodeStorageInfo's linked list. 
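The three mapreduce.job.encrypted-intermediate-data* properties documented by MAPREDUCE-6257 above are ordinary job-level configuration. A minimal sketch of a driver that turns them on; the property names are the real ones added to mapred-default.xml, while EncryptedSpillDriver and the job setup are purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class EncryptedSpillDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Property names as added to mapred-default.xml by MAPREDUCE-6257.
    conf.setBoolean("mapreduce.job.encrypted-intermediate-data", true);
    conf.setInt("mapreduce.job.encrypted-intermediate-data-key-size-bits", 128);
    conf.setInt("mapreduce.job.encrypted-intermediate-data.buffer.kb", 128);

    Job job = Job.getInstance(conf, "encrypted-spill-example");
    // ... set jar, mapper, reducer, and input/output paths as usual ...
    // Per the doc note above: enabling encrypted spills currently
    // restricts the job to a single attempt.
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}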
+ * DatanodeStorageInfo's linked list */ abstract void replaceBlock(BlockInfo newBlock); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index de64ad84b86b8..b9abcd03f2960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -45,7 +45,7 @@ protected BlockInfoContiguous(BlockInfo from) { } @Override - boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { + boolean addStorage(DatanodeStorageInfo storage) { return ContiguousBlockStorageOp.addStorage(this, storage); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java index d3cb337b1214e..c66675a29a4bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java @@ -69,7 +69,7 @@ public BlockInfoContiguous convertToCompleteBlock() { } @Override - boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { + boolean addStorage(DatanodeStorageInfo storage) { return ContiguousBlockStorageOp.addStorage(this, storage); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 3ffd1bf2659ed..1597f419ff98e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -203,8 +203,8 @@ public int getPendingDataNodeMessageCount() { * Maps a StorageID to the set of blocks that are "extra" for this * DataNode. We'll eventually remove these extras. */ - public final Map> excessReplicateMap = - new TreeMap<>(); + public final Map> excessReplicateMap = + new TreeMap>(); /** * Store set of Blocks that need to be replicated 1 or more times. @@ -508,8 +508,8 @@ public void setBlockPlacementPolicy(BlockPlacementPolicy newpolicy) { /** Dump meta data to out. 
*/ public void metaSave(PrintWriter out) { assert namesystem.hasWriteLock(); - final List live = new ArrayList<>(); - final List dead = new ArrayList<>(); + final List live = new ArrayList(); + final List dead = new ArrayList(); datanodeManager.fetchDatanodes(live, dead, false); out.println("Live Datanodes: " + live.size()); out.println("Dead Datanodes: " + dead.size()); @@ -548,8 +548,8 @@ private void dumpBlockMeta(Block block, PrintWriter out) { List containingNodes = new ArrayList(); List containingLiveReplicasNodes = - new ArrayList<>(); - + new ArrayList(); + NumberReplicas numReplicas = new NumberReplicas(); // source node returned is not used chooseSourceDatanode(block, containingNodes, @@ -578,7 +578,7 @@ private void dumpBlockMeta(Block block, PrintWriter out) { Collection corruptNodes = corruptReplicas.getNodes(block); - for (DatanodeStorageInfo storage : getStorages(block)) { + for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); String state = ""; if (corruptNodes != null && corruptNodes.contains(node)) { @@ -601,23 +601,11 @@ public int getMaxReplicationStreams() { return maxReplicationStreams; } - public int getDefaultStorageNum(BlockInfo block) { - return defaultReplication; - } - - public short getMinStorageNum(BlockInfo block) { - return minReplication; - } - /** - * @return true if the block has minimum stored copies + * @return true if the block has minimum replicas */ - public boolean hasMinStorage(BlockInfo block) { - return hasMinStorage(block, countNodes(block).liveReplicas()); - } - - public boolean hasMinStorage(BlockInfo block, int liveNum) { - return liveNum >= getMinStorageNum(block); + public boolean checkMinReplication(BlockInfo block) { + return (countNodes(block).liveReplicas() >= minReplication); } /** @@ -632,9 +620,8 @@ public boolean hasMinStorage(BlockInfo block, int liveNum) { private static boolean commitBlock( final BlockInfoUnderConstruction block, final Block commitBlock) throws IOException { - if (block.getBlockUCState() == BlockUCState.COMMITTED) { + if (block.getBlockUCState() == BlockUCState.COMMITTED) return false; - } assert block.getNumBytes() <= commitBlock.getNumBytes() : "commitBlock length is less than the stored one " + commitBlock.getNumBytes() + " vs. " + block.getNumBytes(); @@ -654,22 +641,18 @@ private static boolean commitBlock( */ public boolean commitOrCompleteLastBlock(BlockCollection bc, Block commitBlock) throws IOException { - if (commitBlock == null) { + if(commitBlock == null) return false; // not committing, this is a block allocation retry - } BlockInfo lastBlock = bc.getLastBlock(); - if (lastBlock == null) { + if(lastBlock == null) return false; // no blocks in file yet - } - if (lastBlock.isComplete()) { + if(lastBlock.isComplete()) return false; // already completed (e.g. 
by syncBlock) - } - + final boolean b = commitBlock( (BlockInfoUnderConstruction) lastBlock, commitBlock); - if(hasMinStorage(lastBlock)) { + if(countNodes(lastBlock).liveReplicas() >= minReplication) completeBlock(bc, bc.numBlocks()-1, false); - } return b; } @@ -682,24 +665,20 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc, */ private BlockInfo completeBlock(final BlockCollection bc, final int blkIndex, boolean force) throws IOException { - if (blkIndex < 0) { + if(blkIndex < 0) return null; - } BlockInfo curBlock = bc.getBlocks()[blkIndex]; - if(curBlock.isComplete()) { + if(curBlock.isComplete()) return curBlock; - } BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction) curBlock; int numNodes = ucBlock.numNodes(); - if (!force && !hasMinStorage(curBlock, numNodes)) { + if (!force && numNodes < minReplication) throw new IOException("Cannot complete block: " + "block does not satisfy minimal replication requirement."); - } - if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED) { + if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED) throw new IOException( "Cannot complete block: block has not been COMMITTED by the client"); - } BlockInfo completeBlock = ucBlock.convertToCompleteBlock(); // replace penultimate block in file bc.setBlock(blkIndex, completeBlock); @@ -784,7 +763,7 @@ public LocatedBlock convertLastBlockToUnderConstruction( // count in safe-mode. namesystem.adjustSafeModeBlockTotals( // decrement safe if we had enough - hasMinStorage(oldBlock, targets.length) ? -1 : 0, + targets.length >= minReplication ? -1 : 0, // always decrement total blocks -1); @@ -798,8 +777,8 @@ public LocatedBlock convertLastBlockToUnderConstruction( */ private List getValidLocations(Block block) { final List locations - = new ArrayList<>(blocksMap.numNodes(block)); - for(DatanodeStorageInfo storage : getStorages(block)) { + = new ArrayList(blocksMap.numNodes(block)); + for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) { // filter invalidate replicas if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) { locations.add(storage); @@ -812,7 +791,7 @@ private List createLocatedBlockList( final BlockInfo[] blocks, final long offset, final long length, final int nrBlocksToReturn, final AccessMode mode) throws IOException { - int curBlk; + int curBlk = 0; long curPos = 0, blkSize = 0; int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length; for (curBlk = 0; curBlk < nrBlocks; curBlk++) { @@ -825,10 +804,10 @@ private List createLocatedBlockList( } if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file - return Collections.emptyList(); + return Collections.emptyList(); long endOff = offset + length; - List results = new ArrayList<>(blocks.length); + List results = new ArrayList(blocks.length); do { results.add(createLocatedBlock(blocks[curBlk], curPos, mode)); curPos += blocks[curBlk].getNumBytes(); @@ -841,7 +820,7 @@ private List createLocatedBlockList( private LocatedBlock createLocatedBlock(final BlockInfo[] blocks, final long endPos, final AccessMode mode) throws IOException { - int curBlk; + int curBlk = 0; long curPos = 0; int nrBlocks = (blocks[0].getNumBytes() == 0) ? 
0 : blocks.length; for (curBlk = 0; curBlk < nrBlocks; curBlk++) { @@ -865,8 +844,8 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos, } /** @return a LocatedBlock for the given block */ - private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) - throws IOException { + private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos + ) throws IOException { if (blk instanceof BlockInfoUnderConstruction) { if (blk.isComplete()) { throw new IOException( @@ -876,8 +855,7 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction) blk; final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); - final ExtendedBlock eb = - new ExtendedBlock(namesystem.getBlockPoolId(), blk); + final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); return newLocatedBlock(eb, storages, pos, false); } @@ -897,12 +875,11 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) final DatanodeStorageInfo[] machines = new DatanodeStorageInfo[numMachines]; int j = 0; if (numMachines > 0) { - for(DatanodeStorageInfo storage : getStorages(blk)) { + for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) { final DatanodeDescriptor d = storage.getDatanodeDescriptor(); final boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blk, d); - if (isCorrupt || (!replicaCorrupt)) { + if (isCorrupt || (!replicaCorrupt)) machines[j++] = storage; - } } } assert j == machines.length : @@ -1076,7 +1053,7 @@ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, for(int i=0; i results = new ArrayList<>(); + List results = new ArrayList(); long totalSize = 0; BlockInfo curBlock; while(totalSize it = node.getBlockIterator(); + final Iterator it = node.getBlockIterator(); while(it.hasNext()) { removeStoredBlock(it.next(), node); } @@ -1114,10 +1091,10 @@ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { /** Remove the blocks associated to the given DatanodeStorageInfo. */ void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) { assert namesystem.hasWriteLock(); - final Iterator it = storageInfo.getBlockIterator(); + final Iterator it = storageInfo.getBlockIterator(); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); while(it.hasNext()) { - BlockInfo block = it.next(); + Block block = it.next(); removeStoredBlock(block, node); invalidateBlocks.remove(node, block); } @@ -1139,19 +1116,18 @@ void addToInvalidates(final Block block, final DatanodeInfo datanode) { * Adds block to list of blocks which will be invalidated on all its * datanodes. 
*/ - private void addToInvalidates(BlockInfo storedBlock) { + private void addToInvalidates(Block b) { if (!namesystem.isPopulatingReplQueues()) { return; } StringBuilder datanodes = new StringBuilder(); - for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock, - State.NORMAL)) { + for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); - invalidateBlocks.add(storedBlock, node, false); + invalidateBlocks.add(b, node, false); datanodes.append(node).append(" "); } if (datanodes.length() != 0) { - blockLog.debug("BLOCK* addToInvalidates: {} {}", storedBlock, + blockLog.debug("BLOCK* addToInvalidates: {} {}", b, datanodes.toString()); } } @@ -1179,8 +1155,7 @@ void removeFromInvalidates(final DatanodeInfo datanode) { public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, final DatanodeInfo dn, String storageID, String reason) throws IOException { assert namesystem.hasWriteLock(); - final Block reportedBlock = blk.getLocalBlock(); - final BlockInfo storedBlock = getStoredBlock(reportedBlock); + final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock()); if (storedBlock == null) { // Check if the replica is in the blockMap, if not // ignore the request for now. This could happen when BlockScanner @@ -1196,8 +1171,8 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid() + ") does not exist"); } - - markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock, + + markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), storageID == null ? null : node.getStorageInfo(storageID), node); @@ -1213,18 +1188,18 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeStorageInfo storageInfo, DatanodeDescriptor node) throws IOException { - if (b.stored.isDeleted()) { + if (b.corrupted.isDeleted()) { blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" + " corrupt as it does not belong to any file", b); addToInvalidates(b.corrupted, node); return; } short expectedReplicas = - getExpectedReplicaNum(b.stored.getBlockCollection(), b.stored); + b.corrupted.getBlockCollection().getPreferredBlockReplication(); // Add replica to the data-node if it is not already there if (storageInfo != null) { - storageInfo.addBlock(b.stored, b.corrupted); + storageInfo.addBlock(b.stored); } // Add this replica to corruptReplicas Map @@ -1234,8 +1209,8 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, NumberReplicas numberOfReplicas = countNodes(b.stored); boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= expectedReplicas; - boolean minReplicationSatisfied = hasMinStorage(b.stored, - numberOfReplicas.liveReplicas()); + boolean minReplicationSatisfied = + numberOfReplicas.liveReplicas() >= minReplication; boolean hasMoreCorruptReplicas = minReplicationSatisfied && (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > expectedReplicas; @@ -1378,7 +1353,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { int additionalReplRequired; int scheduledWork = 0; - List work = new LinkedList<>(); + List work = new LinkedList(); namesystem.writeLock(); try { @@ -1395,11 +1370,11 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { continue; } - requiredReplication = getExpectedReplicaNum(bc, block); + requiredReplication = bc.getPreferredBlockReplication(); // get a source data-node - 
containingNodes = new ArrayList<>(); - List liveReplicaNodes = new ArrayList<>(); + containingNodes = new ArrayList(); + List liveReplicaNodes = new ArrayList(); NumberReplicas numReplicas = new NumberReplicas(); srcNode = chooseSourceDatanode( block, containingNodes, liveReplicaNodes, numReplicas, @@ -1419,7 +1394,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { if (numEffectiveReplicas >= requiredReplication) { if ( (pendingReplications.getNumReplicas(block) > 0) || - (blockHasEnoughRacks(block, requiredReplication)) ) { + (blockHasEnoughRacks(block)) ) { neededReplications.remove(block, priority); // remove from neededReplications blockLog.debug("BLOCK* Removing {} from neededReplications as" + " it has enough replicas", block); @@ -1443,7 +1418,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { namesystem.writeUnlock(); } - final Set excludedNodes = new HashSet<>(); + final Set excludedNodes = new HashSet(); for(ReplicationWork rw : work){ // Exclude all of the containing nodes from being targets. // This list includes decommissioning or corrupt nodes. @@ -1479,7 +1454,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { rw.targets = null; continue; } - requiredReplication = getExpectedReplicaNum(bc, block); + requiredReplication = bc.getPreferredBlockReplication(); // do not schedule more if enough replicas is already pending NumberReplicas numReplicas = countNodes(block); @@ -1488,7 +1463,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { if (numEffectiveReplicas >= requiredReplication) { if ( (pendingReplications.getNumReplicas(block) > 0) || - (blockHasEnoughRacks(block, requiredReplication)) ) { + (blockHasEnoughRacks(block)) ) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; blockLog.debug("BLOCK* Removing {} from neededReplications as" + @@ -1498,7 +1473,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { } if ( (numReplicas.liveReplicas() >= requiredReplication) && - (!blockHasEnoughRacks(block, requiredReplication)) ) { + (!blockHasEnoughRacks(block)) ) { if (rw.srcNode.getNetworkLocation().equals( targets[0].getDatanodeDescriptor().getNetworkLocation())) { //No use continuing, unless a new rack in this case @@ -1613,7 +1588,7 @@ public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src, List getDatanodeDescriptors(List nodes) { List datanodeDescriptors = null; if (nodes != null) { - datanodeDescriptors = new ArrayList<>(nodes.size()); + datanodeDescriptors = new ArrayList(nodes.size()); for (int i = 0; i < nodes.size(); i++) { DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i)); if (node != null) { @@ -1669,9 +1644,9 @@ DatanodeDescriptor chooseSourceDatanode(Block block, int excess = 0; Collection nodesCorrupt = corruptReplicas.getNodes(block); - for(DatanodeStorageInfo storage : getStorages(block)) { + for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); - LightWeightLinkedSet excessBlocks = + LightWeightLinkedSet excessBlocks = excessReplicateMap.get(node.getDatanodeUuid()); int countableReplica = storage.getState() == State.NORMAL ? 1 : 0; if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) @@ -1739,7 +1714,7 @@ private void processPendingReplications() { * Use the blockinfo from the blocksmap to be certain we're working * with the most up-to-date block information (e.g. genstamp). 
*/ - BlockInfo bi = getStoredBlock(timedOutItems[i]); + BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]); if (bi == null) { continue; } @@ -1789,7 +1764,7 @@ static class StatefulBlockInfo { final BlockInfoUnderConstruction storedBlock; final Block reportedBlock; final ReplicaState reportedState; - + StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, Block reportedBlock, ReplicaState reportedState) { this.storedBlock = storedBlock; @@ -1797,34 +1772,14 @@ static class StatefulBlockInfo { this.reportedState = reportedState; } } - - private static class BlockInfoToAdd { - private final BlockInfo stored; - private final Block reported; - - BlockInfoToAdd(BlockInfo stored, Block reported) { - this.stored = stored; - this.reported = reported; - } - - public BlockInfo getStored() { - return stored; - } - - public Block getReported() { - return reported; - } - } - + /** * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a * list of blocks that should be considered corrupt due to a block report. */ private static class BlockToMarkCorrupt { - /** The corrupted block in a datanode. This is the one reported by the - * datanode. - */ - final Block corrupted; + /** The corrupted block in a datanode. */ + final BlockInfo corrupted; /** The corresponding block stored in the BlockManager. */ final BlockInfo stored; /** The reason to mark corrupt. */ @@ -1832,7 +1787,7 @@ private static class BlockToMarkCorrupt { /** The reason code to be stored */ final Reason reasonCode; - BlockToMarkCorrupt(Block corrupted, + BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason, Reason reasonCode) { Preconditions.checkNotNull(corrupted, "corrupted is null"); @@ -1844,9 +1799,15 @@ private static class BlockToMarkCorrupt { this.reasonCode = reasonCode; } - BlockToMarkCorrupt(Block corrupted, BlockInfo stored, long gs, - String reason, Reason reasonCode) { - this(corrupted, stored, reason, reasonCode); + BlockToMarkCorrupt(BlockInfo stored, String reason, + Reason reasonCode) { + this(stored, stored, reason, reasonCode); + } + + BlockToMarkCorrupt(BlockInfo stored, long gs, String reason, + Reason reasonCode) { + this(new BlockInfoContiguous(stored), stored, + reason, reasonCode); //the corrupted block in datanode has a different generation stamp corrupted.setGenerationStamp(gs); } @@ -2033,7 +1994,7 @@ void rescanPostponedMisreplicatedBlocks() { break; } - BlockInfo bi = getStoredBlock(b); + BlockInfo bi = blocksMap.getStoredBlock(b); if (bi == null) { if (LOG.isDebugEnabled()) { LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " + @@ -2065,7 +2026,7 @@ void rescanPostponedMisreplicatedBlocks() { endPostponedMisReplicatedBlocksCount) + " blocks are removed."); } } - + private Collection processReport( final DatanodeStorageInfo storageInfo, final BlockListAsLongs report) throws IOException { @@ -2073,26 +2034,25 @@ private Collection processReport( // Modify the (block-->datanode) map, according to the difference // between the old and new block report. 
// - Collection toAdd = new LinkedList<>(); - Collection toRemove = new TreeSet<>(); - Collection toInvalidate = new LinkedList<>(); - Collection toCorrupt = new LinkedList<>(); - Collection toUC = new LinkedList<>(); + Collection toAdd = new LinkedList(); + Collection toRemove = new TreeSet(); + Collection toInvalidate = new LinkedList(); + Collection toCorrupt = new LinkedList(); + Collection toUC = new LinkedList(); reportDiff(storageInfo, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); - + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue - for (StatefulBlockInfo b : toUC) { + for (StatefulBlockInfo b : toUC) { addStoredBlockUnderConstruction(b, storageInfo); } - for (BlockInfo b : toRemove) { + for (Block b : toRemove) { removeStoredBlock(b, node); } int numBlocksLogged = 0; - for (BlockInfoToAdd b : toAdd) { - addStoredBlock(b.getStored(), b.getReported(), storageInfo, null, - numBlocksLogged < maxNumBlocksToLog); + for (BlockInfo b : toAdd) { + addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -2113,17 +2073,17 @@ private Collection processReport( * Mark block replicas as corrupt except those on the storages in * newStorages list. */ - public void markBlockReplicasAsCorrupt(Block oldBlock, BlockInfo block, - long oldGenerationStamp, long oldNumBytes, + public void markBlockReplicasAsCorrupt(BlockInfo block, + long oldGenerationStamp, long oldNumBytes, DatanodeStorageInfo[] newStorages) throws IOException { assert namesystem.hasWriteLock(); BlockToMarkCorrupt b = null; if (block.getGenerationStamp() != oldGenerationStamp) { - b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp, + b = new BlockToMarkCorrupt(block, oldGenerationStamp, "genstamp does not match " + oldGenerationStamp + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (block.getNumBytes() != oldNumBytes) { - b = new BlockToMarkCorrupt(oldBlock, block, + b = new BlockToMarkCorrupt(block, "length does not match " + oldNumBytes + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH); } else { @@ -2182,7 +2142,7 @@ private void processFirstBlockReport( continue; } - BlockInfo storedBlock = getStoredBlock(iblk); + BlockInfo storedBlock = blocksMap.getStoredBlock(iblk); // If block does not belong to any file, we are done. 
if (storedBlock == null) continue; @@ -2220,26 +2180,24 @@ private void processFirstBlockReport( } //add replica if appropriate if (reportedState == ReplicaState.FINALIZED) { - addStoredBlockImmediate(storedBlock, iblk, storageInfo); + addStoredBlockImmediate(storedBlock, storageInfo); } } } - private void reportDiff(DatanodeStorageInfo storageInfo, - BlockListAsLongs newReport, - Collection toAdd, // add to DatanodeDescriptor - Collection toRemove, // remove from DatanodeDescriptor + private void reportDiff(DatanodeStorageInfo storageInfo, + BlockListAsLongs newReport, + Collection toAdd, // add to DatanodeDescriptor + Collection toRemove, // remove from DatanodeDescriptor Collection toInvalidate, // should be removed from DN Collection toCorrupt, // add to corrupt replicas list Collection toUC) { // add to under-construction list - // place a delimiter in the list which separates blocks + // place a delimiter in the list which separates blocks // that have been reported from those that have not - Block delimiterBlock = new Block(); - BlockInfo delimiter = new BlockInfoContiguous(delimiterBlock, - (short) 1); - AddBlockResult result = storageInfo.addBlock(delimiter, delimiterBlock); - assert result == AddBlockResult.ADDED + BlockInfo delimiter = new BlockInfoContiguous(new Block(), (short) 1); + AddBlockResult result = storageInfo.addBlock(delimiter); + assert result == AddBlockResult.ADDED : "Delimiting block cannot be present in the node"; int headIndex = 0; //currently the delimiter is in the head of the list int curIndex; @@ -2256,8 +2214,7 @@ private void reportDiff(DatanodeStorageInfo storageInfo, // move block to the head of the list if (storedBlock != null && (curIndex = storedBlock.findStorageInfo(storageInfo)) >= 0) { - headIndex = - storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex); + headIndex = storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex); } } @@ -2265,9 +2222,8 @@ private void reportDiff(DatanodeStorageInfo storageInfo, // all of them are next to the delimiter Iterator it = storageInfo.new BlockIterator(delimiter.getNext(0)); - while (it.hasNext()) { + while(it.hasNext()) toRemove.add(it.next()); - } storageInfo.removeBlock(delimiter); } @@ -2304,12 +2260,12 @@ private void reportDiff(DatanodeStorageInfo storageInfo, */ private BlockInfo processReportedBlock( final DatanodeStorageInfo storageInfo, - final Block block, final ReplicaState reportedState, - final Collection toAdd, - final Collection toInvalidate, + final Block block, final ReplicaState reportedState, + final Collection toAdd, + final Collection toInvalidate, final Collection toCorrupt, final Collection toUC) { - + DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); if(LOG.isDebugEnabled()) { @@ -2317,16 +2273,16 @@ private BlockInfo processReportedBlock( + " on " + dn + " size " + block.getNumBytes() + " replicaState = " + reportedState); } - + if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(block)) { queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); return null; } - + // find block by blockId - BlockInfo storedBlock = getStoredBlock(block); + BlockInfo storedBlock = blocksMap.getStoredBlock(block); if(storedBlock == null) { // If blocksMap does not contain reported block id, // the replica should be removed from the data-node. 
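The processReport/reportDiff machinery in the hunks above classifies every replica in a full block report into exactly one to-do list, then drains the lists in a fixed order (under-construction first, then removals, additions, and invalidations, as the loops above show). A schematic of that classification, using toy stand-in types rather than the real HDFS classes; this illustrates only the control flow, not the actual BlockManager logic:

import java.util.Collection;

// Toy stand-in for a replica entry in a block report; not an HDFS type.
class ReportedReplica {
  boolean knownToNameNode;   // present in the NameNode's blocksMap?
  boolean corrupt;           // genstamp/length mismatch and the like
  boolean underConstruction; // still being written
  boolean finalized;         // complete on the DataNode
}

public class ReportDiffSketch {
  // Mirrors the shape of BlockManager#reportDiff: each reported replica
  // lands in at most one list. Replicas the NameNode knows about but the
  // DataNode did not report end up in a separate toRemove list (the real
  // code finds them via a delimiter block spliced into the storage's
  // block list).
  static void classify(Iterable<ReportedReplica> report,
                       Collection<ReportedReplica> toAdd,
                       Collection<ReportedReplica> toInvalidate,
                       Collection<ReportedReplica> toCorrupt,
                       Collection<ReportedReplica> toUC) {
    for (ReportedReplica r : report) {
      if (!r.knownToNameNode) {
        toInvalidate.add(r);   // no NN record: tell the DataNode to delete it
      } else if (r.corrupt) {
        toCorrupt.add(r);      // will be marked corrupt, later invalidated
      } else if (r.underConstruction) {
        toUC.add(r);           // tracked against the under-construction block
      } else if (r.finalized) {
        toAdd.add(r);          // healthy finalized replica: record the location
      }
    }
  }
}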
@@ -2334,7 +2290,7 @@ private BlockInfo processReportedBlock( return null; } BlockUCState ucState = storedBlock.getBlockUCState(); - + // Block is on the NN if(LOG.isDebugEnabled()) { LOG.debug("In memory blockUCState = " + ucState); @@ -2379,8 +2335,8 @@ private BlockInfo processReportedBlock( // but now okay, it might need to be updated. if (reportedState == ReplicaState.FINALIZED && (storedBlock.findStorageInfo(storageInfo) == -1 || - corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { - toAdd.add(new BlockInfoToAdd(storedBlock, block)); + corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { + toAdd.add(storedBlock); } return storedBlock; } @@ -2426,7 +2382,7 @@ private void processQueuedMessages(Iterable rbis) if (rbi.getReportedState() == null) { // This is a DELETE_BLOCK request DatanodeStorageInfo storageInfo = rbi.getStorageInfo(); - removeStoredBlock(getStoredBlock(rbi.getBlock()), + removeStoredBlock(rbi.getBlock(), storageInfo.getDatanodeDescriptor()); } else { processAndHandleReportedBlock(rbi.getStorageInfo(), @@ -2474,15 +2430,15 @@ private BlockToMarkCorrupt checkReplicaCorrupt( case COMMITTED: if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); - return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS, + return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is " + ucState + " and reported genstamp " + reportedGS - + " does not match genstamp in block map " - + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); + + " does not match genstamp in block map " + + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (storedBlock.getNumBytes() != reported.getNumBytes()) { - return new BlockToMarkCorrupt(new Block(reported), storedBlock, + return new BlockToMarkCorrupt(storedBlock, "block is " + ucState + " and reported length " + - reported.getNumBytes() + " does not match " + - "length in block map " + storedBlock.getNumBytes(), + reported.getNumBytes() + " does not match " + + "length in block map " + storedBlock.getNumBytes(), Reason.SIZE_MISMATCH); } else { return null; // not corrupt @@ -2490,12 +2446,11 @@ private BlockToMarkCorrupt checkReplicaCorrupt( case UNDER_CONSTRUCTION: if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); - return new BlockToMarkCorrupt(new Block(reported), storedBlock, - reportedGS, "block is " + ucState + " and reported state " - + reportedState + ", But reported genstamp " + reportedGS + return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is " + + ucState + " and reported state " + reportedState + + ", But reported genstamp " + reportedGS + " does not match genstamp in block map " - + storedBlock.getGenerationStamp(), - Reason.GENSTAMP_MISMATCH); + + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } return null; default: @@ -2505,15 +2460,12 @@ private BlockToMarkCorrupt checkReplicaCorrupt( case RWR: if (!storedBlock.isComplete()) { return null; // not corrupt - } else if (storedBlock.getGenerationStamp() != - reported.getGenerationStamp()) { + } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); - return new BlockToMarkCorrupt( - new Block(reported), storedBlock, reportedGS, - "reported " + reportedState + - " replica with genstamp " + reportedGS + - " does not match COMPLETE block's genstamp in block map " + - 
storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); + return new BlockToMarkCorrupt(storedBlock, reportedGS, + "reported " + reportedState + " replica with genstamp " + reportedGS + + " does not match COMPLETE block's genstamp in block map " + + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else { // COMPLETE block, same genstamp if (reportedState == ReplicaState.RBW) { // If it's a RBW report for a COMPLETE block, it may just be that @@ -2525,7 +2477,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt( "complete with the same genstamp"); return null; } else { - return new BlockToMarkCorrupt(new Block(reported), storedBlock, + return new BlockToMarkCorrupt(storedBlock, "reported replica has invalid state " + reportedState, Reason.INVALID_STATE); } @@ -2538,8 +2490,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt( " on " + dn + " size " + storedBlock.getNumBytes(); // log here at WARN level since this is really a broken HDFS invariant LOG.warn(msg); - return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg, - Reason.INVALID_STATE); + return new BlockToMarkCorrupt(storedBlock, msg, Reason.INVALID_STATE); } } @@ -2572,7 +2523,7 @@ void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, if (ucBlock.reportedState == ReplicaState.FINALIZED && (block.findStorageInfo(storageInfo) < 0)) { - addStoredBlock(block, ucBlock.reportedBlock, storageInfo, null, true); + addStoredBlock(block, storageInfo, null, true); } } @@ -2587,23 +2538,23 @@ void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, * * @throws IOException */ - private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported, + private void addStoredBlockImmediate(BlockInfo storedBlock, DatanodeStorageInfo storageInfo) - throws IOException { + throws IOException { assert (storedBlock != null && namesystem.hasWriteLock()); - if (!namesystem.isInStartupSafeMode() + if (!namesystem.isInStartupSafeMode() || namesystem.isPopulatingReplQueues()) { - addStoredBlock(storedBlock, reported, storageInfo, null, false); + addStoredBlock(storedBlock, storageInfo, null, false); return; } // just add it - AddBlockResult result = storageInfo.addBlock(storedBlock, reported); + AddBlockResult result = storageInfo.addBlock(storedBlock); // Now check for completion of blocks and safe block count int numCurrentReplica = countLiveNodes(storedBlock); if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED - && hasMinStorage(storedBlock, numCurrentReplica)) { + && numCurrentReplica >= minReplication) { completeBlock(storedBlock.getBlockCollection(), storedBlock, false); } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) { // check whether safe replication is reached for the block @@ -2617,20 +2568,19 @@ && hasMinStorage(storedBlock, numCurrentReplica)) { /** * Modify (block-->datanode) map. Remove block from set of * needed replications if this takes care of the problem. - * @return the block that is stored in blocksMap. + * @return the block that is stored in blockMap. 
*/ private Block addStoredBlock(final BlockInfo block, - final Block reportedBlock, - DatanodeStorageInfo storageInfo, - DatanodeDescriptor delNodeHint, - boolean logEveryBlock) - throws IOException { + DatanodeStorageInfo storageInfo, + DatanodeDescriptor delNodeHint, + boolean logEveryBlock) + throws IOException { assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); if (block instanceof BlockInfoUnderConstruction) { //refresh our copy in case the block got completed in another thread - storedBlock = getStoredBlock(block); + storedBlock = blocksMap.getStoredBlock(block); } else { storedBlock = block; } @@ -2644,9 +2594,10 @@ private Block addStoredBlock(final BlockInfo block, return block; } BlockCollection bc = storedBlock.getBlockCollection(); + assert bc != null : "Block must belong to a file"; // add block to the datanode - AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock); + AddBlockResult result = storageInfo.addBlock(storedBlock); int curReplicaDelta; if (result == AddBlockResult.ADDED) { @@ -2674,10 +2625,10 @@ private Block addStoredBlock(final BlockInfo block, NumberReplicas num = countNodes(storedBlock); int numLiveReplicas = num.liveReplicas(); int numCurrentReplica = numLiveReplicas - + pendingReplications.getNumReplicas(storedBlock); + + pendingReplications.getNumReplicas(storedBlock); if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED && - hasMinStorage(storedBlock, numLiveReplicas)) { + numLiveReplicas >= minReplication) { storedBlock = completeBlock(bc, storedBlock, false); } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) { // check whether safe replication is reached for the block @@ -2687,7 +2638,7 @@ private Block addStoredBlock(final BlockInfo block, // handles the safe block count maintenance. namesystem.incrementSafeBlockCount(numCurrentReplica); } - + // if file is under construction, then done for now if (bc.isUnderConstruction()) { return storedBlock; @@ -2699,7 +2650,7 @@ private Block addStoredBlock(final BlockInfo block, } // handle underReplication/overReplication - short fileReplication = getExpectedReplicaNum(bc, storedBlock); + short fileReplication = bc.getPreferredBlockReplication(); if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) { neededReplications.remove(storedBlock, numCurrentReplica, num.decommissionedAndDecommissioning(), fileReplication); @@ -2715,12 +2666,11 @@ private Block addStoredBlock(final BlockInfo block, int numCorruptNodes = num.corruptReplicas(); if (numCorruptNodes != corruptReplicasCount) { LOG.warn("Inconsistent number of corrupt replicas for " + - storedBlock + ". 
blockMap has " + numCorruptNodes + + storedBlock + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + corruptReplicasCount); } - if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) { - invalidateCorruptReplicas(storedBlock, reportedBlock); - } + if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) + invalidateCorruptReplicas(storedBlock); return storedBlock; } @@ -2752,7 +2702,7 @@ private void logAddStoredBlock(BlockInfo storedBlock, * * @param blk Block whose corrupt replicas need to be invalidated */ - private void invalidateCorruptReplicas(BlockInfo blk, Block reported) { + private void invalidateCorruptReplicas(BlockInfo blk) { Collection nodes = corruptReplicas.getNodes(blk); boolean removedFromBlocksMap = true; if (nodes == null) @@ -2762,8 +2712,8 @@ private void invalidateCorruptReplicas(BlockInfo blk, Block reported) { DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]); for (DatanodeDescriptor node : nodesCopy) { try { - if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null, - Reason.ANY), node)) { + if (!invalidateBlock(new BlockToMarkCorrupt(blk, null, + Reason.ANY), node)) { removedFromBlocksMap = false; } } catch (IOException e) { @@ -2931,7 +2881,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) { } // calculate current replication short expectedReplication = - getExpectedReplicaNum(block.getBlockCollection(), block); + block.getBlockCollection().getPreferredBlockReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); // add to under-replicated queue if need to be @@ -2990,14 +2940,14 @@ public void setReplication(final short oldRepl, final short newRepl, * If there are any extras, call chooseExcessReplicates() to * mark them in the excessReplicateMap. 
*/ - private void processOverReplicatedBlock(final BlockInfo block, + private void processOverReplicatedBlock(final Block block, final short replication, final DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { assert namesystem.hasWriteLock(); if (addedNode == delNodeHint) { delNodeHint = null; } - Collection nonExcess = new ArrayList<>(); + Collection nonExcess = new ArrayList(); Collection corruptNodes = corruptReplicas .getNodes(block); for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) { @@ -3011,8 +2961,8 @@ private void processOverReplicatedBlock(final BlockInfo block, postponeBlock(block); return; } - LightWeightLinkedSet excessBlocks = excessReplicateMap.get( - cur.getDatanodeUuid()); + LightWeightLinkedSet excessBlocks = excessReplicateMap.get(cur + .getDatanodeUuid()); if (excessBlocks == null || !excessBlocks.contains(block)) { if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { // exclude corrupt replicas @@ -3022,7 +2972,7 @@ private void processOverReplicatedBlock(final BlockInfo block, } } } - chooseExcessReplicates(nonExcess, block, replication, + chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint, blockplacement); } @@ -3041,29 +2991,29 @@ private void processOverReplicatedBlock(final BlockInfo block, * If no such a node is available, * then pick a node with least free space */ - private void chooseExcessReplicates( - final Collection nonExcess, - BlockInfo storedBlock, short replication, - DatanodeDescriptor addedNode, - DatanodeDescriptor delNodeHint, - BlockPlacementPolicy replicator) { + private void chooseExcessReplicates(final Collection nonExcess, + Block b, short replication, + DatanodeDescriptor addedNode, + DatanodeDescriptor delNodeHint, + BlockPlacementPolicy replicator) { assert namesystem.hasWriteLock(); // first form a rack to datanodes map and - BlockCollection bc = getBlockCollection(storedBlock); - final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy( - bc.getStoragePolicyID()); + BlockCollection bc = getBlockCollection(b); + final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID()); final List excessTypes = storagePolicy.chooseExcess( replication, DatanodeStorageInfo.toStorageTypes(nonExcess)); - final Map> rackMap = new HashMap<>(); - final List moreThanOne = new ArrayList<>(); - final List exactlyOne = new ArrayList<>(); + final Map> rackMap + = new HashMap>(); + final List moreThanOne = new ArrayList(); + final List exactlyOne = new ArrayList(); + // split nodes into two sets // moreThanOne contains nodes on rack with more than one replica // exactlyOne contains the remaining nodes replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne); - + // pick one node to delete that favors the delete hint // otherwise pick one with least space from priSet if it is not empty // otherwise one node with least space from remains @@ -3078,7 +3028,7 @@ private void chooseExcessReplicates( moreThanOne, excessTypes)) { cur = delNodeHintStorage; } else { // regular excessive replica removal - cur = replicator.chooseReplicaToDelete(bc, storedBlock, replication, + cur = replicator.chooseReplicaToDelete(bc, b, replication, moreThanOne, exactlyOne, excessTypes); } firstOne = false; @@ -3087,27 +3037,22 @@ private void chooseExcessReplicates( replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur); - processChosenExcessReplica(nonExcess, cur, storedBlock); - } - } + nonExcess.remove(cur); + 
addToExcessReplicate(cur.getDatanodeDescriptor(), b); - private void processChosenExcessReplica( - final Collection nonExcess, - final DatanodeStorageInfo chosen, BlockInfo storedBlock) { - nonExcess.remove(chosen); - addToExcessReplicate(chosen.getDatanodeDescriptor(), storedBlock); - // - // The 'excessblocks' tracks blocks until we get confirmation - // that the datanode has deleted them; the only way we remove them - // is when we get a "removeBlock" message. - // - // The 'invalidate' list is used to inform the datanode the block - // should be deleted. Items are removed from the invalidate list - // upon giving instructions to the datanodes. - // - addToInvalidates(storedBlock, chosen.getDatanodeDescriptor()); - blockLog.debug("BLOCK* chooseExcessReplicates: " - +"({}, {}) is added to invalidated blocks set", chosen, storedBlock); + // + // The 'excessblocks' tracks blocks until we get confirmation + // that the datanode has deleted them; the only way we remove them + // is when we get a "removeBlock" message. + // + // The 'invalidate' list is used to inform the datanode the block + // should be deleted. Items are removed from the invalidate list + // upon giving instructions to the namenode. + // + addToInvalidates(b, cur.getDatanodeDescriptor()); + blockLog.debug("BLOCK* chooseExcessReplicates: " + +"({}, {}) is added to invalidated blocks set", cur, b); + } } /** Check if we can use delHint */ @@ -3131,18 +3076,17 @@ static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint, } } - private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) { + private void addToExcessReplicate(DatanodeInfo dn, Block block) { assert namesystem.hasWriteLock(); - LightWeightLinkedSet excessBlocks = excessReplicateMap.get( - dn.getDatanodeUuid()); + LightWeightLinkedSet excessBlocks = excessReplicateMap.get(dn.getDatanodeUuid()); if (excessBlocks == null) { - excessBlocks = new LightWeightLinkedSet<>(); + excessBlocks = new LightWeightLinkedSet(); excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks); } - if (excessBlocks.add(storedBlock)) { + if (excessBlocks.add(block)) { excessBlocksCount.incrementAndGet(); blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to" - + " excessReplicateMap", dn, storedBlock); + + " excessReplicateMap", dn, block); } } @@ -3154,26 +3098,26 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block, QUEUE_REASON_FUTURE_GENSTAMP); return; } - removeStoredBlock(getStoredBlock(block), node); + removeStoredBlock(block, node); } /** * Modify (block-->datanode) map. Possibly generate replication tasks, if the * removed block is still valid. 
*/ - public void removeStoredBlock(BlockInfo storedBlock, - DatanodeDescriptor node) { - blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node); + public void removeStoredBlock(Block block, DatanodeDescriptor node) { + blockLog.debug("BLOCK* removeStoredBlock: {} from {}", block, node); assert (namesystem.hasWriteLock()); { + BlockInfo storedBlock = getStoredBlock(block); if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) { blockLog.debug("BLOCK* removeStoredBlock: {} has already been" + - " removed from node {}", storedBlock, node); + " removed from node {}", block, node); return; } CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks() - .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false)); + .get(new CachedBlock(block.getBlockId(), (short) 0, false)); if (cblock != null) { boolean removed = false; removed |= node.getPendingCached().remove(cblock); @@ -3181,7 +3125,7 @@ public void removeStoredBlock(BlockInfo storedBlock, removed |= node.getPendingUncached().remove(cblock); if (removed) { blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching " - + "related lists on node {}", storedBlock, node); + + "related lists on node {}", block, node); } } @@ -3191,7 +3135,7 @@ public void removeStoredBlock(BlockInfo storedBlock, // necessary. In that case, put block on a possibly-will- // be-replicated list. // - BlockCollection bc = storedBlock.getBlockCollection(); + BlockCollection bc = blocksMap.getBlockCollection(block); if (bc != null) { namesystem.decrementSafeBlockCount(storedBlock); updateNeededReplications(storedBlock, -1, 0); @@ -3201,13 +3145,13 @@ public void removeStoredBlock(BlockInfo storedBlock, // We've removed a block from a node, so it's definitely no longer // in "excess" there. 
// - LightWeightLinkedSet excessBlocks = excessReplicateMap.get( - node.getDatanodeUuid()); + LightWeightLinkedSet excessBlocks = excessReplicateMap.get(node + .getDatanodeUuid()); if (excessBlocks != null) { - if (excessBlocks.remove(storedBlock)) { + if (excessBlocks.remove(block)) { excessBlocksCount.decrementAndGet(); blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " + - "excessBlocks", storedBlock); + "excessBlocks", block); if (excessBlocks.size() == 0) { excessReplicateMap.remove(node.getDatanodeUuid()); } @@ -3215,7 +3159,7 @@ public void removeStoredBlock(BlockInfo storedBlock, } // Remove the replica from corruptReplicas - corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node); + corruptReplicas.removeFromCorruptReplicasMap(block, node); } } @@ -3223,7 +3167,7 @@ public void removeStoredBlock(BlockInfo storedBlock, * Get all valid locations of the block & add the block to results * return the length of the added block; 0 if the block is not added */ - private long addBlock(BlockInfo block, List results) { + private long addBlock(Block block, List results) { final List locations = getValidLocations(block); if(locations.size() == 0) { return 0; @@ -3275,32 +3219,31 @@ void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint) processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED, delHintNode); } - + private void processAndHandleReportedBlock( DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, DatanodeDescriptor delHintNode) throws IOException { // blockReceived reports a finalized block - Collection toAdd = new LinkedList<>(); - Collection toInvalidate = new LinkedList<>(); - Collection toCorrupt = new LinkedList<>(); - Collection toUC = new LinkedList<>(); + Collection toAdd = new LinkedList(); + Collection toInvalidate = new LinkedList(); + Collection toCorrupt = new LinkedList(); + Collection toUC = new LinkedList(); final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); - processReportedBlock(storageInfo, block, reportedState, toAdd, toInvalidate, - toCorrupt, toUC); + processReportedBlock(storageInfo, block, reportedState, + toAdd, toInvalidate, toCorrupt, toUC); // the block is only in one of the to-do lists // if it is in none then data-node already has it assert toUC.size() + toAdd.size() + toInvalidate.size() + toCorrupt.size() <= 1 - : "The block should be only in one of the lists."; + : "The block should be only in one of the lists."; - for (StatefulBlockInfo b : toUC) { + for (StatefulBlockInfo b : toUC) { addStoredBlockUnderConstruction(b, storageInfo); } long numBlocksLogged = 0; - for (BlockInfoToAdd b : toAdd) { - addStoredBlock(b.getStored(), b.getReported(), storageInfo, delHintNode, - numBlocksLogged < maxNumBlocksToLog); + for (BlockInfo b : toAdd) { + addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -3365,7 +3308,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, ReplicaState.RBW, null); break; default: - String msg = + String msg = "Unknown block status code reported by " + nodeID + ": " + rdbi; blockLog.warn(msg); @@ -3401,8 +3344,8 @@ public NumberReplicas countNodes(BlockInfo b) { } else if (node.isDecommissioned()) { decommissioned++; } else { - LightWeightLinkedSet blocksExcess = - excessReplicateMap.get(node.getDatanodeUuid()); + LightWeightLinkedSet blocksExcess = excessReplicateMap.get(node + .getDatanodeUuid()); if (blocksExcess != null && 
blocksExcess.contains(b)) { excess++; } else { @@ -3455,13 +3398,13 @@ void processOverReplicatedBlocksOnReCommission( int numOverReplicated = 0; while(it.hasNext()) { final BlockInfo block = it.next(); - int expectedReplication = this.getReplication(block); + BlockCollection bc = blocksMap.getBlockCollection(block); + short expectedReplication = bc.getPreferredBlockReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); if (numCurrentReplica > expectedReplication) { // over-replicated block - processOverReplicatedBlock(block, (short) expectedReplication, null, - null); + processOverReplicatedBlock(block, expectedReplication, null, null); numOverReplicated++; } } @@ -3487,7 +3430,7 @@ boolean isNodeHealthyForDecommission(DatanodeDescriptor node) { if (pendingReplicationBlocksCount == 0 && underReplicatedBlocksCount == 0) { LOG.info("Node {} is dead and there are no under-replicated" + - " blocks or blocks pending replication. Safe to decommission.", + " blocks or blocks pending replication. Safe to decommission.", node); return true; } @@ -3505,12 +3448,6 @@ public int getActiveBlockCount() { return blocksMap.size(); } - - /** @return an iterator of the datanodes. */ - public Iterable getStorages(final Block block) { - return blocksMap.getStorages(block); - } - public DatanodeStorageInfo[] getStorages(BlockInfo block) { final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[block.numNodes()]; int i = 0; @@ -3596,13 +3533,13 @@ public boolean checkBlocksProperlyReplicated( String src, BlockInfo[] blocks) { for (BlockInfo b: blocks) { if (!b.isComplete()) { + final BlockInfoUnderConstruction uc = + (BlockInfoUnderConstruction)b; final int numNodes = b.numNodes(); - final int min = getMinStorageNum(b); - final BlockUCState state = b.getBlockUCState(); - LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + state - + ", replication# = " + numNodes - + (numNodes < min ? " < " : " >= ") - + " minimum = " + min + ") in file " + src); + LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + + uc.getBlockUCState() + ", replication# = " + numNodes + + (numNodes < minReplication ? " < ": " >= ") + + " minimum = " + minReplication + ") in file " + src); return false; } } @@ -3613,15 +3550,15 @@ public boolean checkBlocksProperlyReplicated( * @return 0 if the block is not found; * otherwise, return the replication factor of the block. */ - private int getReplication(BlockInfo block) { + private int getReplication(Block block) { final BlockCollection bc = blocksMap.getBlockCollection(block); - return bc == null? 0: getExpectedReplicaNum(bc, block); + return bc == null? 0: bc.getPreferredBlockReplication(); } /** - * Get blocks to invalidate for nodeId. - * in {@link #invalidateBlocks}.boolean blockHasEnoughRacks + * Get blocks to invalidate for nodeId + * in {@link #invalidateBlocks}. * * @return number of blocks scheduled for removal during this iteration. 
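
invalidateWorkForOneNode, whose javadoc closes above, hands each datanode at most a bounded batch of deletions per iteration. A sketch of that batching; the limit is a plain constructor argument standing in for the dfs.block.invalidate.limit setting:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    class InvalidateWork {
      private final Queue<Long> pendingInvalidates = new ArrayDeque<>();
      private final int blockInvalidateLimit;

      InvalidateWork(int blockInvalidateLimit) {
        this.blockInvalidateLimit = blockInvalidateLimit;
      }

      void schedule(long blockId) {
        pendingInvalidates.add(blockId);
      }

      // returns the blocks scheduled for removal during this iteration
      List<Long> invalidateWorkForOneNode() {
        List<Long> toInvalidate = new ArrayList<>();
        while (!pendingInvalidates.isEmpty()
            && toInvalidate.size() < blockInvalidateLimit) {
          toInvalidate.add(pendingInvalidates.poll());
        }
        return toInvalidate;
      }
    }
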
*/ @@ -3659,20 +3596,22 @@ private int invalidateWorkForOneNode(DatanodeInfo dn) { return toInvalidate.size(); } - boolean blockHasEnoughRacks(BlockInfo storedBlock, int expectedStorageNum) { + boolean blockHasEnoughRacks(Block b) { if (!this.shouldCheckForEnoughRacks) { return true; } - boolean enoughRacks = false; - Collection corruptNodes = - corruptReplicas.getNodes(storedBlock); + boolean enoughRacks = false;; + Collection corruptNodes = + corruptReplicas.getNodes(b); + int numExpectedReplicas = getReplication(b); String rackName = null; - for(DatanodeStorageInfo storage : getStorages(storedBlock)) { + for(DatanodeStorageInfo storage : blocksMap.getStorages(b)) { final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { if ((corruptNodes == null ) || !corruptNodes.contains(cur)) { - if (expectedStorageNum == 1 || (expectedStorageNum > 1 && - !datanodeManager.hasClusterEverBeenMultiRack())) { + if (numExpectedReplicas == 1 || + (numExpectedReplicas > 1 && + !datanodeManager.hasClusterEverBeenMultiRack())) { enoughRacks = true; break; } @@ -3693,13 +3632,8 @@ boolean blockHasEnoughRacks(BlockInfo storedBlock, int expectedStorageNum) { * A block needs replication if the number of replicas is less than expected * or if it does not have enough racks. */ - boolean isNeededReplication(BlockInfo storedBlock, int expected, - int current) { - return current < expected || !blockHasEnoughRacks(storedBlock, expected); - } - - public short getExpectedReplicaNum(BlockCollection bc, BlockInfo block) { - return bc.getPreferredBlockReplication(); + boolean isNeededReplication(Block b, int expected, int current) { + return current < expected || !blockHasEnoughRacks(b); } public long getMissingBlocksCount() { @@ -3721,6 +3655,11 @@ public BlockCollection getBlockCollection(Block b) { return blocksMap.getBlockCollection(b); } + /** @return an iterator of the datanodes. */ + public Iterable getStorages(final Block block) { + return blocksMap.getStorages(block); + } + public int numCorruptReplicas(Block block) { return corruptReplicas.numCorruptReplicas(block); } @@ -3736,10 +3675,9 @@ public void removeBlockFromMap(Block block) { * If a block is removed from blocksMap, remove it from excessReplicateMap. */ private void removeFromExcessReplicateMap(Block block) { - for (DatanodeStorageInfo info : getStorages(block)) { + for (DatanodeStorageInfo info : blocksMap.getStorages(block)) { String uuid = info.getDatanodeDescriptor().getDatanodeUuid(); - LightWeightLinkedSet excessReplicas = - excessReplicateMap.get(uuid); + LightWeightLinkedSet excessReplicas = excessReplicateMap.get(uuid); if (excessReplicas != null) { if (excessReplicas.remove(block)) { excessBlocksCount.decrementAndGet(); @@ -3928,7 +3866,7 @@ private void chooseTargets(BlockPlacementPolicy blockplacement, /** * A simple result enum for the result of - * {@link BlockManager#processMisReplicatedBlock}. + * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}. */ enum MisReplicationResult { /** The block should be invalidated since it belongs to a deleted file. 
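
The blockHasEnoughRacks hunk above boils down to: one expected replica, or a cluster that has never spanned more than one rack, is trivially fine; otherwise the live, non-corrupt replicas must cover at least two racks. A simplified sketch under those assumptions, with string rack names and the cluster-history flag passed in:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class RackCheck {
      static boolean hasEnoughRacks(List<String> goodReplicaRacks,
                                    int expectedReplicas,
                                    boolean clusterEverMultiRack) {
        if (expectedReplicas == 1 || !clusterEverMultiRack) {
          // one good replica anywhere is already "spread enough"
          return !goodReplicaRacks.isEmpty();
        }
        Set<String> racks = new HashSet<>(goodReplicaRacks);
        return racks.size() > 1;
      }

      // A block needs replication when short on replicas or short on racks.
      static boolean isNeededReplication(int current, int expected,
                                         boolean enoughRacks) {
        return current < expected || !enoughRacks;
      }
    }
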
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index 92841a634cb2c..216d6d2bf14a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -24,7 +24,6 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; @@ -234,7 +233,7 @@ long getBlockPoolUsed() { return blockPoolUsed; } - public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { + public AddBlockResult addBlock(BlockInfo b) { // First check whether the block belongs to a different storage // on the same DN. AddBlockResult result = AddBlockResult.ADDED; @@ -253,18 +252,10 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { } // add to the head of the data-node list - b.addStorage(this, reportedBlock); - insertToList(b); - return result; - } - - AddBlockResult addBlock(BlockInfo b) { - return addBlock(b, b); - } - - public void insertToList(BlockInfo b) { + b.addStorage(this); blockList = b.listInsert(blockList, this); numBlocks++; + return result; } public boolean removeBlock(BlockInfo b) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e3717abbe6c43..3d176b052d8fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -142,6 +142,7 @@ import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsServerDefaults; @@ -2790,7 +2791,7 @@ void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) { if (trackBlockCounts) { if (b.isComplete()) { numRemovedComplete++; - if (blockManager.hasMinStorage(b)) { + if (blockManager.checkMinReplication(b)) { numRemovedSafe++; } } @@ -3022,7 +3023,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, curBlock = blocks[nrCompleteBlocks]; if(!curBlock.isComplete()) break; - assert blockManager.hasMinStorage(curBlock) : + assert blockManager.checkMinReplication(curBlock) : "A COMPLETE block is not minimally replicated in " + src; } @@ -3058,7 +3059,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, // If penultimate block doesn't exist then its minReplication is met boolean penultimateBlockMinReplication = penultimateBlock == null ? 
true : - blockManager.hasMinStorage(penultimateBlock); + blockManager.checkMinReplication(penultimateBlock); switch(lastBlockState) { case COMPLETE: @@ -3067,7 +3068,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, case COMMITTED: // Close file if committed blocks are minimally replicated if(penultimateBlockMinReplication && - blockManager.hasMinStorage(lastBlock)) { + blockManager.checkMinReplication(lastBlock)) { finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK*" @@ -3359,9 +3360,9 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i)); if (storageInfo != null) { if(copyTruncate) { - storageInfo.addBlock(truncatedBlock, truncatedBlock); + storageInfo.addBlock(truncatedBlock); } else { - storageInfo.addBlock(storedBlock, storedBlock); + storageInfo.addBlock(storedBlock); } } } @@ -3377,9 +3378,8 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, } else { iFile.setLastBlock(storedBlock, trimmedStorageInfos); if (closeFile) { - blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(), - storedBlock, oldGenerationStamp, oldNumBytes, - trimmedStorageInfos); + blockManager.markBlockReplicasAsCorrupt(storedBlock, + oldGenerationStamp, oldNumBytes, trimmedStorageInfos); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index ab179b454c0e5..7d4cd7e52050c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -647,7 +647,7 @@ private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res .getStorageType())); } if (showReplicaDetails) { - LightWeightLinkedSet blocksExcess = + LightWeightLinkedSet blocksExcess = bm.excessReplicateMap.get(dnDesc.getDatanodeUuid()); Collection corruptReplicas = bm.getCorruptReplicas(block.getLocalBlock()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java index bae4f1d41bb5c..5126aa78dfb9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java @@ -63,7 +63,7 @@ public void testAddStorage() throws Exception { final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1"); - boolean added = blockInfo.addStorage(storage, blockInfo); + boolean added = blockInfo.addStorage(storage); Assert.assertTrue(added); Assert.assertEquals(storage, blockInfo.getStorageInfo(0)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 9e3167085413f..396dff302a998 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -383,7 +383,7 @@ private void fulfillPipeline(BlockInfo blockInfo, for (int i = 1; i < pipeline.length; i++) { DatanodeStorageInfo storage = pipeline[i]; bm.addBlock(storage, blockInfo, null); - blockInfo.addStorage(storage, blockInfo); + blockInfo.addStorage(storage); } } @@ -393,7 +393,7 @@ private BlockInfo blockOnNodes(long blkId, List nodes) { for (DatanodeDescriptor dn : nodes) { for (DatanodeStorageInfo storage : dn.getStorageInfos()) { - blockInfo.addStorage(storage, blockInfo); + blockInfo.addStorage(storage); } } return blockInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java index c33667d5e00f5..1c3f075d5f426 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java @@ -100,7 +100,7 @@ public void testNodeCount() throws Exception { DatanodeDescriptor nonExcessDN = null; for(DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) { final DatanodeDescriptor dn = storage.getDatanodeDescriptor(); - Collection blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid()); + Collection blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid()); if (blocks == null || !blocks.contains(block.getLocalBlock()) ) { nonExcessDN = dn; break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 83b3aa0f6a178..2d7bb440d0cd8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -41,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.util.Time; import org.junit.Test; public class TestOverReplicatedBlocks { @@ -183,7 +185,7 @@ public void testChooseReplicaToDelete() throws Exception { // All replicas for deletion should be scheduled on lastDN. // And should not actually be deleted, because lastDN does not heartbeat. 
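
The test comment above relies on a basic property of the protocol: deletion commands are only delivered as heartbeat responses, so a silent datanode never drains its queue. A toy dispatcher illustrating that, with invented names:

    import java.util.ArrayDeque;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Queue;

    class HeartbeatDispatch {
      private final Map<String, Queue<String>> commandsByNode = new HashMap<>();

      void scheduleDeletion(String dnUuid, String block) {
        commandsByNode.computeIfAbsent(dnUuid, k -> new ArrayDeque<>())
            .add("delete " + block);
      }

      // Commands are only handed out as heartbeat responses; a datanode that
      // never heartbeats keeps its deletions parked, which is what the test
      // asserts through excessReplicateMap.
      Queue<String> onHeartbeat(String dnUuid) {
        Queue<String> q = commandsByNode.remove(dnUuid);
        return q == null ? new ArrayDeque<>() : q;
      }
    }
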
namesystem.readLock(); - Collection dnBlocks = + Collection dnBlocks = namesystem.getBlockManager().excessReplicateMap.get(lastDNid); assertEquals("Replicas on node " + lastDNid + " should have been deleted", SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 44f0e65f38bf1..28129572370af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -1250,7 +1250,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication() when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true); when(storage.addBlock(any(BlockInfo.class))).thenReturn (DatanodeStorageInfo.AddBlockResult.ADDED); - ucBlock.addStorage(storage, ucBlock); + ucBlock.addStorage(storage); when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any())) .thenReturn(ucBlock); From 0faa3913b9a7ce499d8336dc27ce5f5437647f48 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Wed, 5 Aug 2015 17:52:50 -0700 Subject: [PATCH 075/130] Revert "HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang." This reverts commit c17439c2ddd921b63b1635e6f1cba634b8da8557. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 - .../blockmanagement/BlockCollection.java | 2 +- .../server/blockmanagement/BlockInfo.java | 24 ++-- .../blockmanagement/BlockInfoContiguous.java | 77 +++++++++--- ...BlockInfoContiguousUnderConstruction.java} | 107 +++++++++-------- .../BlockInfoUnderConstructionContiguous.java | 110 ------------------ .../server/blockmanagement/BlockManager.java | 40 +++---- .../ContiguousBlockStorageOp.java | 106 ----------------- .../blockmanagement/DatanodeDescriptor.java | 13 +-- .../blockmanagement/DatanodeManager.java | 4 +- .../hdfs/server/namenode/FSDirTruncateOp.java | 11 +- .../server/namenode/FSDirWriteFileOp.java | 15 ++- .../hdfs/server/namenode/FSEditLogLoader.java | 15 ++- .../hdfs/server/namenode/FSImageFormat.java | 7 +- .../server/namenode/FSImageFormatPBINode.java | 6 +- .../server/namenode/FSImageSerialization.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 15 ++- .../FileUnderConstructionFeature.java | 10 +- .../hdfs/server/namenode/INodeFile.java | 14 +-- .../hdfs/server/namenode/Namesystem.java | 4 +- .../namenode/snapshot/FileDiffList.java | 4 +- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 8 +- .../TestBlockInfoUnderConstruction.java | 6 +- .../blockmanagement/TestBlockManager.java | 6 +- .../TestHeartbeatHandling.java | 8 +- .../TestReplicationPolicy.java | 5 +- .../namenode/TestBlockUnderConstruction.java | 4 +- .../TestCommitBlockSynchronization.java | 9 +- .../server/namenode/TestFileTruncate.java | 6 +- .../namenode/ha/TestRetryCacheWithHA.java | 6 +- .../namenode/snapshot/SnapshotTestHelper.java | 4 +- 31 files changed, 234 insertions(+), 419 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/{BlockInfoUnderConstruction.java => BlockInfoContiguousUnderConstruction.java} (83%) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java delete mode 
100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 59623850f561e..138ed107a0df7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -635,9 +635,6 @@ Release 2.8.0 - UNRELEASED HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe) - HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. - (Zhe Zhang via wang) - HDFS-8540. Mover should exit with NO_MOVE_BLOCK if no block can be moved. (surendra singh lilhore via szetszwo) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index f11a825c62d65..02a1d0522deee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -79,7 +79,7 @@ public interface BlockCollection { * Convert the last block of the collection to an under-construction block * and set the locations. */ - public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, + public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock, DatanodeStorageInfo[] targets) throws IOException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 4cc2791e75456..dea31c42f9735 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -51,7 +51,7 @@ public abstract class BlockInfo extends Block * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16 * bytes using the triplets. */ - Object[] triplets; + protected Object[] triplets; /** * Construct an entry for blocksmap @@ -295,7 +295,7 @@ public BlockInfo moveBlockToHead(BlockInfo head, /** * BlockInfo represents a block that is not being constructed. * In order to start modifying the block, the BlockInfo should be converted - * to {@link BlockInfoUnderConstruction}. + * to {@link BlockInfoContiguousUnderConstruction}. * @return {@link BlockUCState#COMPLETE} */ public BlockUCState getBlockUCState() { @@ -312,29 +312,27 @@ public boolean isComplete() { } /** - * Convert a block to an under construction block. + * Convert a complete block to an under construction block. * @return BlockInfoUnderConstruction - an under construction block. 
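
A sketch of the two branches of convertToBlockUnderConstruction as restored below: a complete block is replaced by a fresh under-construction object, while an already-under-construction block is mutated in place. The two-state wrapper is a stand-in for the real BlockInfo hierarchy:

    class ToyBlock {
      enum State { COMPLETE, UNDER_CONSTRUCTION }

      State state = State.COMPLETE;
      String[] expectedLocations = new String[0];

      ToyBlock convertToUnderConstruction(String[] targets) {
        if (state == State.COMPLETE) {
          // A complete block is immutable: conversion allocates a new
          // under-construction object that replaces it in the blocks map.
          ToyBlock uc = new ToyBlock();
          uc.state = State.UNDER_CONSTRUCTION;
          uc.expectedLocations = targets;
          return uc;
        }
        // An already-under-construction block is updated in place.
        this.expectedLocations = targets;
        return this;
      }
    }
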
*/ - public BlockInfoUnderConstruction convertToBlockUnderConstruction( + public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction( BlockUCState s, DatanodeStorageInfo[] targets) { if(isComplete()) { - return convertCompleteBlockToUC(s, targets); + BlockInfoContiguousUnderConstruction ucBlock = + new BlockInfoContiguousUnderConstruction(this, + getBlockCollection().getPreferredBlockReplication(), s, targets); + ucBlock.setBlockCollection(getBlockCollection()); + return ucBlock; } // the block is already under construction - BlockInfoUnderConstruction ucBlock = - (BlockInfoUnderConstruction)this; + BlockInfoContiguousUnderConstruction ucBlock = + (BlockInfoContiguousUnderConstruction)this; ucBlock.setBlockUCState(s); ucBlock.setExpectedLocations(targets); ucBlock.setBlockCollection(getBlockCollection()); return ucBlock; } - /** - * Convert a complete block to an under construction block. - */ - abstract BlockInfoUnderConstruction convertCompleteBlockToUC( - BlockUCState s, DatanodeStorageInfo[] targets); - @Override public int hashCode() { // Super implementation is sufficient diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index b9abcd03f2960..eff89a8083222 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -19,13 +19,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** * Subclass of {@link BlockInfo}, used for a block with replication scheme. */ @InterfaceAudience.Private public class BlockInfoContiguous extends BlockInfo { + public static final BlockInfoContiguous[] EMPTY_ARRAY = {}; public BlockInfoContiguous(short size) { super(size); @@ -40,37 +40,84 @@ public BlockInfoContiguous(Block blk, short size) { * This is used to convert BlockReplicationInfoUnderConstruction * @param from BlockReplicationInfo to copy from. */ - protected BlockInfoContiguous(BlockInfo from) { + protected BlockInfoContiguous(BlockInfoContiguous from) { super(from); } + /** + * Ensure that there is enough space to include num more triplets. + * @return first free triplet index. + */ + private int ensureCapacity(int num) { + assert this.triplets != null : "BlockInfo is not initialized"; + int last = numNodes(); + if (triplets.length >= (last+num)*3) { + return last; + } + /* Not enough space left. Create a new array. Should normally + * happen only when replication is manually increased by the user. 
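
The ensureCapacity comment above is the whole growth strategy: three object slots per replica (storage, previous, next), grown only on demand since replication is rarely raised. A self-contained rendering of the same idea:

    class TripletStore {
      private Object[] triplets = new Object[3 * 3]; // room for 3 replicas
      private int replicas = 0;

      private int ensureCapacity(int num) {
        int last = replicas;
        if (triplets.length >= (last + num) * 3) {
          return last;
        }
        // Not enough space left: grow by exactly the amount requested.
        Object[] old = triplets;
        triplets = new Object[(last + num) * 3];
        System.arraycopy(old, 0, triplets, 0, last * 3);
        return last;
      }

      void addStorage(Object storage) {
        int i = ensureCapacity(1);
        triplets[i * 3] = storage;   // the storage itself
        triplets[i * 3 + 1] = null;  // previous block on that storage
        triplets[i * 3 + 2] = null;  // next block on that storage
        replicas++;
      }
    }
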
*/ + Object[] old = triplets; + triplets = new Object[(last+num)*3]; + System.arraycopy(old, 0, triplets, 0, last * 3); + return last; + } + @Override boolean addStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.addStorage(this, storage); + // find the last null node + int lastNode = ensureCapacity(1); + setStorageInfo(lastNode, storage); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; } @Override boolean removeStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.removeStorage(this, storage); + int dnIndex = findStorageInfo(storage); + if (dnIndex < 0) { // the node is not found + return false; + } + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is still in the list and must be removed first."; + // find the last not null node + int lastNode = numNodes()-1; + // replace current node triplet by the lastNode one + setStorageInfo(dnIndex, getStorageInfo(lastNode)); + setNext(dnIndex, getNext(lastNode)); + setPrevious(dnIndex, getPrevious(lastNode)); + // set the last triplet to null + setStorageInfo(lastNode, null); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; } @Override public int numNodes() { - return ContiguousBlockStorageOp.numNodes(this); + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; + + for (int idx = getCapacity()-1; idx >= 0; idx--) { + if (getDatanode(idx) != null) { + return idx + 1; + } + } + return 0; } @Override void replaceBlock(BlockInfo newBlock) { - ContiguousBlockStorageOp.replaceBlock(this, newBlock); - } + assert newBlock instanceof BlockInfoContiguous; + for (int i = this.numNodes() - 1; i >= 0; i--) { + final DatanodeStorageInfo storage = this.getStorageInfo(i); + final boolean removed = storage.removeBlock(this); + assert removed : "currentBlock not found."; - @Override - BlockInfoUnderConstruction convertCompleteBlockToUC( - HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) { - BlockInfoUnderConstructionContiguous ucBlock = - new BlockInfoUnderConstructionContiguous(this, - getBlockCollection().getPreferredBlockReplication(), s, targets); - ucBlock.setBlockCollection(getBlockCollection()); - return ucBlock; + final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( + newBlock); + assert result == DatanodeStorageInfo.AddBlockResult.ADDED : + "newBlock already exists."; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java similarity index 83% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java index bbc4232e3fde8..7ca6419899987 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import 
com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -32,15 +31,15 @@ * Represents a block that is currently being constructed.
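
The class comment here is worth pinning down: the replicas list of an under-construction block records pipeline order, and each entry only reflects what the datanode last reported, with no guarantee the replica actually exists. A sketch with strings for storages and a bare generation stamp:

    import java.util.ArrayList;
    import java.util.List;

    class UnderConstructionReplicas {
      static final class ReplicaUC {
        final String storage;
        long reportedGenStamp;
        boolean chosenAsPrimary;

        ReplicaUC(String storage, long genStamp) {
          this.storage = storage;
          this.reportedGenStamp = genStamp;
        }
      }

      private final List<ReplicaUC> replicas = new ArrayList<>();

      // Rebuilt wholesale whenever the pipeline changes.
      void setExpectedLocations(String[] targets, long genStamp) {
        replicas.clear();
        for (String t : targets) {
          replicas.add(new ReplicaUC(t, genStamp));
        }
      }

      String[] getExpectedStorageLocations() {
        String[] out = new String[replicas.size()];
        for (int i = 0; i < replicas.size(); i++) {
          out[i] = replicas.get(i).storage;
        }
        return out;
      }
    }
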
* This is usually the last block of a file opened for write or append. */ -public abstract class BlockInfoUnderConstruction extends BlockInfo { +public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous { /** Block state. See {@link BlockUCState} */ - protected BlockUCState blockUCState; + private BlockUCState blockUCState; /** * Block replicas as assigned when the block was allocated. * This defines the pipeline order. */ - protected List replicas; + private List replicas; /** * Index of the primary data node doing the recovery. Useful for log @@ -58,12 +57,12 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { /** * The block source to use in the event of copy-on-write truncate. */ - protected Block truncateBlock; + private Block truncateBlock; /** * ReplicaUnderConstruction contains information about replicas while * they are under construction. - * The GS, the length and the state of the replica is as reported by + * The GS, the length and the state of the replica is as reported by * the data-node. * It is not guaranteed, but expected, that data-nodes actually have * corresponding replicas. @@ -144,7 +143,7 @@ public String toString() { appendStringTo(b); return b.toString(); } - + @Override public void appendStringTo(StringBuilder sb) { sb.append("ReplicaUC[") @@ -159,24 +158,45 @@ public void appendStringTo(StringBuilder sb) { * Create block and set its state to * {@link BlockUCState#UNDER_CONSTRUCTION}. */ - public BlockInfoUnderConstruction(Block blk, short replication) { + public BlockInfoContiguousUnderConstruction(Block blk, short replication) { this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null); } /** * Create a block that is currently being constructed. */ - public BlockInfoUnderConstruction(Block blk, short replication, + public BlockInfoContiguousUnderConstruction(Block blk, short replication, BlockUCState state, DatanodeStorageInfo[] targets) { super(blk, replication); - Preconditions.checkState(getBlockUCState() != BlockUCState.COMPLETE, - "BlockInfoUnderConstruction cannot be in COMPLETE state"); + assert getBlockUCState() != BlockUCState.COMPLETE : + "BlockInfoUnderConstruction cannot be in COMPLETE state"; this.blockUCState = state; setExpectedLocations(targets); } - /** Set expected locations. */ - public abstract void setExpectedLocations(DatanodeStorageInfo[] targets); + /** + * Convert an under construction block to a complete block. + * + * @return BlockInfo - a complete block. + * @throws IOException if the state of the block + * (the generation stamp and the length) has not been committed by + * the client or it does not have at least a minimal number of replicas + * reported from data-nodes. + */ + BlockInfo convertToCompleteBlock() throws IOException { + assert getBlockUCState() != BlockUCState.COMPLETE : + "Trying to convert a COMPLETE block"; + return new BlockInfoContiguous(this); + } + + /** Set expected locations */ + public void setExpectedLocations(DatanodeStorageInfo[] targets) { + int numLocations = targets == null ? 0 : targets.length; + this.replicas = new ArrayList(numLocations); + for(int i = 0; i < numLocations; i++) + replicas.add( + new ReplicaUnderConstruction(this, targets[i], ReplicaState.RBW)); + } /** * Create array of expected replica locations @@ -185,13 +205,12 @@ public BlockInfoUnderConstruction(Block blk, short replication, public DatanodeStorageInfo[] getExpectedStorageLocations() { int numLocations = replicas == null ? 
0 : replicas.size(); DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations]; - for(int i = 0; i < numLocations; i++) { + for(int i = 0; i < numLocations; i++) storages[i] = replicas.get(i).getExpectedStorageLocation(); - } return storages; } - /** Get the number of expected locations. */ + /** Get the number of expected locations */ public int getNumExpectedLocations() { return replicas == null ? 0 : replicas.size(); } @@ -209,15 +228,19 @@ void setBlockUCState(BlockUCState s) { blockUCState = s; } - /** Get block recovery ID. */ + /** Get block recovery ID */ public long getBlockRecoveryId() { return blockRecoveryId; } - /** Get recover block. */ - public abstract Block getTruncateBlock(); + /** Get recover block */ + public Block getTruncateBlock() { + return truncateBlock; + } - public abstract void setTruncateBlock(Block recoveryBlock); + public void setTruncateBlock(Block recoveryBlock) { + this.truncateBlock = recoveryBlock; + } /** * Process the recorded replicas. When about to commit or finish the @@ -227,9 +250,8 @@ public long getBlockRecoveryId() { public void setGenerationStampAndVerifyReplicas(long genStamp) { // Set the generation stamp for the block. setGenerationStamp(genStamp); - if (replicas == null) { + if (replicas == null) return; - } // Remove the replicas with wrong gen stamp. // The replica list is unchanged. @@ -245,14 +267,13 @@ public void setGenerationStampAndVerifyReplicas(long genStamp) { /** * Commit block's length and generation stamp as reported by the client. * Set block state to {@link BlockUCState#COMMITTED}. - * @param block - contains client reported block length and generation + * @param block - contains client reported block length and generation * @throws IOException if block ids are inconsistent. */ void commitBlock(Block block) throws IOException { - if(getBlockId() != block.getBlockId()) { + if(getBlockId() != block.getBlockId()) throw new IOException("Trying to commit inconsistent block: id = " + block.getBlockId() + ", expected id = " + getBlockId()); - } blockUCState = BlockUCState.COMMITTED; this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp()); // Sort out invalid replicas. @@ -268,17 +289,16 @@ public void initializeBlockRecovery(long recoveryId) { setBlockUCState(BlockUCState.UNDER_RECOVERY); blockRecoveryId = recoveryId; if (replicas.size() == 0) { - NameNode.blockStateChangeLog.warn("BLOCK* " + - "BlockInfoUnderConstruction.initLeaseRecovery: " + - "No blocks found, lease removed."); + NameNode.blockStateChangeLog.warn("BLOCK*" + + " BlockInfoUnderConstruction.initLeaseRecovery:" + + " No blocks found, lease removed."); } boolean allLiveReplicasTriedAsPrimary = true; for (int i = 0; i < replicas.size(); i++) { // Check if all replicas have been tried or not. if (replicas.get(i).isAlive()) { allLiveReplicasTriedAsPrimary = - (allLiveReplicasTriedAsPrimary && - replicas.get(i).getChosenAsPrimary()); + (allLiveReplicasTriedAsPrimary && replicas.get(i).getChosenAsPrimary()); } } if (allLiveReplicasTriedAsPrimary) { @@ -292,8 +312,7 @@ public void initializeBlockRecovery(long recoveryId) { primaryNodeIndex = -1; for(int i = 0; i < replicas.size(); i++) { // Skip alive replicas which have been chosen for recovery. 
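
initializeBlockRecovery, continued below, rotates the primary through the live replicas: prefer the most recently heartbeated replica not yet tried as primary, and restart the rotation once every live replica has had a turn. A standalone sketch of that selection:

    import java.util.List;

    class PrimaryChooser {
      static final class Replica {
        boolean alive;
        boolean triedAsPrimary;
        long lastUpdateMs;
        Replica(boolean alive, boolean tried, long lastUpdateMs) {
          this.alive = alive;
          this.triedAsPrimary = tried;
          this.lastUpdateMs = lastUpdateMs;
        }
      }

      static int choosePrimary(List<Replica> replicas) {
        boolean allTried = true;
        for (Replica r : replicas) {
          if (r.alive && !r.triedAsPrimary) { allTried = false; }
        }
        if (allTried) {
          // Every live replica has had a turn; start the rotation over.
          for (Replica r : replicas) { r.triedAsPrimary = false; }
        }
        int primary = -1;
        long mostRecent = Long.MIN_VALUE;
        for (int i = 0; i < replicas.size(); i++) {
          Replica r = replicas.get(i);
          if (r.alive && !r.triedAsPrimary && r.lastUpdateMs > mostRecent) {
            primary = i;
            mostRecent = r.lastUpdateMs;
          }
        }
        if (primary >= 0) {
          replicas.get(primary).triedAsPrimary = true;
        }
        return primary; // -1 if no live replica is available
      }
    }
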
- if (!(replicas.get(i).isAlive() && - !replicas.get(i).getChosenAsPrimary())) { + if (!(replicas.get(i).isAlive() && !replicas.get(i).getChosenAsPrimary())) { continue; } final ReplicaUnderConstruction ruc = replicas.get(i); @@ -306,8 +325,7 @@ public void initializeBlockRecovery(long recoveryId) { } } if (primary != null) { - primary.getExpectedStorageLocation(). - getDatanodeDescriptor().addBlockToBeRecovered(this); + primary.getExpectedStorageLocation().getDatanodeDescriptor().addBlockToBeRecovered(this); primary.setChosenAsPrimary(true); NameNode.blockStateChangeLog.debug( "BLOCK* {} recovery started, primary={}", this, primary); @@ -340,25 +358,6 @@ void addReplicaIfNotPresent(DatanodeStorageInfo storage, replicas.add(new ReplicaUnderConstruction(block, storage, rState)); } - /** - * Convert an under construction block to a complete block. - * - * @return a complete block. - * @throws IOException - * if the state of the block (the generation stamp and the length) - * has not been committed by the client or it does not have at - * least a minimal number of replicas reported from data-nodes. - */ - public abstract BlockInfo convertToCompleteBlock(); - - @Override - BlockInfoUnderConstruction convertCompleteBlockToUC - (HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) { - BlockManager.LOG.error("convertCompleteBlockToUC should only be applied " + - "on complete blocks."); - return null; - } - @Override // BlockInfo // BlockInfoUnderConstruction participates in maps the same way as BlockInfo public int hashCode() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java deleted file mode 100644 index c66675a29a4bf..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; - -import java.util.ArrayList; - -/** - * Subclass of {@link BlockInfoUnderConstruction}, representing a block under - * the contiguous (instead of striped) layout. - */ -public class BlockInfoUnderConstructionContiguous extends - BlockInfoUnderConstruction { - /** - * Create block and set its state to - * {@link HdfsServerConstants.BlockUCState#UNDER_CONSTRUCTION}. 
- */ - public BlockInfoUnderConstructionContiguous(Block blk, short replication) { - this(blk, replication, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, - null); - } - - /** - * Create a block that is currently being constructed. - */ - public BlockInfoUnderConstructionContiguous(Block blk, short replication, - HdfsServerConstants.BlockUCState state, DatanodeStorageInfo[] targets) { - super(blk, replication); - Preconditions.checkState(getBlockUCState() != - HdfsServerConstants.BlockUCState.COMPLETE, - "BlockInfoUnderConstructionContiguous cannot be in COMPLETE state"); - this.blockUCState = state; - setExpectedLocations(targets); - } - - /** - * Convert an under construction block to a complete block. - * - * @return BlockInfo - a complete block. - * @throws IOException if the state of the block - * (the generation stamp and the length) has not been committed by - * the client or it does not have at least a minimal number of replicas - * reported from data-nodes. - */ - @Override - public BlockInfoContiguous convertToCompleteBlock() { - Preconditions.checkState(getBlockUCState() != - HdfsServerConstants.BlockUCState.COMPLETE, - "Trying to convert a COMPLETE block"); - return new BlockInfoContiguous(this); - } - - @Override - boolean addStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.addStorage(this, storage); - } - - @Override - boolean removeStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.removeStorage(this, storage); - } - - @Override - public int numNodes() { - return ContiguousBlockStorageOp.numNodes(this); - } - - @Override - void replaceBlock(BlockInfo newBlock) { - ContiguousBlockStorageOp.replaceBlock(this, newBlock); - } - - @Override - public void setExpectedLocations(DatanodeStorageInfo[] targets) { - int numLocations = targets == null ? 0 : targets.length; - this.replicas = new ArrayList<>(numLocations); - for(int i = 0; i < numLocations; i++) { - replicas.add( - new ReplicaUnderConstruction(this, targets[i], HdfsServerConstants.ReplicaState.RBW)); - } - } - - @Override - public Block getTruncateBlock() { - return truncateBlock; - } - - @Override - public void setTruncateBlock(Block recoveryBlock) { - this.truncateBlock = recoveryBlock; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 1597f419ff98e..508da858c78b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -618,7 +618,7 @@ public boolean checkMinReplication(BlockInfo block) { * of replicas reported from data-nodes. */ private static boolean commitBlock( - final BlockInfoUnderConstruction block, final Block commitBlock) + final BlockInfoContiguousUnderConstruction block, final Block commitBlock) throws IOException { if (block.getBlockUCState() == BlockUCState.COMMITTED) return false; @@ -650,7 +650,7 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc, return false; // already completed (e.g. 
by syncBlock) final boolean b = commitBlock( - (BlockInfoUnderConstruction) lastBlock, commitBlock); + (BlockInfoContiguousUnderConstruction) lastBlock, commitBlock); if(countNodes(lastBlock).liveReplicas() >= minReplication) completeBlock(bc, bc.numBlocks()-1, false); return b; @@ -670,8 +670,8 @@ private BlockInfo completeBlock(final BlockCollection bc, BlockInfo curBlock = bc.getBlocks()[blkIndex]; if(curBlock.isComplete()) return curBlock; - BlockInfoUnderConstruction ucBlock = - (BlockInfoUnderConstruction) curBlock; + BlockInfoContiguousUnderConstruction ucBlock = + (BlockInfoContiguousUnderConstruction) curBlock; int numNodes = ucBlock.numNodes(); if (!force && numNodes < minReplication) throw new IOException("Cannot complete block: " + @@ -713,7 +713,7 @@ private BlockInfo completeBlock(final BlockCollection bc, * when tailing edit logs as a Standby. */ public BlockInfo forceCompleteBlock(final BlockCollection bc, - final BlockInfoUnderConstruction block) throws IOException { + final BlockInfoContiguousUnderConstruction block) throws IOException { block.commitBlock(block); return completeBlock(bc, block, true); } @@ -744,7 +744,7 @@ public LocatedBlock convertLastBlockToUnderConstruction( DatanodeStorageInfo[] targets = getStorages(oldBlock); - BlockInfoUnderConstruction ucBlock = + BlockInfoContiguousUnderConstruction ucBlock = bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); @@ -846,14 +846,14 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos, /** @return a LocatedBlock for the given block */ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos ) throws IOException { - if (blk instanceof BlockInfoUnderConstruction) { + if (blk instanceof BlockInfoContiguousUnderConstruction) { if (blk.isComplete()) { throw new IOException( "blk instanceof BlockInfoUnderConstruction && blk.isComplete()" + ", blk=" + blk); } - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction) blk; + final BlockInfoContiguousUnderConstruction uc = + (BlockInfoContiguousUnderConstruction) blk; final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); return newLocatedBlock(eb, storages, pos, false); @@ -1761,11 +1761,11 @@ public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) { * reported by the datanode in the block report. 
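
StatefulBlockInfo, whose javadoc closes above, exists because a block report can carry a different (typically older) generation stamp than the stored block; the stored under-construction block, the reported block, and the reported replica state therefore travel as one unit until applied under the namesystem lock. A sketch of the pairing, with long ids standing in for Block objects:

    class ReportedReplica {
      enum ReplicaState { FINALIZED, RBW, RWR }

      final long storedBlockId;     // the namenode's under-construction block
      final long reportedGenStamp;  // what the datanode actually has
      final ReplicaState reportedState;

      ReportedReplica(long storedBlockId, long reportedGenStamp,
                      ReplicaState reportedState) {
        this.storedBlockId = storedBlockId;
        this.reportedGenStamp = reportedGenStamp;
        this.reportedState = reportedState;
      }

      @Override
      public String toString() {
        return "block " + storedBlockId + " reported as " + reportedState
            + " @gs=" + reportedGenStamp;
      }
    }
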
*/ static class StatefulBlockInfo { - final BlockInfoUnderConstruction storedBlock; + final BlockInfoContiguousUnderConstruction storedBlock; final Block reportedBlock; final ReplicaState reportedState; - StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, + StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock, Block reportedBlock, ReplicaState reportedState) { this.storedBlock = storedBlock; this.reportedBlock = reportedBlock; @@ -1806,7 +1806,7 @@ private static class BlockToMarkCorrupt { BlockToMarkCorrupt(BlockInfo stored, long gs, String reason, Reason reasonCode) { - this(new BlockInfoContiguous(stored), stored, + this(new BlockInfoContiguous((BlockInfoContiguous)stored), stored, reason, reasonCode); //the corrupted block in datanode has a different generation stamp corrupted.setGenerationStamp(gs); @@ -2165,13 +2165,13 @@ private void processFirstBlockReport( // If block is under construction, add this replica to its list if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { - ((BlockInfoUnderConstruction)storedBlock) + ((BlockInfoContiguousUnderConstruction)storedBlock) .addReplicaIfNotPresent(storageInfo, iblk, reportedState); // OpenFileBlocks only inside snapshots also will be added to safemode // threshold. So we need to update such blocks to safemode // refer HDFS-5283 - BlockInfoUnderConstruction blockUC = - (BlockInfoUnderConstruction) storedBlock; + BlockInfoContiguousUnderConstruction blockUC = + (BlockInfoContiguousUnderConstruction) storedBlock; if (namesystem.isInSnapshot(blockUC)) { int numOfReplicas = blockUC.getNumExpectedLocations(); namesystem.incrementSafeBlockCount(numOfReplicas); @@ -2326,7 +2326,7 @@ private BlockInfo processReportedBlock( if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { toUC.add(new StatefulBlockInfo( - (BlockInfoUnderConstruction) storedBlock, + (BlockInfoContiguousUnderConstruction) storedBlock, new Block(block), reportedState)); return storedBlock; } @@ -2517,7 +2517,7 @@ private boolean isBlockUnderConstruction(BlockInfo storedBlock, void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, DatanodeStorageInfo storageInfo) throws IOException { - BlockInfoUnderConstruction block = ucBlock.storedBlock; + BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock; block.addReplicaIfNotPresent( storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); @@ -2578,7 +2578,7 @@ private Block addStoredBlock(final BlockInfo block, assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); - if (block instanceof BlockInfoUnderConstruction) { + if (block instanceof BlockInfoContiguousUnderConstruction) { //refresh our copy in case the block got completed in another thread storedBlock = blocksMap.getStoredBlock(block); } else { @@ -3533,8 +3533,8 @@ public boolean checkBlocksProperlyReplicated( String src, BlockInfo[] blocks) { for (BlockInfo b: blocks) { if (!b.isComplete()) { - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction)b; + final BlockInfoContiguousUnderConstruction uc = + (BlockInfoContiguousUnderConstruction)b; final int numNodes = b.numNodes(); LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + uc.getBlockUCState() + ", replication# = " + numNodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java deleted file mode 100644 index 092f65ec3cbd8..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import com.google.common.base.Preconditions; - -/** - * Utility class with logic on managing storage locations shared between - * complete and under-construction blocks under the contiguous format -- - * {@link BlockInfoContiguous} and - * {@link BlockInfoUnderConstructionContiguous}. - */ -class ContiguousBlockStorageOp { - /** - * Ensure that there is enough space to include num more triplets. - * @return first free triplet index. - */ - private static int ensureCapacity(BlockInfo b, int num) { - Preconditions.checkArgument(b.triplets != null, - "BlockInfo is not initialized"); - int last = b.numNodes(); - if (b.triplets.length >= (last+num)*3) { - return last; - } - /* Not enough space left. Create a new array. Should normally - * happen only when replication is manually increased by the user. 
*/ - Object[] old = b.triplets; - b.triplets = new Object[(last+num)*3]; - System.arraycopy(old, 0, b.triplets, 0, last * 3); - return last; - } - - static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) { - // find the last null node - int lastNode = ensureCapacity(b, 1); - b.setStorageInfo(lastNode, storage); - b.setNext(lastNode, null); - b.setPrevious(lastNode, null); - return true; - } - - static boolean removeStorage(BlockInfo b, - DatanodeStorageInfo storage) { - int dnIndex = b.findStorageInfo(storage); - if (dnIndex < 0) { // the node is not found - return false; - } - Preconditions.checkArgument(b.getPrevious(dnIndex) == null && - b.getNext(dnIndex) == null, - "Block is still in the list and must be removed first."); - // find the last not null node - int lastNode = b.numNodes()-1; - // replace current node triplet by the lastNode one - b.setStorageInfo(dnIndex, b.getStorageInfo(lastNode)); - b.setNext(dnIndex, b.getNext(lastNode)); - b.setPrevious(dnIndex, b.getPrevious(lastNode)); - // set the last triplet to null - b.setStorageInfo(lastNode, null); - b.setNext(lastNode, null); - b.setPrevious(lastNode, null); - return true; - } - - static int numNodes(BlockInfo b) { - Preconditions.checkArgument(b.triplets != null, - "BlockInfo is not initialized"); - Preconditions.checkArgument(b.triplets.length % 3 == 0, - "Malformed BlockInfo"); - - for (int idx = b.getCapacity()-1; idx >= 0; idx--) { - if (b.getDatanode(idx) != null) { - return idx + 1; - } - } - return 0; - } - - static void replaceBlock(BlockInfo b, BlockInfo newBlock) { - for (int i = b.numNodes() - 1; i >= 0; i--) { - final DatanodeStorageInfo storage = b.getStorageInfo(i); - final boolean removed = storage.removeBlock(b); - Preconditions.checkState(removed, "currentBlock not found."); - - final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( - newBlock); - Preconditions.checkState( - result == DatanodeStorageInfo.AddBlockResult.ADDED, - "newBlock already exists."); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 99def6b1f5afd..7e12a99a55b64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -221,8 +221,8 @@ public CachedBlocksList getPendingUncached() { private final BlockQueue replicateBlocks = new BlockQueue<>(); /** A queue of blocks to be recovered by this datanode */ - private final BlockQueue recoverBlocks = - new BlockQueue<>(); + private final BlockQueue recoverBlocks = + new BlockQueue(); /** A set of blocks to be invalidated by this datanode */ private final LightWeightHashSet invalidateBlocks = new LightWeightHashSet<>(); @@ -600,7 +600,7 @@ void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) { /** * Store block recovery work. 
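
addBlockToBeRecovered, below, guards against queueing the same block twice so that two heartbeats cannot both be told to recover it, and getLeaseRecoveryCommand polls a bounded batch. A sketch of those two behaviours over a plain queue of block ids:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    class RecoveryQueue {
      private final Queue<Long> recoverBlocks = new ArrayDeque<>();

      void addBlockToBeRecovered(long blockId) {
        if (recoverBlocks.contains(blockId)) {
          // the same block must not be handed out twice
          System.out.println(blockId + " is already in the recovery queue");
          return;
        }
        recoverBlocks.add(blockId);
      }

      List<Long> poll(int maxTransfers) {
        List<Long> out = new ArrayList<>();
        while (!recoverBlocks.isEmpty() && out.size() < maxTransfers) {
          out.add(recoverBlocks.poll());
        }
        return out;
      }
    }
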
*/ - void addBlockToBeRecovered(BlockInfoUnderConstruction block) { + void addBlockToBeRecovered(BlockInfoContiguousUnderConstruction block) { if(recoverBlocks.contains(block)) { // this prevents adding the same block twice to the recovery queue BlockManager.LOG.info(block + " is already in the recovery queue"); @@ -642,12 +642,11 @@ public List getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } - public BlockInfoUnderConstruction[] getLeaseRecoveryCommand( - int maxTransfers) { - List blocks = recoverBlocks.poll(maxTransfers); + public BlockInfoContiguousUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { + List blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) return null; - return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]); + return blocks.toArray(new BlockInfoContiguousUnderConstruction[blocks.size()]); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 4266004f49c0e..3397bbb7916d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1380,12 +1380,12 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, } //check lease recovery - BlockInfoUnderConstruction[] blocks = nodeinfo + BlockInfoContiguousUnderConstruction[] blocks = nodeinfo .getLeaseRecoveryCommand(Integer.MAX_VALUE); if (blocks != null) { BlockRecoveryCommand brCommand = new BlockRecoveryCommand( blocks.length); - for (BlockInfoUnderConstruction b : blocks) { + for (BlockInfoContiguousUnderConstruction b : blocks) { final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations(); // Skip stale nodes during recovery - not heart beated for some time (30s by default). 
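// Only storages whose datanode has heartbeated within the stale
// interval are kept as candidate recovery locations below.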
final List recoveryLocations = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index e24bb2f0b81eb..474c2574d8830 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -28,8 +28,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp; @@ -96,7 +95,7 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg, final BlockInfo last = file.getLastBlock(); if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { - final Block truncatedBlock = ((BlockInfoUnderConstruction) last) + final Block truncatedBlock = ((BlockInfoContiguousUnderConstruction) last) .getTruncateBlock(); if (truncatedBlock != null) { final long truncateLength = file.computeFileSize(false, false) @@ -223,12 +222,12 @@ static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, oldBlock))); } - BlockInfoUnderConstruction truncatedBlockUC; + BlockInfoContiguousUnderConstruction truncatedBlockUC; BlockManager blockManager = fsn.getFSDirectory().getBlockManager(); if (shouldCopyOnTruncate) { // Add new truncateBlock into blocksMap and // use oldBlock as a source for copy-on-truncate recovery - truncatedBlockUC = new BlockInfoUnderConstructionContiguous(newBlock, + truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock, file.getPreferredBlockReplication()); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.setTruncateBlock(oldBlock); @@ -245,7 +244,7 @@ static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta); oldBlock = file.getLastBlock(); assert !oldBlock.isComplete() : "oldBlock should be under construction"; - truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock; + truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock; truncatedBlockUC.setTruncateBlock(new Block(oldBlock)); truncatedBlockUC.getTruncateBlock().setNumBytes( oldBlock.getNumBytes() - lastBlockDelta); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 008a945b83602..732e9cf4cae6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -43,8 +43,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -74,7 +73,7 @@ static boolean unprotectedRemoveBlock( Block block) throws IOException { // modify file-> block and blocksMap // fileNode should be under construction - BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block); + BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block); if (uc == null) { return false; } @@ -237,7 +236,7 @@ static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src, } else { // add new chosen targets to already allocated block and return BlockInfo lastBlockInFile = pendingFile.getLastBlock(); - ((BlockInfoUnderConstruction) lastBlockInFile) + ((BlockInfoContiguousUnderConstruction) lastBlockInFile) .setExpectedLocations(targets); offset = pendingFile.computeFileSize(); return makeLocatedBlock(fsn, lastBlockInFile, targets, offset); @@ -521,8 +520,8 @@ private static BlockInfo addBlock( fileINode.getPreferredBlockReplication(), true); // associate new last block for the file - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous( + BlockInfoContiguousUnderConstruction blockInfo = + new BlockInfoContiguousUnderConstruction( block, fileINode.getFileReplication(), HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, @@ -663,8 +662,8 @@ private static FileState analyzeFileState( "allocation of a new block in " + src + ". 
Returning previously" + " allocated block " + lastBlockInFile); long offset = file.computeFileSize(); - BlockInfoUnderConstruction lastBlockUC = - (BlockInfoUnderConstruction) lastBlockInFile; + BlockInfoContiguousUnderConstruction lastBlockUC = + (BlockInfoContiguousUnderConstruction) lastBlockInFile; onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile, lastBlockUC.getExpectedStorageLocations(), offset); return new FileState(file, src, iip); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 357684a35f11e..3dd076d9b6f0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -45,8 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -961,16 +960,16 @@ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file) } oldLastBlock.setNumBytes(pBlock.getNumBytes()); - if (oldLastBlock instanceof BlockInfoUnderConstruction) { + if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) { fsNamesys.getBlockManager().forceCompleteBlock(file, - (BlockInfoUnderConstruction) oldLastBlock); + (BlockInfoContiguousUnderConstruction) oldLastBlock); fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock); } } else { // the penultimate block is null Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0); } // add the new block - BlockInfo newBI = new BlockInfoUnderConstructionContiguous( + BlockInfo newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); fsNamesys.getBlockManager().addBlockCollection(newBI, file); file.addBlock(newBI); @@ -1011,11 +1010,11 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); - if (oldBlock instanceof BlockInfoUnderConstruction && + if (oldBlock instanceof BlockInfoContiguousUnderConstruction && (!isLastBlock || op.shouldCompleteLastBlock())) { changeMade = true; fsNamesys.getBlockManager().forceCompleteBlock(file, - (BlockInfoUnderConstruction) oldBlock); + (BlockInfoContiguousUnderConstruction) oldBlock); } if (changeMade) { // The state or gen-stamp of the block has changed. So, we may be @@ -1050,7 +1049,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, // TODO: shouldn't this only be true for the last block? // what about an old-version fsync() where fsync isn't called // until several blocks in? 
- newBI = new BlockInfoUnderConstructionContiguous( + newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); } else { // OP_CLOSE should add finalized blocks. This code path diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 2305e31d9097b..30517d06de77f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -777,9 +777,8 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode, // convert the last block to BlockUC if (blocks.length > 0) { BlockInfo lastBlk = blocks[blocks.length - 1]; - blocks[blocks.length - 1] = - new BlockInfoUnderConstructionContiguous( - lastBlk, replication); + blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction( + lastBlk, replication); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index d90751cf17626..e8378e58f770d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; @@ -364,8 +364,8 @@ private INodeFile loadINodeFile(INodeSection.INode n) { if (blocks.length > 0) { BlockInfo lastBlk = file.getLastBlock(); // replace the last block of file - file.setBlock(file.numBlocks() - 1, - new BlockInfoUnderConstructionContiguous(lastBlk, replication)); + file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction( + lastBlk, replication)); } } return file; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 2dc6252a0dbd0..f71cf0b765c0a 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; @@ -137,7 +137,7 @@ static INodeFile readINodeUnderConstruction( // last block is UNDER_CONSTRUCTION if(numBlocks > 0) { blk.readFields(in); - blocks[i] = new BlockInfoUnderConstructionContiguous( + blocks[i] = new BlockInfoContiguousUnderConstruction( blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null); } PermissionStatus perm = PermissionStatus.read(in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 3d176b052d8fe..1cde47c30d253 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -199,7 +199,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; @@ -3088,8 +3088,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, throw new AlreadyBeingCreatedException(message); case UNDER_CONSTRUCTION: case UNDER_RECOVERY: - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction)lastBlock; + final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock; // determine if last block was intended to be truncated Block recoveryBlock = uc.getTruncateBlock(); boolean truncateRecovery = recoveryBlock != null; @@ -3205,7 +3204,7 @@ BlockInfo getStoredBlock(Block block) { } @Override - public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) { + public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { assert hasReadLock(); final BlockCollection bc = blockUC.getBlockCollection(); if (bc == null || !(bc instanceof INodeFile) @@ -3252,7 +3251,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, waitForLoadingFSImage(); writeLock(); boolean copyTruncate = false; - BlockInfoUnderConstruction truncatedBlock = null; + BlockInfoContiguousUnderConstruction truncatedBlock = null; try { checkOperation(OperationCategory.WRITE); // If a DN tries to commit to the standby, the recovery 
will @@ -3309,7 +3308,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, return; } - truncatedBlock = (BlockInfoUnderConstruction) iFile + truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile .getLastBlock(); long recoveryId = truncatedBlock.getBlockRecoveryId(); copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId(); @@ -5335,8 +5334,8 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, assert hasWriteLock(); // check the vadility of the block and lease holder name final INodeFile pendingFile = checkUCBlock(oldBlock, clientName); - final BlockInfoUnderConstruction blockinfo - = (BlockInfoUnderConstruction)pendingFile.getLastBlock(); + final BlockInfoContiguousUnderConstruction blockinfo + = (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock(); // check new GS & length: this is not expected if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() || diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java index 74c5d094991b8..d07ae1f513b62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; /** @@ -61,7 +61,7 @@ void updateLengthOfLastBlock(INodeFile f, long lastBlockLength) BlockInfo lastBlock = f.getLastBlock(); assert (lastBlock != null) : "The last block for path " + f.getFullPathName() + " is null when updating its length"; - assert (lastBlock instanceof BlockInfoUnderConstruction) + assert (lastBlock instanceof BlockInfoContiguousUnderConstruction) : "The last block for path " + f.getFullPathName() + " is not a BlockInfoUnderConstruction when updating its length"; lastBlock.setNumBytes(lastBlockLength); @@ -76,9 +76,9 @@ void cleanZeroSizeBlock(final INodeFile f, final BlocksMapUpdateInfo collectedBlocks) { final BlockInfo[] blocks = f.getBlocks(); if (blocks != null && blocks.length > 0 - && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) { - BlockInfoUnderConstruction lastUC = - (BlockInfoUnderConstruction) blocks[blocks.length - 1]; + && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) { + BlockInfoContiguousUnderConstruction lastUC = + (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1]; if (lastUC.getNumBytes() == 0) { // this is a 0-sized block. 
do not need check its UC state here collectedBlocks.addDeleteBlock(lastUC); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index ec0c6c2100634..3f242e0edb09f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; @@ -231,7 +231,7 @@ public void setBlock(int index, BlockInfo blk) { } @Override // BlockCollection, the file should be under construction - public BlockInfoUnderConstruction setLastBlock( + public BlockInfoContiguousUnderConstruction setLastBlock( BlockInfo lastBlock, DatanodeStorageInfo[] locations) throws IOException { Preconditions.checkState(isUnderConstruction(), @@ -240,7 +240,7 @@ public BlockInfoUnderConstruction setLastBlock( if (numBlocks() == 0) { throw new IOException("Failed to set last block: File is empty."); } - BlockInfoUnderConstruction ucBlock = + BlockInfoContiguousUnderConstruction ucBlock = lastBlock.convertToBlockUnderConstruction( BlockUCState.UNDER_CONSTRUCTION, locations); setBlock(numBlocks() - 1, ucBlock); @@ -251,7 +251,7 @@ public BlockInfoUnderConstruction setLastBlock( * Remove a block from the block list. This block should be * the last one on the list. 
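 * Returns null if the file has no blocks or if the given block does
 * not match the current last block.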
*/ - BlockInfoUnderConstruction removeLastBlock(Block oldblock) { + BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) { Preconditions.checkState(isUnderConstruction(), "file is no longer under construction"); if (blocks == null || blocks.length == 0) { @@ -262,8 +262,8 @@ BlockInfoUnderConstruction removeLastBlock(Block oldblock) { return null; } - BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction)blocks[size_1]; + BlockInfoContiguousUnderConstruction uc = + (BlockInfoContiguousUnderConstruction)blocks[size_1]; //copy to a new list BlockInfo[] newlist = new BlockInfo[size_1]; System.arraycopy(blocks, 0, newlist, 0, size_1); @@ -696,7 +696,7 @@ public final long computeFileSize(boolean includesLastUcBlock, final int last = blocks.length - 1; //check if the last block is BlockInfoUnderConstruction long size = blocks[last].getNumBytes(); - if (blocks[last] instanceof BlockInfoUnderConstruction) { + if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) { if (!includesLastUcBlock) { size = 0; } else if (usePreferredBlockSize4LastUcBlock) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java index 1732865a712a2..a5053bc6e13dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java @@ -19,7 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.ipc.StandbyException; @@ -45,7 +45,7 @@ public interface Namesystem extends RwLock, SafeMode { void checkOperation(OperationCategory read) throws StandbyException; - boolean isInSnapshot(BlockInfoUnderConstruction blockUC); + boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC); CacheManager getCacheManager(); } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java index d081a6b5dda41..6b8388e0fe1f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; @@ -133,7 +133,7 @@ void combineAndCollectSnapshotBlocks( Block dontRemoveBlock = null; if (lastBlock != null && lastBlock.getBlockUCState().equals( 
HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) { - dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock) + dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock) .getTruncateBlock(); } // Collect the remaining blocks of the file, ignoring truncate block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 5e1c597be4ad5..a742757dcef8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -110,7 +110,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -1637,9 +1637,9 @@ public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn, BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock()); assertTrue("Block " + blk + " should be under construction, " + "got: " + storedBlock, - storedBlock instanceof BlockInfoUnderConstruction); - BlockInfoUnderConstruction ucBlock = - (BlockInfoUnderConstruction)storedBlock; + storedBlock instanceof BlockInfoContiguousUnderConstruction); + BlockInfoContiguousUnderConstruction ucBlock = + (BlockInfoContiguousUnderConstruction)storedBlock; // We expect that the replica with the most recent heart beat will be // the one to be in charge of the synchronization / recovery protocol. 
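// getExpectedPrimaryNode mirrors that choice by scanning the expected
// storages for the datanode with the most recent update time.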
final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java index 630cd1c756efd..a7ba29399dcad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.util.Time; import org.junit.Test; /** @@ -39,8 +40,7 @@ public void testInitializeBlockRecovery() throws Exception { DatanodeDescriptor dd3 = s3.getDatanodeDescriptor(); dd1.isAlive = dd2.isAlive = dd3.isAlive = true; - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous( + BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, BlockUCState.UNDER_CONSTRUCTION, @@ -51,7 +51,7 @@ public void testInitializeBlockRecovery() throws Exception { DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000); blockInfo.initializeBlockRecovery(1); - BlockInfoUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1); + BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1); assertEquals(blockInfoRecovery[0], blockInfo); // Recovery attempt #2. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 396dff302a998..2d3d90a1cda18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -726,7 +726,7 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception { // verify the storage info is correct assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo (ds) >= 0); - assertTrue(((BlockInfoUnderConstruction) bm. + assertTrue(((BlockInfoContiguousUnderConstruction) bm. 
getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0); assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId)) .findStorageInfo(ds) >= 0); @@ -747,8 +747,8 @@ private BlockInfo addBlockToBM(long blkId) { private BlockInfo addUcBlockToBM(long blkId) { Block block = new Block(blkId); - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous(block, (short) 3); + BlockInfoContiguousUnderConstruction blockInfo = + new BlockInfoContiguousUnderConstruction(block, (short) 3); BlockCollection bc = Mockito.mock(BlockCollection.class); Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication(); bm.blocksMap.addBlockCollection(blockInfo, bc); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index e48e9e84c6333..6fc30ba6d6314 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.util.Time; import org.junit.Test; /** @@ -172,8 +173,7 @@ public void testHeartbeatBlockRecovery() throws Exception { dd1.getStorageInfos()[0], dd2.getStorageInfos()[0], dd3.getStorageInfos()[0]}; - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous( + BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, BlockUCState.UNDER_RECOVERY, storages); dd1.addBlockToBeRecovered(blockInfo); @@ -195,7 +195,7 @@ public void testHeartbeatBlockRecovery() throws Exception { // More than the default stale interval of 30 seconds. DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0); - blockInfo = new BlockInfoUnderConstructionContiguous( + blockInfo = new BlockInfoContiguousUnderConstruction( new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, BlockUCState.UNDER_RECOVERY, storages); dd1.addBlockToBeRecovered(blockInfo); @@ -216,7 +216,7 @@ public void testHeartbeatBlockRecovery() throws Exception { // More than the default stale interval of 30 seconds. 
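// The -40s and -80s offsets below push both replicas past that
// threshold.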
DFSTestUtil.resetLastUpdatesWithOffset(dd2, - 40 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd3, - 80 * 1000); - blockInfo = new BlockInfoUnderConstructionContiguous( + blockInfo = new BlockInfoContiguousUnderConstruction( new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, BlockUCState.UNDER_RECOVERY, storages); dd1.addBlockToBeRecovered(blockInfo); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 28129572370af..6553185e0ab95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -1182,8 +1182,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication() // block under construction, the BlockManager will realize the expected // replication has been achieved and remove it from the under-replicated // queue. - BlockInfoUnderConstruction info = - new BlockInfoUnderConstructionContiguous(block1, (short) 1); + BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1); BlockCollection bc = mock(BlockCollection.class); when(bc.getPreferredBlockReplication()).thenReturn((short)1); bm.addBlockCollection(info, bc); @@ -1239,7 +1238,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication() DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo( dataNodes[0], new DatanodeStorage("s1"))}; - final BlockInfoUnderConstruction ucBlock = + final BlockInfoContiguousUnderConstruction ucBlock = info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, storageAry); DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java index 872ff9c490f6d..f372bec3ba62e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.junit.AfterClass; @@ -170,7 +170,7 @@ public void testGetBlockLocations() throws IOException { final List blocks = lb.getLocatedBlocks(); assertEquals(i, blocks.size()); final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock(); - assertTrue(b instanceof BlockInfoUnderConstruction); + assertTrue(b instanceof BlockInfoContiguousUnderConstruction); if (++i < NUM_BLOCKS) { // write one more block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java index 9de426e450538..c218b7c2cc62d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java @@ -24,8 +24,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.junit.Test; @@ -69,10 +68,8 @@ private FSNamesystem makeNameSystemSpy(Block block, INodeFile file) namesystem.dir.getINodeMap().put(file); FSNamesystem namesystemSpy = spy(namesystem); - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous( - block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, - targets); + BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( + block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets); blockInfo.setBlockCollection(file); blockInfo.setGenerationStamp(genStamp); blockInfo.initializeBlockRecovery(genStamp); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 8e54edc81395c..767f4de543bb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.security.UserGroupInformation; @@ -1016,7 +1016,7 @@ public void testTruncateRecovery() throws IOException { is(fsn.getBlockIdManager().getGenerationStampV2())); assertThat(file.getLastBlock().getBlockUCState(), is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); - long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock()) + long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock()) .getBlockRecoveryId(); assertThat(blockRecoveryId, is(initialGenStamp + 1)); fsn.getEditLog().logTruncate( @@ -1049,7 +1049,7 @@ public void testTruncateRecovery() throws IOException { is(fsn.getBlockIdManager().getGenerationStampV2())); assertThat(file.getLastBlock().getBlockUCState(), is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); - long blockRecoveryId = 
((BlockInfoUnderConstruction) file.getLastBlock()) + long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock()) .getBlockRecoveryId(); assertThat(blockRecoveryId, is(initialGenStamp + 1)); fsn.getEditLog().logTruncate( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 14d9a1ee94277..d202fb788f8f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -72,7 +72,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; @@ -752,8 +752,8 @@ void invoke() throws Exception { boolean checkNamenodeBeforeReturn() throws Exception { INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory() .getINode4Write(file).asFile(); - BlockInfoUnderConstruction blkUC = - (BlockInfoUnderConstruction) (fileNode.getBlocks())[1]; + BlockInfoContiguousUnderConstruction blkUC = + (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1]; int datanodeNum = blkUC.getExpectedStorageLocations().length; for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) { Thread.sleep(1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java index 824f45baa4165..a1abd0892e824 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; import org.apache.hadoop.hdfs.server.datanode.BlockScanner; @@ -177,7 +177,7 @@ public static void checkSnapshotCreation(DistributedFileSystem hdfs, * Specific information for different types of INode: * {@link INodeDirectory}:childrenSize * {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()} - * and {@link BlockInfoUnderConstruction#toString()} for detailed information. + * and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information. 
* {@link FileWithSnapshot}: next link * * @see INode#dumpTreeRecursively() From 5df1a2591f2faffcc718f7e103b2e37d6592e96a Mon Sep 17 00:00:00 2001 From: Carlo Curino Date: Sun, 2 Aug 2015 01:51:38 -0700 Subject: [PATCH 076/130] YARN-3974. Refactor the reservation system test cases to use parameterized base test. (subru via curino) --- hadoop-yarn-project/CHANGES.txt | 3 + .../resourcemanager/reservation/Plan.java | 3 +- .../resourcemanager/reservation/PlanView.java | 11 +- .../ReservationSchedulerConfiguration.java | 14 +- .../reservation/ReservationSystem.java | 15 +- .../ReservationSystemTestUtil.java | 177 ++++++--------- .../TestCapacityReservationSystem.java | 94 -------- .../TestCapacitySchedulerPlanFollower.java | 11 +- .../TestFairReservationSystem.java | 127 ----------- .../TestFairSchedulerPlanFollower.java | 71 +++--- .../reservation/TestReservationSystem.java | 213 ++++++++++++++++++ 11 files changed, 338 insertions(+), 401 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityReservationSystem.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairReservationSystem.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 1840b1bdec3e2..c4b19866053fb 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -377,6 +377,9 @@ Release 2.8.0 - UNRELEASED YARN-4019. Add JvmPauseMonitor to ResourceManager and NodeManager. (Robert Kanter via junping_du) + YARN-3974. Refactor the reservation system test cases to use parameterized + base test. (subru via curino) + OPTIMIZATIONS YARN-3339. TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/Plan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/Plan.java index f7ffbd0effde1..a708a4af21fb0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/Plan.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/Plan.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation; import org.apache.hadoop.yarn.api.records.ReservationDefinition; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent; /** * A Plan represents the central data structure of a reservation system that @@ -28,7 +27,7 @@ * previously accepted will be honored. 
* * {@link ReservationDefinition} submitted by the users through the RM public - * APIs are passed to appropriate {@link ReservationAgent}s, which in turn will + * APIs are passed to appropriate {@code ReservationAgent}s, which in turn will * consult the Plan (via the {@link PlanView} interface) and try to determine * whether there are sufficient resources available in this Plan to satisfy the * temporal and resource constraints of a {@link ReservationDefinition}. If a diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java index be68906e81cde..66c66cacb4d11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.reservation; -import java.util.Set; - import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent; + +import java.util.Set; /** * This interface provides a read-only view on the allocations made in this - * plan. This methods are used for example by {@link ReservationAgent}s to + * plan. This methods are used for example by {@code ReservationAgent}s to * determine the free resources in a certain point in time, and by * PlanFollowerPolicy to publish this plan to the scheduler. 
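 * It deliberately exposes no mutators; allocations are added or
 * removed through the {@link Plan} itself.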
*/ @@ -66,7 +65,7 @@ public interface PlanView extends PlanContext { * @return the total {@link Resource} reserved for all users at the specified * time */ - public Resource getTotalCommittedResources(long tick); + Resource getTotalCommittedResources(long tick); /** * Returns the total {@link Resource} reserved for a given user at the @@ -88,7 +87,7 @@ public interface PlanView extends PlanContext { * @return the overall capacity in terms of {@link Resource} assigned to this * plan */ - public Resource getTotalCapacity(); + Resource getTotalCapacity(); /** * Gets the time (UTC in ms) at which the first reservation starts diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSchedulerConfiguration.java index c430b1fea525f..afca8f934978f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSchedulerConfiguration.java @@ -21,8 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ReservationDefinition; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.Planner; public abstract class ReservationSchedulerConfiguration extends Configuration { @@ -64,7 +62,7 @@ public ReservationSchedulerConfiguration( /** * Checks if the queue participates in reservation based scheduling - * @param queue + * @param queue name of the queue * @return true if the queue participates in reservation based scheduling */ public abstract boolean isReservable(String queue); @@ -110,10 +108,10 @@ public String getReservationAdmissionPolicy(String queue) { } /** - * Gets the name of the {@link ReservationAgent} class associated with the + * Gets the name of the {@code ReservationAgent} class associated with the * queue * @param queue name of the queue - * @return the class name of the {@link ReservationAgent} + * @return the class name of the {@code ReservationAgent} */ public String getReservationAgent(String queue) { return DEFAULT_RESERVATION_AGENT_NAME; @@ -129,10 +127,10 @@ public boolean getShowReservationAsQueues(String queuePath) { } /** - * Gets the name of the {@link Planner} class associated with the + * Gets the name of the {@code Planner} class associated with the * queue * @param queue name of the queue - * @return the class name of the {@link Planner} + * @return the class name of the {@code Planner} */ public String getReplanner(String queue) { return DEFAULT_RESERVATION_PLANNER_NAME; @@ -150,7 +148,7 @@ public boolean getMoveOnExpiry(String queue) { } /** - * Gets the time in milliseconds for which the {@link Planner} will verify + * Gets the time in milliseconds for which the {@code Planner} will verify * the {@link Plan}s satisfy the constraints * @param queue name of the queue * @return the time in milliseconds for which to check constraints 
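For context on the per-queue getters documented above, here is a minimal
sketch of how they are typically consulted together. This is illustrative
only and not part of the patch; the queue name "root.reservable" is
hypothetical, and only methods visible in the hunk above are used.

    // Summarize the reservation settings of a single queue (sketch).
    static String describeReservationConfig(
        ReservationSchedulerConfiguration conf) {
      final String q = "root.reservable";  // hypothetical queue name
      if (!conf.isReservable(q)) {
        return q + " does not participate in reservation based scheduling";
      }
      return q
          + ": agent=" + conf.getReservationAgent(q)      // agent class name
          + ", planner=" + conf.getReplanner(q)           // planner class name
          + ", moveOnExpiry=" + conf.getMoveOnExpiry(q);  // move vs. kill apps
    }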
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystem.java index 3309693843d5b..7785885735403 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystem.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation; -import java.util.Map; - import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -28,16 +26,15 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent; + +import java.util.Map; /** * This interface is the one implemented by any system that wants to support - * Reservations i.e. make {@link Resource} allocations in future. Implementors + * Reservations i.e. make {@code Resource} allocations in future. Implementors * need to bootstrap all configured {@link Plan}s in the active * {@link ResourceScheduler} along with their corresponding - * {@link ReservationAgent} and {@link SharingPolicy}. It is also responsible + * {@code ReservationAgent} and {@link SharingPolicy}. It is also responsible * for managing the {@link PlanFollower} to ensure the {@link Plan}s are in sync * with the {@link ResourceScheduler}. */ @@ -49,7 +46,7 @@ public interface ReservationSystem { * Set RMContext for {@link ReservationSystem}. This method should be called * immediately after instantiating a reservation system once. * - * @param rmContext created by {@link ResourceManager} + * @param rmContext created by {@code ResourceManager} */ void setRMContext(RMContext rmContext); @@ -57,7 +54,7 @@ public interface ReservationSystem { * Re-initialize the {@link ReservationSystem}. 
* * @param conf configuration - * @param rmContext current context of the {@link ResourceManager} + * @param rmContext current context of the {@code ResourceManager} * @throws YarnException */ void reinitialize(Configuration conf, RMContext rmContext) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java index e07b33ea6132b..954023b2e002e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java @@ -1,19 +1,19 @@ /******************************************************************************* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. *******************************************************************************/ package org.apache.hadoop.yarn.server.resourcemanager.reservation; @@ -73,11 +73,11 @@ public static ReservationId getNewReservationId() { public static ReservationSchedulerConfiguration createConf( String reservationQ, long timeWindow, float instConstraint, float avgConstraint) { - ReservationSchedulerConfiguration conf = mock - (ReservationSchedulerConfiguration.class); + ReservationSchedulerConfiguration conf = + mock(ReservationSchedulerConfiguration.class); when(conf.getReservationWindow(reservationQ)).thenReturn(timeWindow); - when(conf.getInstantaneousMaxCapacity(reservationQ)).thenReturn - (instConstraint); + when(conf.getInstantaneousMaxCapacity(reservationQ)) + .thenReturn(instConstraint); when(conf.getAverageCapacity(reservationQ)).thenReturn(avgConstraint); return conf; } @@ -91,21 +91,8 @@ public static void validateReservationQueue( Assert.assertEquals(8192, plan.getTotalCapacity().getMemory()); Assert.assertTrue( plan.getReservationAgent() instanceof AlignedPlannerWithGreedy); - Assert.assertTrue( - plan.getSharingPolicy() instanceof CapacityOverTimePolicy); - } - - public static void validateNewReservationQueue( - AbstractReservationSystem reservationSystem, String newQ) { - Plan newPlan = reservationSystem.getPlan(newQ); - Assert.assertNotNull(newPlan); - Assert.assertTrue(newPlan instanceof InMemoryPlan); - Assert.assertEquals(newQ, newPlan.getQueueName()); - Assert.assertEquals(1024, newPlan.getTotalCapacity().getMemory()); Assert - .assertTrue(newPlan.getReservationAgent() instanceof AlignedPlannerWithGreedy); - Assert - .assertTrue(newPlan.getSharingPolicy() instanceof CapacityOverTimePolicy); + .assertTrue(plan.getSharingPolicy() instanceof CapacityOverTimePolicy); } public static void setupFSAllocationFile(String allocationFile) @@ -129,7 +116,8 @@ public static void setupFSAllocationFile(String allocationFile) out.println(""); out.println("8"); out.println(""); - out.println("drf"); + out.println( + "drf"); out.println(""); out.close(); } @@ -153,21 +141,20 @@ public static void updateFSAllocationFile(String allocationFile) out.println(""); out.println(""); out.println(""); - out.println("80"); + out.println("10"); out.println(""); out.println(""); out.println(""); - out.println("10"); + out.println("80"); out.println(""); - out.println("drf"); + out.println( + "drf"); out.println(""); out.close(); } - public static FairScheduler setupFairScheduler( - ReservationSystemTestUtil testUtil, - RMContext rmContext, Configuration conf, int numContainers) throws - IOException { + public static FairScheduler setupFairScheduler(RMContext rmContext, + Configuration conf, int numContainers) throws IOException { FairScheduler scheduler = new FairScheduler(); scheduler.setRMContext(rmContext); @@ -178,7 +165,8 @@ public static FairScheduler setupFairScheduler( scheduler.reinitialize(conf, rmContext); - Resource resource = testUtil.calculateClusterResource(numContainers); + Resource resource = + ReservationSystemTestUtil.calculateClusterResource(numContainers); RMNode node1 = MockNodes.newNodeInfo(1, resource, 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new 
NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -224,8 +212,9 @@ public CapacityScheduler mockCapacityScheduler(int numContainers) return cs; } - public static void initializeRMContext(int numContainers, - AbstractYarnScheduler scheduler, RMContext mockRMContext) { + @SuppressWarnings("rawtypes") public static void initializeRMContext( + int numContainers, AbstractYarnScheduler scheduler, + RMContext mockRMContext) { when(mockRMContext.getScheduler()).thenReturn(scheduler); Resource r = calculateClusterResource(numContainers); @@ -233,18 +222,17 @@ public static void initializeRMContext(int numContainers, } public static RMContext createRMContext(Configuration conf) { - RMContext mockRmContext = - Mockito.spy(new RMContextImpl(null, null, null, null, null, null, + RMContext mockRmContext = Mockito.spy( + new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), null)); RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class); - when( - nlm.getQueueResource(any(String.class), anySetOf(String.class), + when(nlm.getQueueResource(any(String.class), anySetOf(String.class), any(Resource.class))).thenAnswer(new Answer() { - @Override - public Resource answer(InvocationOnMock invocation) throws Throwable { + @Override public Resource answer(InvocationOnMock invocation) + throws Throwable { Object[] args = invocation.getArguments(); return (Resource) args[2]; } @@ -252,8 +240,8 @@ public Resource answer(InvocationOnMock invocation) throws Throwable { when(nlm.getResourceByLabel(any(String.class), any(Resource.class))) .thenAnswer(new Answer() { - @Override - public Resource answer(InvocationOnMock invocation) throws Throwable { + @Override public Resource answer(InvocationOnMock invocation) + throws Throwable { Object[] args = invocation.getArguments(); return (Resource) args[1]; } @@ -263,21 +251,22 @@ public Resource answer(InvocationOnMock invocation) throws Throwable { return mockRmContext; } - public static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) { + public static void setupQueueConfiguration( + CapacitySchedulerConfiguration conf) { // Define default queue final String defQ = CapacitySchedulerConfiguration.ROOT + ".default"; conf.setCapacity(defQ, 10); // Define top-level queues - conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { - "default", "a", reservationQ }); + conf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] { "default", "a", reservationQ }); final String A = CapacitySchedulerConfiguration.ROOT + ".a"; conf.setCapacity(A, 10); final String dedicated = - CapacitySchedulerConfiguration.ROOT - + CapacitySchedulerConfiguration.DOT + reservationQ; + CapacitySchedulerConfiguration.ROOT + CapacitySchedulerConfiguration.DOT + + reservationQ; conf.setCapacity(dedicated, 80); // Set as reservation queue conf.setReservable(dedicated, true); @@ -290,44 +279,43 @@ public static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) conf.setCapacity(A2, 70); } - public String getFullReservationQueueName() { + public static String getFullReservationQueueName() { return CapacitySchedulerConfiguration.ROOT + CapacitySchedulerConfiguration.DOT + reservationQ; } - public String getreservationQueueName() { + public static String getReservationQueueName() { return reservationQ; } - public void updateQueueConfiguration(CapacitySchedulerConfiguration conf, - String newQ) { + public static void 
updateQueueConfiguration( + CapacitySchedulerConfiguration conf, String newQ) { // Define default queue - final String prefix = - CapacitySchedulerConfiguration.ROOT - + CapacitySchedulerConfiguration.DOT; + final String prefix = CapacitySchedulerConfiguration.ROOT + + CapacitySchedulerConfiguration.DOT; final String defQ = prefix + "default"; conf.setCapacity(defQ, 5); // Define top-level queues - conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { - "default", "a", reservationQ, newQ }); + conf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] { "default", "a", reservationQ, newQ }); final String A = prefix + "a"; conf.setCapacity(A, 5); final String dedicated = prefix + reservationQ; - conf.setCapacity(dedicated, 80); + conf.setCapacity(dedicated, 10); // Set as reservation queue conf.setReservable(dedicated, true); - conf.setCapacity(prefix + newQ, 10); + conf.setCapacity(prefix + newQ, 80); // Set as reservation queue conf.setReservable(prefix + newQ, true); // Define 2nd-level queues final String A1 = A + ".a1"; final String A2 = A + ".a2"; - conf.setQueues(A, new String[]{"a1", "a2"}); + conf.setQueues(A, new String[] { "a1", "a2" }); conf.setCapacity(A1, 30); conf.setCapacity(A2, 70); } @@ -349,9 +337,8 @@ public static ReservationDefinition generateRandomRR(Random rand, long i) { int gang = 1 + rand.nextInt(9); int par = (rand.nextInt(1000) + 1) * gang; long dur = rand.nextInt(2 * 3600 * 1000); // random duration within 2h - ReservationRequest r = - ReservationRequest.newInstance(Resource.newInstance(1024, 1), par, - gang, dur); + ReservationRequest r = ReservationRequest + .newInstance(Resource.newInstance(1024, 1), par, gang, dur); ReservationRequests reqs = new ReservationRequestsPBImpl(); reqs.setReservationResources(Collections.singletonList(r)); rand.nextInt(3); @@ -364,53 +351,19 @@ public static ReservationDefinition generateRandomRR(Random rand, long i) { } - public static ReservationDefinition generateBigRR(Random rand, long i) { - rand.setSeed(i); - long now = System.currentTimeMillis(); - - // start time at random in the next 2 hours - long arrival = rand.nextInt(2 * 3600 * 1000); - // deadline at random in the next day - long deadline = rand.nextInt(24 * 3600 * 1000); - - // create a request with a single atomic ask - ReservationDefinition rr = new ReservationDefinitionPBImpl(); - rr.setArrival(now + arrival); - rr.setDeadline(now + deadline); - - int gang = 1; - int par = 100000; // 100k tasks - long dur = rand.nextInt(60 * 1000); // 1min tasks - ReservationRequest r = - ReservationRequest.newInstance(Resource.newInstance(1024, 1), par, - gang, dur); - ReservationRequests reqs = new ReservationRequestsPBImpl(); - reqs.setReservationResources(Collections.singletonList(r)); - rand.nextInt(3); - ReservationRequestInterpreter[] type = - ReservationRequestInterpreter.values(); - reqs.setInterpreter(type[rand.nextInt(type.length)]); - rr.setReservationRequests(reqs); - - return rr; - } - public static Map generateAllocation( long startTime, long step, int[] alloc) { - Map req = - new TreeMap(); + Map req = new TreeMap<>(); for (int i = 0; i < alloc.length; i++) { - req.put(new ReservationInterval(startTime + i * step, startTime + (i + 1) - * step), ReservationSystemUtil.toResource(ReservationRequest - .newInstance( - Resource.newInstance(1024, 1), alloc[i]))); + req.put(new ReservationInterval(startTime + i * step, + startTime + (i + 1) * step), ReservationSystemUtil.toResource( + ReservationRequest + .newInstance(Resource.newInstance(1024, 1), 
alloc[i]))); } return req; } public static Resource calculateClusterResource(int numContainers) { - Resource clusterResource = Resource.newInstance(numContainers * 1024, - numContainers); - return clusterResource; + return Resource.newInstance(numContainers * 1024, numContainers); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityReservationSystem.java deleted file mode 100644 index 11e52c47073f0..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityReservationSystem.java +++ /dev/null @@ -1,94 +0,0 @@ -/******************************************************************************* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
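// --- Editorial aside (not part of the patch): the deletion starting here
// removes TestCapacityReservationSystem, and TestFairReservationSystem is
// removed further down; both are folded into the parameterized
// TestReservationSystem that this patch adds, which runs every test once per
// scheduler type. A self-contained sketch of the underlying JUnit4 pattern,
// with hypothetical names:
import java.util.Arrays;
import java.util.Collection;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class SchedulerSketchTest {
  @Parameters
  public static Collection<Object[]> schedulers() {
    // one test run per row: capacity scheduler, then fair scheduler
    return Arrays.asList(new Object[][] { { "capacity" }, { "fair" } });
  }

  private final String schedulerType;

  public SchedulerSketchTest(String schedulerType) {
    this.schedulerType = schedulerType;
  }

  @Test
  public void testInitialize() {
    // the same assertion body is exercised for each scheduler type
    Assert.assertNotNull(schedulerType);
  }
}
// --- end of aside ---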
- *******************************************************************************/ -package org.apache.hadoop.yarn.server.resourcemanager.reservation; - -import java.io.IOException; - -import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; -import org.junit.Assert; -import org.junit.Test; - -public class TestCapacityReservationSystem { - - @Test - public void testInitialize() { - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); - CapacityScheduler capScheduler = null; - try { - capScheduler = testUtil.mockCapacityScheduler(10); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - CapacityReservationSystem reservationSystem = - new CapacityReservationSystem(); - reservationSystem.setRMContext(capScheduler.getRMContext()); - try { - reservationSystem.reinitialize(capScheduler.getConf(), - capScheduler.getRMContext()); - } catch (YarnException e) { - Assert.fail(e.getMessage()); - } - String planQName = testUtil.getreservationQueueName(); - ReservationSystemTestUtil.validateReservationQueue(reservationSystem, - planQName); - } - - @Test - public void testReinitialize() { - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); - CapacityScheduler capScheduler = null; - try { - capScheduler = testUtil.mockCapacityScheduler(10); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - CapacityReservationSystem reservationSystem = - new CapacityReservationSystem(); - CapacitySchedulerConfiguration conf = capScheduler.getConfiguration(); - RMContext mockContext = capScheduler.getRMContext(); - reservationSystem.setRMContext(mockContext); - try { - reservationSystem.reinitialize(capScheduler.getConfiguration(), - mockContext); - } catch (YarnException e) { - Assert.fail(e.getMessage()); - } - // Assert queue in original config - String planQName = testUtil.getreservationQueueName(); - ReservationSystemTestUtil.validateReservationQueue(reservationSystem, planQName); - - // Dynamically add a plan - String newQ = "reservation"; - Assert.assertNull(reservationSystem.getPlan(newQ)); - testUtil.updateQueueConfiguration(conf, newQ); - try { - capScheduler.reinitialize(conf, mockContext); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - try { - reservationSystem.reinitialize(conf, mockContext); - } catch (YarnException e) { - Assert.fail(e.getMessage()); - } - ReservationSystemTestUtil.validateNewReservationQueue(reservationSystem, newQ); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacitySchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacitySchedulerPlanFollower.java index 15f9a89f1c686..9152d1207f96b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacitySchedulerPlanFollower.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacitySchedulerPlanFollower.java @@ -57,7 +57,8 @@ import org.mockito.Matchers; import org.mockito.Mockito; -public class TestCapacitySchedulerPlanFollower extends TestSchedulerPlanFollowerBase { +public class TestCapacitySchedulerPlanFollower extends + TestSchedulerPlanFollowerBase { private RMContext rmContext; private RMContext spyRMContext; @@ -116,11 +117,11 @@ public void setUp() throws Exception { } private void setupPlanFollower() throws Exception { - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); mClock = mock(Clock.class); mAgent = mock(ReservationAgent.class); - String reservationQ = testUtil.getFullReservationQueueName(); + String reservationQ = + ReservationSystemTestUtil.getFullReservationQueueName(); CapacitySchedulerConfiguration csConf = cs.getConfiguration(); csConf.setReservationWindow(reservationQ, 20L); csConf.setMaximumCapacity(reservationQ, 40); @@ -144,7 +145,7 @@ public void testWithKillOnExpiry() throws PlanningException, @Override protected void verifyCapacity(Queue defQ) { - CSQueue csQueue = (CSQueue)defQ; + CSQueue csQueue = (CSQueue) defQ; assertTrue(csQueue.getCapacity() > 0.9); } @@ -155,7 +156,7 @@ protected Queue getDefaultQueue() { @Override protected int getNumberOfApplications(Queue queue) { - CSQueue csQueue = (CSQueue)queue; + CSQueue csQueue = (CSQueue) queue; int numberOfApplications = csQueue.getNumApplications(); return numberOfApplications; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairReservationSystem.java deleted file mode 100644 index 4b685b28b0214..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairReservationSystem.java +++ /dev/null @@ -1,127 +0,0 @@ -/******************************************************************************* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
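// --- Editorial aside (not part of the patch): across these test diffs the
// stateless helpers on ReservationSystemTestUtil become static, so callers
// stop constructing a throwaway ReservationSystemTestUtil instance and call
// the class directly. A minimal sketch of the refactor, using hypothetical
// names (the real queue constants live in ReservationSystemTestUtil):
final class QueueNameSketch {
  private static final String ROOT = "root";
  private static final String RESERVATION_Q = "dedicated";

  private QueueNameSketch() {
    // utility class: static methods only, never instantiated
  }

  static String fullReservationQueueName() {
    return ROOT + "." + RESERVATION_Q;
  }

  public static void main(String[] args) {
    // before the refactor: new QueueNameSketch().fullReservationQueueName()
    // after the refactor:  QueueNameSketch.fullReservationQueueName()
    System.out.println(QueueNameSketch.fullReservationQueueName());
  }
}
// --- end of aside ---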
- *******************************************************************************/ -package org.apache.hadoop.yarn.server.resourcemanager.reservation; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerTestBase; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; - -public class TestFairReservationSystem { - private final static String ALLOC_FILE = new File(FairSchedulerTestBase. - TEST_DIR, - TestFairReservationSystem.class.getName() + ".xml").getAbsolutePath(); - private Configuration conf; - private FairScheduler scheduler; - private FairSchedulerTestBase testHelper = new FairSchedulerTestBase(); - - protected Configuration createConfiguration() { - Configuration conf = testHelper.createConfiguration(); - conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, - ResourceScheduler.class); - conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - return conf; - } - - @Before - public void setup() throws IOException { - conf = createConfiguration(); - } - - @After - public void teardown() { - conf = null; - } - - @Test - public void testFairReservationSystemInitialize() throws IOException { - ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE); - - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); - - // Setup - RMContext mockRMContext = testUtil.createRMContext(conf); - scheduler = ReservationSystemTestUtil.setupFairScheduler(testUtil, - mockRMContext, conf, 10); - - FairReservationSystem reservationSystem = new FairReservationSystem(); - reservationSystem.setRMContext(mockRMContext); - - try { - reservationSystem.reinitialize(scheduler.getConf(), mockRMContext); - } catch (YarnException e) { - Assert.fail(e.getMessage()); - } - - ReservationSystemTestUtil.validateReservationQueue(reservationSystem, - testUtil.getFullReservationQueueName()); - } - - @Test - public void testFairReservationSystemReinitialize() throws IOException { - ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE); - - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); - - // Setup - RMContext mockRMContext = testUtil.createRMContext(conf); - scheduler = ReservationSystemTestUtil.setupFairScheduler(testUtil, - mockRMContext, conf, 10); - - FairReservationSystem reservationSystem = new FairReservationSystem(); - reservationSystem.setRMContext(mockRMContext); - - try { - reservationSystem.reinitialize(scheduler.getConf(), mockRMContext); - } catch (YarnException e) { - Assert.fail(e.getMessage()); - } - - // Assert queue in original config - final String planQNam = testUtil.getFullReservationQueueName(); - ReservationSystemTestUtil.validateReservationQueue(reservationSystem, - planQNam); - - // Dynamically add a plan - ReservationSystemTestUtil.updateFSAllocationFile(ALLOC_FILE); - scheduler.reinitialize(conf, mockRMContext); - - try { - reservationSystem.reinitialize(conf, mockRMContext); - } catch (YarnException e) { - 
Assert.fail(e.getMessage()); - } - - String newQueue = "root.reservation"; - ReservationSystemTestUtil.validateNewReservationQueue - (reservationSystem, newQueue); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java index 43316f7a589e4..1b4e2f8e13500 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java @@ -1,20 +1,20 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.yarn.server.resourcemanager.reservation; import static org.junit.Assert.assertNotNull; @@ -62,9 +62,9 @@ public class TestFairSchedulerPlanFollower extends TestSchedulerPlanFollowerBase { - private final static String ALLOC_FILE = new File(FairSchedulerTestBase. 
- TEST_DIR, - TestFairReservationSystem.class.getName() + ".xml").getAbsolutePath(); + private final static String ALLOC_FILE = new File( + FairSchedulerTestBase.TEST_DIR, + TestSchedulerPlanFollowerBase.class.getName() + ".xml").getAbsolutePath(); private RMContext rmContext; private RMContext spyRMContext; private FairScheduler fs; @@ -86,13 +86,11 @@ protected Configuration createConfiguration() { public void setUp() throws Exception { conf = createConfiguration(); ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE); - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); // Setup rmContext = TestUtils.getMockRMContext(); spyRMContext = spy(rmContext); - fs = ReservationSystemTestUtil.setupFairScheduler(testUtil, - spyRMContext, conf, 125); + fs = ReservationSystemTestUtil.setupFairScheduler(spyRMContext, conf, 125); scheduler = fs; ConcurrentMap spyApps = @@ -108,11 +106,11 @@ public void setUp() throws Exception { } private void setupPlanFollower() throws Exception { - ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil(); mClock = mock(Clock.class); mAgent = mock(ReservationAgent.class); - String reservationQ = testUtil.getFullReservationQueueName(); + String reservationQ = + ReservationSystemTestUtil.getFullReservationQueueName(); AllocationConfiguration allocConf = fs.getAllocationConfiguration(); allocConf.setReservationWindow(20L); allocConf.setAverageCapacity(20); @@ -135,14 +133,13 @@ public void testWithKillOnExpiry() throws PlanningException, @Override protected void verifyCapacity(Queue defQ) { - assertTrue(((FSQueue) defQ).getWeights().getWeight(ResourceType.MEMORY) > - 0.9); + assertTrue(((FSQueue) defQ).getWeights().getWeight(ResourceType.MEMORY) > 0.9); } @Override protected Queue getDefaultQueue() { - return getReservationQueue("dedicated" + - ReservationConstants.DEFAULT_QUEUE_SUFFIX); + return getReservationQueue("dedicated" + + ReservationConstants.DEFAULT_QUEUE_SUFFIX); } @Override @@ -153,8 +150,7 @@ protected int getNumberOfApplications(Queue queue) { @Override protected AbstractSchedulerPlanFollower createPlanFollower() { - FairSchedulerPlanFollower planFollower = - new FairSchedulerPlanFollower(); + FairSchedulerPlanFollower planFollower = new FairSchedulerPlanFollower(); planFollower.init(mClock, scheduler, Collections.singletonList(plan)); return planFollower; } @@ -168,13 +164,13 @@ protected void assertReservationQueueExists(ReservationId r) { @Override protected void assertReservationQueueExists(ReservationId r, double expectedCapacity, double expectedMaxCapacity) { - FSLeafQueue q = fs.getQueueManager().getLeafQueue(plan.getQueueName() + "" + - "." + - r, false); + FSLeafQueue q = + fs.getQueueManager().getLeafQueue(plan.getQueueName() + "" + "." + r, + false); assertNotNull(q); // For now we are setting both to same weight - Assert.assertEquals(expectedCapacity, q.getWeights().getWeight - (ResourceType.MEMORY), 0.01); + Assert.assertEquals(expectedCapacity, + q.getWeights().getWeight(ResourceType.MEMORY), 0.01); } @Override @@ -185,9 +181,8 @@ protected void assertReservationQueueDoesNotExist(ReservationId r) { @Override protected Queue getReservationQueue(String r) { - return fs.getQueueManager().getLeafQueue(plan.getQueueName() + "" + - "." + - r, false); + return fs.getQueueManager().getLeafQueue( + plan.getQueueName() + "" + "." 
+ r, false); } public static ApplicationACLsManager mockAppACLsManager() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java new file mode 100644 index 0000000000000..7fcf4bcae5df7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java @@ -0,0 +1,213 @@ +/******************************************************************************* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *******************************************************************************/ +package org.apache.hadoop.yarn.server.resourcemanager.reservation; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerTestBase; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.io.File; +import java.io.IOException; + +@SuppressWarnings({ "rawtypes" }) +public class TestReservationSystem extends + ParameterizedSchedulerTestBase { + + private final static String ALLOC_FILE = new File( + FairSchedulerTestBase.TEST_DIR, TestReservationSystem.class.getName() + + ".xml").getAbsolutePath(); + private AbstractYarnScheduler scheduler; + private AbstractReservationSystem reservationSystem; + private RMContext rmContext; + private Configuration conf; + private RMContext mockRMContext; + + public 
TestReservationSystem(SchedulerType type) { + super(type); + } + + @Before + public void setUp() throws IOException { + scheduler = initializeScheduler(); + rmContext = getRMContext(); + reservationSystem = configureReservationSystem(); + reservationSystem.setRMContext(rmContext); + DefaultMetricsSystem.setMiniClusterMode(true); + } + + @After + public void tearDown() { + conf = null; + reservationSystem = null; + rmContext = null; + scheduler = null; + clearRMContext(); + QueueMetrics.clearQueueMetrics(); + } + + @Test + public void testInitialize() throws IOException { + try { + reservationSystem.reinitialize(scheduler.getConfig(), rmContext); + } catch (YarnException e) { + Assert.fail(e.getMessage()); + } + if (getSchedulerType().equals(SchedulerType.CAPACITY)) { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + ReservationSystemTestUtil.getReservationQueueName()); + } else { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + ReservationSystemTestUtil.getFullReservationQueueName()); + } + + } + + @Test + public void testReinitialize() throws IOException { + conf = scheduler.getConfig(); + try { + reservationSystem.reinitialize(conf, rmContext); + } catch (YarnException e) { + Assert.fail(e.getMessage()); + } + if (getSchedulerType().equals(SchedulerType.CAPACITY)) { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + ReservationSystemTestUtil.getReservationQueueName()); + } else { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + ReservationSystemTestUtil.getFullReservationQueueName()); + } + + // Dynamically add a plan + String newQ = "reservation"; + Assert.assertNull(reservationSystem.getPlan(newQ)); + updateSchedulerConf(conf, newQ); + try { + scheduler.reinitialize(conf, rmContext); + } catch (IOException e) { + Assert.fail(e.getMessage()); + } + try { + reservationSystem.reinitialize(conf, rmContext); + } catch (YarnException e) { + Assert.fail(e.getMessage()); + } + if (getSchedulerType().equals(SchedulerType.CAPACITY)) { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + newQ); + } else { + ReservationSystemTestUtil.validateReservationQueue(reservationSystem, + "root." 
+ newQ); + } + } + + @SuppressWarnings("rawtypes") + public AbstractYarnScheduler initializeScheduler() throws IOException { + switch (getSchedulerType()) { + case CAPACITY: + return initializeCapacityScheduler(); + case FAIR: + return initializeFairScheduler(); + } + return null; + } + + public AbstractReservationSystem configureReservationSystem() { + switch (getSchedulerType()) { + case CAPACITY: + return new CapacityReservationSystem(); + case FAIR: + return new FairReservationSystem(); + } + return null; + } + + public void updateSchedulerConf(Configuration conf, String newQ) + throws IOException { + switch (getSchedulerType()) { + case CAPACITY: + ReservationSystemTestUtil.updateQueueConfiguration( + (CapacitySchedulerConfiguration) conf, newQ); + case FAIR: + ReservationSystemTestUtil.updateFSAllocationFile(ALLOC_FILE); + } + } + + public RMContext getRMContext() { + return mockRMContext; + } + + public void clearRMContext() { + mockRMContext = null; + } + + private CapacityScheduler initializeCapacityScheduler() { + // stolen from TestCapacityScheduler + CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + ReservationSystemTestUtil.setupQueueConfiguration(conf); + + CapacityScheduler cs = Mockito.spy(new CapacityScheduler()); + cs.setConf(conf); + + mockRMContext = ReservationSystemTestUtil.createRMContext(conf); + + cs.setRMContext(mockRMContext); + try { + cs.serviceInit(conf); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + ReservationSystemTestUtil.initializeRMContext(10, cs, mockRMContext); + return cs; + } + + private Configuration createFSConfiguration() { + FairSchedulerTestBase testHelper = new FairSchedulerTestBase(); + Configuration conf = testHelper.createConfiguration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, + ResourceScheduler.class); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + return conf; + } + + private FairScheduler initializeFairScheduler() throws IOException { + Configuration conf = createFSConfiguration(); + ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE); + + // Setup + mockRMContext = ReservationSystemTestUtil.createRMContext(conf); + return ReservationSystemTestUtil + .setupFairScheduler(mockRMContext, conf, 10); + } +} From acd8e59b246281f81043694474cde2e08038ff93 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 6 Aug 2015 18:51:28 -0700 Subject: [PATCH 077/130] HDFS-8856. Make LeaseManager#countPath O(1). (Contributed by Arpit Agarwal) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/namenode/Checkpointer.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 14 ++-- .../hdfs/server/namenode/LeaseManager.java | 17 ++--- .../server/namenode/TestLeaseManager.java | 65 ++++++++++++++++--- 5 files changed, 79 insertions(+), 23 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 138ed107a0df7..051dc8a604ec8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -767,6 +767,8 @@ Release 2.8.0 - UNRELEASED HDFS-8815. DFS getStoragePolicy implementation using single RPC call (Surendra Singh Lilhore via vinayakumarb) + HDFS-8856. Make LeaseManager#countPath O(1). (Arpit Agarwal) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index 25b87f095ef40..908762985403d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -254,7 +254,9 @@ void doCheckpoint() throws IOException { try { backupNode.namesystem.setImageLoaded(); if(backupNode.namesystem.getBlocksTotal() > 0) { - backupNode.namesystem.setBlockTotal(); + long completeBlocksTotal = + backupNode.namesystem.getCompleteBlocksTotal(); + backupNode.namesystem.setBlockTotal(completeBlocksTotal); } bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid); if (!backupNode.namesystem.isRollingUpgrade()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 1cde47c30d253..4cc30732d7ff4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1042,9 +1042,10 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep assert safeMode != null && !isPopulatingReplQueues(); StartupProgress prog = NameNode.getStartupProgress(); prog.beginPhase(Phase.SAFEMODE); + long completeBlocksTotal = getCompleteBlocksTotal(); prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS, - getCompleteBlocksTotal()); - setBlockTotal(); + completeBlocksTotal); + setBlockTotal(completeBlocksTotal); blockManager.activate(conf); } finally { writeUnlock(); @@ -4686,12 +4687,12 @@ public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) { /** * Set the total number of blocks in the system. */ - public void setBlockTotal() { + public void setBlockTotal(long completeBlocksTotal) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return; - safeMode.setBlockTotal((int) getCompleteBlocksTotal()); + safeMode.setBlockTotal((int) completeBlocksTotal); } /** @@ -4723,13 +4724,14 @@ public long getNumActiveClients() { /** * Get the total number of COMPLETE blocks in the system. * For safe mode only complete blocks are counted. + * This is invoked only during NN startup and checkpointing. 
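// --- Editorial aside (not part of the patch): the LeaseManager hunk below is
// the heart of HDFS-8856. countPath() used to iterate every lease and sum the
// per-lease file counts, i.e. O(number of leases); it now just returns the
// size of a map that already tracks one entry per open file. A simplified,
// self-contained sketch of the before/after shape (field names and types here
// are illustrative, not the real LeaseManager internals):
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class LeaseCountSketch {
  // one entry per open file: inode id -> lease holder
  private final Map<Long, String> leasesById = new HashMap<>();
  // holder -> set of inode ids held under that holder's lease
  private final Map<String, Set<Long>> filesByHolder = new HashMap<>();

  synchronized void addLease(String holder, long inodeId) {
    leasesById.put(inodeId, holder);
    Set<Long> files = filesByHolder.get(holder);
    if (files == null) {
      files = new HashSet<>();
      filesByHolder.put(holder, files);
    }
    files.add(inodeId);
  }

  synchronized long countPathOld() {   // walks all leases: O(#holders)
    long count = 0;
    for (Set<Long> files : filesByHolder.values()) {
      count += files.size();
    }
    return count;
  }

  synchronized long countPathNew() {   // O(1): the map is kept in sync
    return leasesById.size();
  }
}
// --- end of aside ---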
*/ - private long getCompleteBlocksTotal() { + public long getCompleteBlocksTotal() { // Calculate number of blocks under construction long numUCBlocks = 0; readLock(); - numUCBlocks = leaseManager.getNumUnderConstructionBlocks(); try { + numUCBlocks = leaseManager.getNumUnderConstructionBlocks(); return getBlocksTotal() - numUCBlocks; } finally { readUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index f954a58084470..1a1edaf14c157 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -128,15 +129,13 @@ synchronized long getNumUnderConstructionBlocks() { /** @return the number of leases currently in the system */ @VisibleForTesting - public synchronized int countLease() {return sortedLeases.size();} + public synchronized int countLease() { + return sortedLeases.size(); + } /** @return the number of paths contained in all leases */ - synchronized int countPath() { - int count = 0; - for (Lease lease : sortedLeases) { - count += lease.getFiles().size(); - } - return count; + synchronized long countPath() { + return leasesById.size(); } /** @@ -280,7 +279,9 @@ public int hashCode() { return holder.hashCode(); } - private Collection getFiles() { return files; } + private Collection getFiles() { + return Collections.unmodifiableCollection(files); + } String getHolder() { return holder; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java index 96907f8ac4e07..de301617bd6f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java @@ -17,19 +17,28 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import com.google.common.collect.Lists; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.junit.Rule; import org.junit.Test; -import org.mockito.Mockito; +import org.junit.rules.Timeout; import java.util.ArrayList; +import static org.junit.Assert.assertThat; import static org.mockito.Mockito.*; public class TestLeaseManager { + @Rule + public Timeout timeout = new Timeout(300000); + @Test public void testRemoveLeases() throws Exception { FSNamesystem fsn = mock(FSNamesystem.class); @@ -52,14 +61,9 @@ public void testRemoveLeases() throws Exception { * leases, the Namenode does't enter an infinite loop while holding the FSN * write lock and thus become unresponsive */ - @Test (timeout=1000) + @Test public void testCheckLeaseNotInfiniteLoop() { - FSDirectory dir = 
Mockito.mock(FSDirectory.class); - FSNamesystem fsn = Mockito.mock(FSNamesystem.class); - Mockito.when(fsn.isRunning()).thenReturn(true); - Mockito.when(fsn.hasWriteLock()).thenReturn(true); - Mockito.when(fsn.getFSDirectory()).thenReturn(dir); - LeaseManager lm = new LeaseManager(fsn); + LeaseManager lm = new LeaseManager(makeMockFsNameSystem()); //Make sure the leases we are going to add exceed the hard limit lm.setLeasePeriod(0, 0); @@ -73,4 +77,49 @@ public void testCheckLeaseNotInfiniteLoop() { //Initiate a call to checkLease. This should exit within the test timeout lm.checkLeases(); } + + + @Test + public void testCountPath() { + LeaseManager lm = new LeaseManager(makeMockFsNameSystem()); + + lm.addLease("holder1", 1); + assertThat(lm.countPath(), is(1L)); + + lm.addLease("holder2", 2); + assertThat(lm.countPath(), is(2L)); + lm.addLease("holder2", 2); // Duplicate addition + assertThat(lm.countPath(), is(2L)); + + assertThat(lm.countPath(), is(2L)); + + // Remove a couple of non-existing leases. countPath should not change. + lm.removeLease("holder2", stubInodeFile(3)); + lm.removeLease("InvalidLeaseHolder", stubInodeFile(1)); + assertThat(lm.countPath(), is(2L)); + + INodeFile file = stubInodeFile(1); + lm.reassignLease(lm.getLease(file), file, "holder2"); + assertThat(lm.countPath(), is(2L)); // Count unchanged on reassign + + lm.removeLease("holder2", stubInodeFile(2)); // Remove existing + assertThat(lm.countPath(), is(1L)); + } + + private static FSNamesystem makeMockFsNameSystem() { + FSDirectory dir = mock(FSDirectory.class); + FSNamesystem fsn = mock(FSNamesystem.class); + when(fsn.isRunning()).thenReturn(true); + when(fsn.hasWriteLock()).thenReturn(true); + when(fsn.getFSDirectory()).thenReturn(dir); + return fsn; + } + + private static INodeFile stubInodeFile(long inodeId) { + PermissionStatus p = new PermissionStatus( + "dummy", "dummy", new FsPermission((short) 0777)); + return new INodeFile( + inodeId, "/foo".getBytes(), p, 0L, 0L, + BlockInfo.EMPTY_ARRAY, (short) 1, 1L); + } } From e1accc8cda50719aeb42a1dc87a11c0dfa923ce2 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Fri, 7 Aug 2015 10:43:41 +0530 Subject: [PATCH 078/130] YARN-3948. Display Application Priority in RM Web UI.(Sunil G via rohithsharmaks) --- hadoop-yarn-project/CHANGES.txt | 2 + .../yarn/api/records/ApplicationReport.java | 13 +++++++ .../src/main/proto/yarn_protos.proto | 1 + .../yarn/client/cli/ApplicationCLI.java | 2 + .../hadoop/yarn/client/cli/TestYarnCLI.java | 2 + .../impl/pb/ApplicationReportPBImpl.java | 37 +++++++++++++++++++ .../yarn/server/utils/BuilderUtils.java | 4 +- .../hadoop/yarn/server/webapp/AppBlock.java | 5 +++ .../yarn/server/webapp/WebPageUtils.java | 4 +- .../yarn/server/webapp/dao/AppInfo.java | 9 +++++ .../resourcemanager/rmapp/RMAppImpl.java | 8 ++-- .../resourcemanager/webapp/RMAppsBlock.java | 7 +++- .../resourcemanager/webapp/dao/AppInfo.java | 12 +++++- .../webapp/TestRMWebServicesApps.java | 24 ++++++------ .../src/site/markdown/ResourceManagerRest.md | 7 ++++ 15 files changed, 116 insertions(+), 21 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c4b19866053fb..fa0433722d4be 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -159,6 +159,8 @@ Release 2.8.0 - UNRELEASED YARN-3736. Add RMStateStore apis to store and load accepted reservations for failover (adhoot via asuresh) + YARN-3948. 
Display Application Priority in RM Web UI.(Sunil G via rohithsharmaks) + IMPROVEMENTS YARN-644. Basic null check is not performed on passed in arguments before diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java index e7a2ad4b83501..258b991df301c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java @@ -408,4 +408,17 @@ public abstract void setLogAggregationStatus( @Public @Unstable public abstract void setUnmanagedApp(boolean unmanagedApplication); + + /** + * Get priority of the application + * + * @return Application's priority + */ + @Public + @Stable + public abstract Priority getPriority(); + + @Private + @Unstable + public abstract void setPriority(Priority priority); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index fdacab3edf0fa..13d836546b75d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -196,6 +196,7 @@ message ApplicationReportProto { repeated string applicationTags = 20; optional LogAggregationStatusProto log_aggregation_status = 21; optional bool unmanaged_application = 22 [default = false]; + optional PriorityProto priority = 23; } enum LogAggregationStatusProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 013b88e00ef75..62638149f46d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -501,6 +501,8 @@ private int printApplicationReport(String applicationId) appReportStr.println(appReport.getUser()); appReportStr.print("\tQueue : "); appReportStr.println(appReport.getQueue()); + appReportStr.print("\tApplication Priority : "); + appReportStr.println(appReport.getPriority()); appReportStr.print("\tStart-Time : "); appReportStr.println(appReport.getStartTime()); appReportStr.print("\tFinish-Time : "); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index aadcd4dc27c46..eb50e003e93d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -106,6 +106,7 @@ public void testGetApplicationReport() throws Exception { FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN", null, null, false); newApplicationReport.setLogAggregationStatus(LogAggregationStatus.SUCCEEDED); + 
newApplicationReport.setPriority(Priority.newInstance(0)); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( newApplicationReport); int result = cli.run(new String[] { "application", "-status", applicationId.toString() }); @@ -119,6 +120,7 @@ public void testGetApplicationReport() throws Exception { pw.println("\tApplication-Type : YARN"); pw.println("\tUser : user"); pw.println("\tQueue : queue"); + pw.println("\tApplication Priority : 0"); pw.println("\tStart-Time : 0"); pw.println("\tFinish-Time : 0"); pw.println("\tProgress : 53.79%"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index 69435b5d7b7ff..2e50e0dd251cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LogAggregationStatus; +import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; @@ -36,6 +37,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import com.google.protobuf.TextFormat; @@ -55,6 +57,7 @@ public class ApplicationReportPBImpl extends ApplicationReport { private Token clientToAMToken = null; private Token amRmToken = null; private Set applicationTags = null; + private Priority priority = null; public ApplicationReportPBImpl() { builder = ApplicationReportProto.newBuilder(); @@ -484,6 +487,11 @@ private void mergeLocalToBuilder() { builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } + if (this.priority != null + && !((PriorityPBImpl) this.priority).getProto().equals( + builder.getPriority())) { + builder.setPriority(convertToProtoFormat(this.priority)); + } } private void mergeLocalToProto() { @@ -551,6 +559,14 @@ private TokenProto convertToProtoFormat(Token t) { return ((TokenPBImpl)t).getProto(); } + private PriorityPBImpl convertFromProtoFormat(PriorityProto p) { + return new PriorityPBImpl(p); + } + + private PriorityProto convertToProtoFormat(Priority t) { + return ((PriorityPBImpl)t).getProto(); + } + @Override public LogAggregationStatus getLogAggregationStatus() { ApplicationReportProtoOrBuilder p = viaProto ? proto : builder; @@ -593,4 +609,25 @@ public void setUnmanagedApp(boolean unmanagedApplication) { maybeInitBuilder(); builder.setUnmanagedApplication(unmanagedApplication); } + + @Override + public Priority getPriority() { + ApplicationReportProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.priority != null) { + return this.priority; + } + if (!p.hasPriority()) { + return null; + } + this.priority = convertFromProtoFormat(p.getPriority()); + return this.priority; + } + + @Override + public void setPriority(Priority priority) { + maybeInitBuilder(); + if (priority == null) + builder.clearPriority(); + this.priority = priority; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index f2146c8b124be..a3bd6f8640d2b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -324,7 +324,8 @@ public static ApplicationReport newApplicationReport( String url, long startTime, long finishTime, FinalApplicationStatus finalStatus, ApplicationResourceUsageReport appResources, String origTrackingUrl, - float progress, String appType, Token amRmToken, Set tags) { + float progress, String appType, Token amRmToken, Set tags, + Priority priority) { ApplicationReport report = recordFactory .newRecordInstance(ApplicationReport.class); report.setApplicationId(applicationId); @@ -347,6 +348,7 @@ public static ApplicationReport newApplicationReport( report.setApplicationType(appType); report.setAMRMToken(amRmToken); report.setApplicationTags(tags); + report.setPriority(priority); return report; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java index 111842c2f2f59..98a8f81c0a9bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -167,6 +167,7 @@ public ApplicationReport run() throws Exception { ._("Application Type:", app.getType()) ._("Application Tags:", app.getApplicationTags() == null ? "" : app.getApplicationTags()) + ._("Application Priority:", clarifyAppPriority(app.getPriority())) ._( "YarnApplicationState:", app.getAppState() == null ? 
UNAVAILABLE : clarifyAppState(app @@ -342,6 +343,10 @@ private String clarifyAppState(YarnApplicationState state) { } } + private String clarifyAppPriority(int priority) { + return priority + " (Higher Integer value indicates higher priority)"; + } + private String clairfyAppFinalStatus(FinalApplicationStatus status) { if (status == FinalApplicationStatus.UNDEFINED) { return "Application has not completed yet."; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java index df63b77a4c1c9..ffc56374e54bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java @@ -52,13 +52,13 @@ private static String getAppsTableColumnDefs( .append("{'sType':'string', 'aTargets': [0]") .append(", 'mRender': parseHadoopID }") .append("\n, {'sType':'numeric', 'aTargets': " + - (isFairSchedulerPage ? "[6, 7]": "[5, 6]")) + (isFairSchedulerPage ? "[6, 7]": "[6, 7]")) .append(", 'mRender': renderHadoopDate }") .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':"); if (isFairSchedulerPage) { sb.append("[13]"); } else if (isResourceManager) { - sb.append("[12]"); + sb.append("[13]"); } else { sb.append("[9]"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java index 77553d497d02e..7efbcb92175ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java @@ -58,6 +58,7 @@ public class AppInfo { protected long finishedTime; protected long elapsedTime; protected String applicationTags; + protected int priority; private int allocatedCpuVcores; private int allocatedMemoryMB; protected boolean unmanagedApplication; @@ -86,6 +87,10 @@ public AppInfo(ApplicationReport app) { finishedTime = app.getFinishTime(); elapsedTime = Times.elapsed(startedTime, finishedTime); finalAppStatus = app.getFinalApplicationStatus(); + priority = 0; + if (app.getPriority() != null) { + priority = app.getPriority().getPriority(); + } if (app.getApplicationResourceUsageReport() != null) { runningContainers = app.getApplicationResourceUsageReport() .getNumUsedContainers(); @@ -194,4 +199,8 @@ public String getApplicationTags() { public boolean isUnmanagedApp() { return unmanagedApplication; } + + public int getPriority() { + return priority; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 6b7526eab1c05..42ff1deb0c40c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -660,10 +660,10 @@ public ApplicationReport createAndGetApplicationReport(String clientUserName, ApplicationReport report = BuilderUtils.newApplicationReport( this.applicationId, currentApplicationAttemptId, this.user, this.queue, this.name, host, rpcPort, clientToAMToken, - createApplicationState(), diags, - trackingUrl, this.startTime, this.finishTime, finishState, - appUsageReport, origTrackingUrl, progress, this.applicationType, - amrmToken, applicationTags); + createApplicationState(), diags, trackingUrl, this.startTime, + this.finishTime, finishState, appUsageReport, origTrackingUrl, + progress, this.applicationType, amrmToken, applicationTags, + this.submissionContext.getPriority()); report.setLogAggregationStatus(logAggregationStatus); report.setUnmanagedApp(submissionContext.getUnmanagedAM()); return report; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java index 5e80d23674e4f..a1d8633f3e503 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java @@ -56,7 +56,8 @@ protected void renderData(Block html) { TBODY<TABLE<Hamlet>> tbody = html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User") .th(".name", "Name").th(".type", "Application Type") - .th(".queue", "Queue").th(".starttime", "StartTime") + .th(".queue", "Queue").th(".priority", "Application Priority") + .th(".starttime", "StartTime") .th(".finishtime", "FinishTime").th(".state", "State") .th(".finalstatus", "FinalStatus") .th(".runningcontainer", "Running Containers") @@ -106,7 +107,9 @@ protected void renderData(Block html) { .append("\",\"") .append( StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app - .getQueue()))).append("\",\"").append(app.getStartedTime()) + .getQueue()))).append("\",\"").append(String + .valueOf(app.getPriority())) + .append("\",\"").append(app.getStartedTime()) .append("\",\"").append(app.getFinishedTime()) .append("\",\"") .append(app.getAppState() == null ? 
UNAVAILABLE : app.getAppState()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index 231f955444c13..46f05336a88b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -74,7 +74,8 @@ public class AppInfo { protected long clusterId; protected String applicationType; protected String applicationTags = ""; - + protected int priority; + // these are only allowed if acls allow protected long startedTime; protected long finishedTime; @@ -130,6 +131,11 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess, this.user = app.getUser().toString(); this.name = app.getName().toString(); this.queue = app.getQueue().toString(); + this.priority = 0; + if (app.getApplicationSubmissionContext().getPriority() != null) { + this.priority = app.getApplicationSubmissionContext().getPriority() + .getPriority(); + } this.progress = app.getProgress() * 100; this.diagnostics = app.getDiagnostics().toString(); if (diagnostics == null || diagnostics.isEmpty()) { @@ -328,4 +334,8 @@ public LogAggregationStatus getLogAggregationStatus() { public boolean isUnmanagedApp() { return unmanagedApplication; } + + public int getPriority() { + return this.priority; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index 879ce4cbca3f2..919bb1ab577de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -1290,6 +1290,7 @@ public void verifyAppsXML(NodeList nodes, RMApp app) throws JSONException, WebServicesTestUtils.getXmlString(element, "name"), WebServicesTestUtils.getXmlString(element, "applicationType"), WebServicesTestUtils.getXmlString(element, "queue"), + WebServicesTestUtils.getXmlInt(element, "priority"), WebServicesTestUtils.getXmlString(element, "state"), WebServicesTestUtils.getXmlString(element, "finalStatus"), WebServicesTestUtils.getXmlFloat(element, "progress"), @@ -1316,18 +1317,18 @@ public void verifyAppsXML(NodeList nodes, RMApp app) throws JSONException, public void verifyAppInfo(JSONObject info, RMApp app) throws JSONException, Exception { - assertEquals("incorrect number of elements", 29, info.length()); + assertEquals("incorrect number of elements", 30, info.length()); verifyAppInfoGeneric(app, info.getString("id"), info.getString("user"), info.getString("name"), info.getString("applicationType"), - info.getString("queue"), 
info.getString("state"), - info.getString("finalStatus"), (float) info.getDouble("progress"), - info.getString("trackingUI"), info.getString("diagnostics"), - info.getLong("clusterId"), info.getLong("startedTime"), - info.getLong("finishedTime"), info.getLong("elapsedTime"), - info.getString("amHostHttpAddress"), info.getString("amContainerLogs"), - info.getInt("allocatedMB"), info.getInt("allocatedVCores"), - info.getInt("runningContainers"), + info.getString("queue"), info.getInt("priority"), + info.getString("state"), info.getString("finalStatus"), + (float) info.getDouble("progress"), info.getString("trackingUI"), + info.getString("diagnostics"), info.getLong("clusterId"), + info.getLong("startedTime"), info.getLong("finishedTime"), + info.getLong("elapsedTime"), info.getString("amHostHttpAddress"), + info.getString("amContainerLogs"), info.getInt("allocatedMB"), + info.getInt("allocatedVCores"), info.getInt("runningContainers"), info.getInt("preemptedResourceMB"), info.getInt("preemptedResourceVCores"), info.getInt("numNonAMContainerPreempted"), @@ -1337,8 +1338,8 @@ public void verifyAppInfo(JSONObject info, RMApp app) throws JSONException, } public void verifyAppInfoGeneric(RMApp app, String id, String user, - String name, String applicationType, String queue, String state, - String finalStatus, float progress, String trackingUI, + String name, String applicationType, String queue, int prioirty, + String state, String finalStatus, float progress, String trackingUI, String diagnostics, long clusterId, long startedTime, long finishedTime, long elapsedTime, String amHostHttpAddress, String amContainerLogs, int allocatedMB, int allocatedVCores, int numContainers, @@ -1355,6 +1356,7 @@ public void verifyAppInfoGeneric(RMApp app, String id, String user, WebServicesTestUtils.checkStringMatch("applicationType", app.getApplicationType(), applicationType); WebServicesTestUtils.checkStringMatch("queue", app.getQueue(), queue); + assertEquals("priority doesn't match", 0, prioirty); WebServicesTestUtils.checkStringMatch("state", app.getState().toString(), state); WebServicesTestUtils.checkStringMatch("finalStatus", app diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md index ddf2bf4481327..e16f8c1d83349 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md @@ -1382,6 +1382,7 @@ Response Body: "memorySeconds" : 151730, "vcoreSeconds" : 103, "unmanagedApplication":"false" + "applicationPriority":0 }, { "finishedTime" : 1326815789546, @@ -1408,6 +1409,7 @@ Response Body: "memorySeconds" : 640064, "vcoreSeconds" : 442 "unmanagedApplication":"false" + "applicationPriority":0 } ] } @@ -1458,6 +1460,7 @@ Response Body: 151730 103 false + 0 application_1326815542473_0002 @@ -1484,6 +1487,7 @@ Response Body: 640064 442 false + 0 ``` @@ -1644,6 +1648,7 @@ Note that depending on security settings a user might not be able to see all the | memorySeconds | long | The amount of memory the application has allocated (megabyte-seconds) | | vcoreSeconds | long | The amount of CPU resources the application has allocated (virtual core-seconds) | | unmanagedApplication | boolean | Is the application unmanaged. 
| +| applicationPriority | int | The priority of the submitted application | ### Response Examples @@ -1685,6 +1690,7 @@ Response Body: "memorySeconds" : 151730, "vcoreSeconds" : 103, "unmanagedApplication":"false" + "applicationPriority":0 } } ``` @@ -1727,6 +1733,7 @@ Response Body: <memorySeconds>151730</memorySeconds> <vcoreSeconds>103</vcoreSeconds> <unmanagedApplication>false</unmanagedApplication> + <applicationPriority>0</applicationPriority> </app> ``` From 1825fbc1aac0813c36944454bf95879b45aa40d6 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Fri, 7 Aug 2015 09:46:57 -0700 Subject: [PATCH 079/130] YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via wangda) --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../scheduler/AbstractYarnScheduler.java | 10 +++--- .../scheduler/capacity/CapacityScheduler.java | 36 ++++++------------- .../scheduler/capacity/LeafQueue.java | 18 +++++----- .../scheduler/capacity/ParentQueue.java | 14 ++++---- .../common/fica/FiCaSchedulerApp.java | 8 +---- 6 files changed, 33 insertions(+), 55 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index fa0433722d4be..7d34eeb7bae49 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -382,6 +382,8 @@ Release 2.8.0 - UNRELEASED YARN-3974. Refactor the reservation system test cases to use parameterized base test. (subru via curino) + YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via wangda) + OPTIMIZATIONS YARN-3339. TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 094f77d506775..d69600ab62ec7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -215,12 +215,12 @@ protected void initMaximumResourceCapability(Resource maximumAllocation) { protected synchronized void containerLaunchedOnNode( ContainerId containerId, SchedulerNode node) { // Get the application for the finished container - SchedulerApplicationAttempt application = getCurrentAttemptForContainer (containerId); + SchedulerApplicationAttempt application = + getCurrentAttemptForContainer(containerId); if (application == null) { - LOG.info("Unknown application " - + containerId.getApplicationAttemptId().getApplicationId() - + " launched container " + containerId + " on node: " + node); + LOG.info("Unknown application " + containerId.getApplicationAttemptId() .getApplicationId() + " launched container " + containerId + + " on node: " + node); this.rmContext.getDispatcher().getEventHandler() .handle(new RMNodeCleanContainerEvent(node.getNodeID(), containerId)); return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 
0b39d355f7927..1d353a60b3b37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -795,8 +795,8 @@ private synchronized void addApplicationAttempt( application.getUser(), queue, queue.getActiveUsersManager(), rmContext, application.getPriority()); if (transferStateFromPreviousAttempt) { - attempt.transferStateFromPreviousAttempt(application - .getCurrentAppAttempt()); + attempt.transferStateFromPreviousAttempt( + application.getCurrentAppAttempt()); } application.setCurrentAppAttempt(attempt); @@ -899,8 +899,6 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId, FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId); if (application == null) { - LOG.info("Calling allocate on removed " + - "or non existant application " + applicationAttemptId); return EMPTY_ALLOCATION; } @@ -921,33 +919,26 @@ ask, getResourceCalculator(), getClusterResource(), // make sure we aren't stopping/removing the application // when the allocate comes in if (application.isStopped()) { - LOG.info("Calling allocate on a stopped " + - "application " + applicationAttemptId); return EMPTY_ALLOCATION; } if (!ask.isEmpty()) { if(LOG.isDebugEnabled()) { - LOG.debug("allocate: pre-update" + - " applicationAttemptId=" + applicationAttemptId + - " application=" + application); + LOG.debug("allocate: pre-update " + applicationAttemptId + + " ask size =" + ask.size()); + application.showRequests(); } - application.showRequests(); - + // Update application requests if (application.updateResourceRequests(ask)) { updateDemandForQueue = (LeafQueue) application.getQueue(); } - LOG.debug("allocate: post-update"); - application.showRequests(); - } - - if(LOG.isDebugEnabled()) { - LOG.debug("allocate:" + - " applicationAttemptId=" + applicationAttemptId + - " #ask=" + ask.size()); + if(LOG.isDebugEnabled()) { + LOG.debug("allocate: post-update"); + application.showRequests(); + } } application.updateBlacklist(blacklistAdditions, blacklistRemovals); @@ -1018,7 +1009,6 @@ private synchronized void nodeUpdate(RMNode nm) { for (ContainerStatus completedContainer : completedContainers) { ContainerId containerId = completedContainer.getContainerId(); RMContainer container = getRMContainer(containerId); - LOG.debug("Container FINISHED: " + containerId); completedContainer(container, completedContainer, RMContainerEventType.FINISHED); if (container != null) { @@ -1481,9 +1471,6 @@ protected synchronized void completedContainer(RMContainer rmContainer, queue.completedContainer(clusterResource, application, node, rmContainer, containerStatus, event, null, true); - LOG.info("Application attempt " + application.getApplicationAttemptId() - + " released container " + container.getId() + " on node: " + node - + " with event: " + event); if (containerStatus.getExitStatus() == ContainerExitStatus.PREEMPTED) { schedulerHealth.updatePreemption(Time.now(), container.getNodeId(), container.getId(), queue.getQueuePath()); @@ -1783,8 +1770,7 @@ public EnumSet getSchedulingResourceTypes() { .equals(DefaultResourceCalculator.class.getName())) { return EnumSet.of(SchedulerResourceTypes.MEMORY); } - return EnumSet - .of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU); + 
return EnumSet.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index a71cc68639c29..2691c3349db51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1125,12 +1125,8 @@ public void completedContainer(Resource clusterResource, // Inform the ordering policy orderingPolicy.containerReleased(application, rmContainer); - releaseResource(clusterResource, application, - container.getResource(), node.getPartition(), rmContainer); - LOG.info("completedContainer" + - " container=" + container + - " queue=" + this + - " cluster=" + clusterResource); + releaseResource(clusterResource, application, container.getResource(), + node.getPartition(), rmContainer); } } @@ -1203,10 +1199,12 @@ synchronized void releaseResource(Resource clusterResource, User user = getUser(userName); user.releaseContainer(resource, nodePartition); metrics.setAvailableResourcesToUser(userName, application.getHeadroom()); - - LOG.info(getQueueName() + - " used=" + queueUsage.getUsed() + " numContainers=" + numContainers + - " user=" + userName + " user-resources=" + user.getUsed()); + + if (LOG.isDebugEnabled()) { + LOG.debug(getQueueName() + + " used=" + queueUsage.getUsed() + " numContainers=" + numContainers + + " user=" + userName + " user-resources=" + user.getUsed()); + } } private void updateAbsoluteCapacityResource(Resource clusterResource) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 725aea184035e..e01204ccdd130 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -629,12 +629,9 @@ public void completedContainer(Resource clusterResource, super.releaseResource(clusterResource, rmContainer.getContainer() .getResource(), node.getPartition()); - LOG.info("completedContainer" + - " queue=" + getQueueName() + - " usedCapacity=" + getUsedCapacity() + - " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + - " used=" + queueUsage.getUsed() + - " cluster=" + clusterResource); + if (LOG.isDebugEnabled()) { + LOG.debug("completedContainer " + this + ", cluster=" + clusterResource); + } // Note that this is using an iterator on the childQueues so this can't // be called if already within an iterator for the childQueues. 
Like @@ -646,8 +643,9 @@ public void completedContainer(Resource clusterResource, CSQueue csqueue = iter.next(); if(csqueue.equals(completedChildQueue)) { iter.remove(); - LOG.info("Re-sorting completed queue: " + csqueue.getQueuePath() + - " stats: " + csqueue); + if (LOG.isDebugEnabled()) { + LOG.debug("Re-sorting completed queue: " + csqueue); + } childQueues.add(csqueue); break; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index d75b2c39a228c..f9a6bc25186dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -140,13 +140,7 @@ synchronized public boolean containerCompleted(RMContainer rmContainer, // Inform the container rmContainer.handle( - new RMContainerFinishedEvent( - containerId, - containerStatus, - event) - ); - LOG.info("Completed container: " + rmContainer.getContainerId() + - " in state: " + rmContainer.getState() + " event:" + event); + new RMContainerFinishedEvent(containerId, containerStatus, event)); containersToPreempt.remove(rmContainer.getContainerId()); From 42684f15641408985eeb8cb61afdc73b6aebc207 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 5 Aug 2015 16:35:41 -0700 Subject: [PATCH 080/130] HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally fails. Contributed by Walter Su. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../apache/hadoop/hdfs/MiniDFSCluster.java | 30 ++++++++++++++++++- .../server/namenode/ha/TestStandbyIsHot.java | 2 ++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 051dc8a604ec8..4e97b6b7ee3e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -769,6 +769,9 @@ Release 2.8.0 - UNRELEASED HDFS-8856. Make LeaseManager#countPath O(1). (Arpit Agarwal) + HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally fails. + (Walter Su via wang) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 0a2188609e418..70523216831ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -61,7 +61,9 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeoutException; +import com.google.common.base.Supplier; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; import org.apache.commons.logging.Log; @@ -86,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Util; @@ -114,6 +117,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -2386,7 +2390,31 @@ public void waitActive(int nnIndex) throws IOException { client.close(); } - + + /** Wait until the given namenode gets first block reports from all the datanodes */ + public void waitFirstBRCompleted(int nnIndex, int timeout) throws + IOException, TimeoutException, InterruptedException { + if (namenodes.size() == 0 || getNN(nnIndex) == null || getNN(nnIndex).nameNode == null) { + return; + } + + final FSNamesystem ns = getNamesystem(nnIndex); + final DatanodeManager dm = ns.getBlockManager().getDatanodeManager(); + GenericTestUtils.waitFor(new Supplier<Boolean>() { + @Override + public Boolean get() { + List<DatanodeDescriptor> nodes = dm.getDatanodeListForReport + (DatanodeReportType.LIVE); + for (DatanodeDescriptor node : nodes) { + if (!node.checkBlockReportReceived()) { + return false; + } + } + return true; + } + }, 100, timeout); + } + /** * Wait until the cluster is active and running. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index 622ed94861e10..14c9dc264d896 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -182,6 +182,8 @@ public void testDatanodeRestarts() throws Exception { // Wait for both NNs to re-register the DN. 
cluster.waitActive(0); cluster.waitActive(1); + cluster.waitFirstBRCompleted(0, 10000); + cluster.waitFirstBRCompleted(1, 10000); BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager()); BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager()); From 14e59b58485d7e5a13dd192fcbb473bb29d1d086 Mon Sep 17 00:00:00 2001 From: Jakob Homan Date: Fri, 7 Aug 2015 11:38:31 -0700 Subject: [PATCH 081/130] HDFS-8866. Typo in docs: Rumtime -> Runtime. Contributed by Gabor Liptak. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4e97b6b7ee3e6..565e469ee64fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1118,6 +1118,8 @@ Release 2.8.0 - UNRELEASED HDFS-8844. TestHDFSCLI does not cleanup the test directory (Masatake Iwasaki via Colin P. McCabe) + HDFS-8866. Typo in docs: Rumtime -> Runtime. (Gabor Liptak via jghoman) + Release 2.7.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index e8f5fee542576..f4f79b38835ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -1002,7 +1002,7 @@ When an operation fails, the server may throw an exception. The JSON schema of e | `SecurityException ` | `401 Unauthorized ` | | `IOException ` | `403 Forbidden ` | | `FileNotFoundException ` | `404 Not Found ` | -| `RumtimeException ` | `500 Internal Server Error` | +| `RuntimeException ` | `500 Internal Server Error` | Below are examples of exception responses. From 73c226b7ba3da4d08a58100b20cd650e7475aec6 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 09:44:47 -0700 Subject: [PATCH 082/130] HADOOP-12275. releasedocmaker: unreleased should still be dated (Kengo Seki via aw) --- dev-support/releasedocmaker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py index 37bd58aa69869..3c398befbc93d 100755 --- a/dev-support/releasedocmaker.py +++ b/dev-support/releasedocmaker.py @@ -434,7 +434,7 @@ def main(): elif options.usetoday: reldate = strftime("%Y-%m-%d", gmtime()) else: - reldate = "Unreleased" + reldate = "Unreleased (as of %s)" % strftime("%Y-%m-%d", gmtime()) if not os.path.exists(vstr): os.mkdir(vstr) From 4a11646bfbe48a65c3c9872fe5f675b4ba3d84e9 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 09:46:06 -0700 Subject: [PATCH 083/130] HADOOP-12310. final memory report sometimes generates spurious errors (Kengo Seki via aw) --- dev-support/test-patch.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 1faf99f2d4e0f..446d5cf70ea6e 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -395,9 +395,10 @@ function finish_footer_table { local maxmem + # `sort | head` can cause a broken pipe error, but we can ignore it just like compute_gitdiff. 
# shellcheck disable=SC2016,SC2086 maxmem=$(find "${PATCH_DIR}" -type f -exec ${AWK} 'match($0, /^\[INFO\] Final Memory: [0-9]+/) - { print substr($0, 22, RLENGTH-21) }' {} \; | sort -nr | head -n 1) + { print substr($0, 22, RLENGTH-21) }' {} \; | sort -nr 2>/dev/null | head -n 1) if [[ -n ${maxmem} ]]; then add_footer_table "Max memory used" "${maxmem}MB" From 1358b7831e0bacdea07673c545a6f6cc0ded7d1d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 11:10:36 -0700 Subject: [PATCH 084/130] HADOOP-12248. Add native support for TAP (aw) --- dev-support/personality/hadoop.sh | 74 +++++++++++++++++++ .../test-patch.d/builtin-personality.sh | 22 +++--- dev-support/test-patch.d/shellcheck.sh | 2 +- dev-support/test-patch.sh | 40 ++++++++-- 4 files changed, 122 insertions(+), 16 deletions(-) diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 60dbb3d7989bc..1243a1786e35b 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -253,6 +253,26 @@ function personality_modules #fi needflags=true hadoop_unittest_prereqs + + verify_needed_test javac + if [[ $? == 0 ]]; then + yetus_debug "hadoop: javac not requested" + verify_needed_test native + if [[ $? == 0 ]]; then + yetus_debug "hadoop: native not requested" + yetus_debug "hadoop: adding -DskipTests to unit test" + extra="-DskipTests" + fi + fi + + verify_needed_test shellcheck + if [[ $? == 0 + && ! ${CHANGED_FILES} =~ \.bats ]]; then + yetus_debug "hadoop: NO shell code change detected; disabling shelltest profile" + extra="${extra} -P!shelltest" + else + extra="${extra} -Pshelltest" + fi ;; *) extra="-DskipTests" @@ -272,3 +292,57 @@ function personality_modules done } +function personality_file_tests +{ + local filename=$1 + + yetus_debug "Using Hadoop-specific personality_file_tests" + + if [[ ${filename} =~ src/main/webapp ]]; then + yetus_debug "tests/webapp: ${filename}" + elif [[ ${filename} =~ \.sh + || ${filename} =~ \.cmd + || ${filename} =~ src/scripts + || ${filename} =~ src/test/scripts + ]]; then + yetus_debug "tests/shell: ${filename}" + add_test unit + elif [[ ${filename} =~ \.md$ + || ${filename} =~ \.md\.vm$ + || ${filename} =~ src/site + ]]; then + yetus_debug "tests/site: ${filename}" + add_test site + elif [[ ${filename} =~ \.c$ + || ${filename} =~ \.cc$ + || ${filename} =~ \.h$ + || ${filename} =~ \.hh$ + || ${filename} =~ \.proto$ + || ${filename} =~ \.cmake$ + || ${filename} =~ CMakeLists.txt + ]]; then + yetus_debug "tests/units: ${filename}" + add_test cc + add_test unit + add_test javac + elif [[ ${filename} =~ build.xml$ + || ${filename} =~ pom.xml$ + || ${filename} =~ \.java$ + || ${filename} =~ src/main + ]]; then + yetus_debug "tests/javadoc+units: ${filename}" + add_test javac + add_test javadoc + add_test mvninstall + add_test unit + fi + + if [[ ${filename} =~ src/test ]]; then + yetus_debug "tests" + add_test unit + fi + + if [[ ${filename} =~ \.java$ ]]; then + add_test findbugs + fi +} diff --git a/dev-support/test-patch.d/builtin-personality.sh b/dev-support/test-patch.d/builtin-personality.sh index dc944e485f1e1..4be3bfa3c2b2d 100755 --- a/dev-support/test-patch.d/builtin-personality.sh +++ b/dev-support/test-patch.d/builtin-personality.sh @@ -55,6 +55,8 @@ function builtin_mvn_personality_file_tests yetus_debug "tests/webapp: ${filename}" elif [[ ${filename} =~ \.sh || ${filename} =~ \.cmd + || ${filename} =~ src/main/scripts + || ${filename} =~ src/test/scripts ]]; then yetus_debug "tests/shell: ${filename}" elif [[ 
${filename} =~ \.md$ @@ -69,29 +71,31 @@ function builtin_mvn_personality_file_tests || ${filename} =~ \.h$ || ${filename} =~ \.hh$ || ${filename} =~ \.proto$ - || ${filename} =~ src/test || ${filename} =~ \.cmake$ || ${filename} =~ CMakeLists.txt ]]; then yetus_debug "tests/units: ${filename}" + add_test cc + add_test unit + elif [[ ${filename} =~ \.scala$ ]]; then add_test javac - add_test mvninstall add_test unit - elif [[ ${filename} =~ pom.xml$ + add_test mvninstall + elif [[ ${filename} =~ build.xml$ + || ${filename} =~ pom.xml$ || ${filename} =~ \.java$ - || ${filename} =~ \.scala$ || ${filename} =~ src/main ]]; then - if [[ ${filename} =~ src/main/bin - || ${filename} =~ src/main/sbin ]]; then - yetus_debug "tests/shell: ${filename}" - else yetus_debug "tests/javadoc+units: ${filename}" add_test javac add_test javadoc add_test mvninstall add_test unit - fi + fi + + if [[ ${filename} =~ src/test ]]; then + yetus_debug "tests" + add_test unit fi if [[ ${filename} =~ \.java$ ]]; then diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 14d1d1837637f..4d177685fe344 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -56,7 +56,7 @@ function shellcheck_private_findbash fi list="${list} ${i}" done - done < <(find . -type d -name bin -o -type d -name sbin -o -type d -name libexec -o -type d -name shellprofile.d) + done < <(find . -type d -name bin -o -type d -name sbin -o -type d -name scripts -o -type d -name libexec -o -type d -name shellprofile.d) # shellcheck disable=SC2086 echo ${list} ${SHELLCHECK_SPECIFICFILES} | tr ' ' '\n' | sort -u } diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 446d5cf70ea6e..4dd15e69e1b6b 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -30,6 +30,8 @@ BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) STARTINGDIR=$(pwd) USER_PARAMS=("$@") GLOBALTIMER=$(date +"%s") +#shellcheck disable=SC2034 +QATESTMODE=false # global arrays declare -a MAVEN_ARGS=("--batch-mode") @@ -1177,6 +1179,7 @@ function find_changed_modules #shellcheck disable=SC2086,SC2116 CHANGED_UNFILTERED_MODULES=$(echo ${CHANGED_UNFILTERED_MODULES}) + if [[ ${BUILDTOOL} = maven ]]; then # Filter out modules without code for module in ${builddirs}; do @@ -1786,7 +1789,7 @@ function copytpbits # if we've already copied, then don't bother doing it again if [[ ${STARTDIR} == ${PATCH_DIR}/precommit ]]; then - hadoop_debug "Skipping copytpbits; already copied once" + yetus_debug "Skipping copytpbits; already copied once" return fi @@ -2942,6 +2945,7 @@ function populate_test_table function check_unittests { local i + local testsys local test_logfile local result=0 local -r savejavahome=${JAVA_HOME} @@ -2949,6 +2953,9 @@ function check_unittests local jdk="" local jdkindex=0 local statusjdk + local formatresult=0 + local needlog + local unitlogs big_console_header "Running unit tests" @@ -2976,7 +2983,7 @@ function check_unittests personality_modules patch unit case ${BUILDTOOL} in maven) - modules_workers patch unit clean install -fae + modules_workers patch unit clean test -fae ;; ant) modules_workers patch unit @@ -3002,13 +3009,23 @@ function check_unittests pushd "${MODULE[${i}]}" >/dev/null - for j in ${TESTSYSTEMS}; do - if declare -f ${j}_process_tests; then - "${j}_process_tests" "${module}" "${test_logfile}" - ((results=results+$?)) + needlog=0 + for testsys in ${TESTFORMATS}; do + if declare -f ${testsys}_process_tests >/dev/null; then + 
yetus_debug "Calling ${testsys}_process_tests" + "${testsys}_process_tests" "${module}" "${test_logfile}" "${fn}" + formatresult=$? + ((results=results+formatresult)) + if [[ "${formatresult}" != 0 ]]; then + needlog=1 + fi fi done + if [[ ${needlog} == 1 ]]; then + unitlogs="${unitlogs} @@BASE@@/patch-unit-${fn}.txt" + fi + popd >/dev/null ((i=i+1)) @@ -3017,10 +3034,21 @@ function check_unittests done JAVA_HOME=${savejavahome} + if [[ -n "${unitlogs}" ]]; then + add_footer_table "unit test logs" "${unitlogs}" + fi + if [[ ${JENKINS} == true ]]; then add_footer_table "${statusjdk} Test Results" "${BUILD_URL}testReport/" fi + for testsys in ${TESTFORMATS}; do + if declare -f ${testsys}_finalize_results >/dev/null; then + yetus_debug "Calling ${testsys}_finalize_results" + "${testsys}_finalize_results" "${statusjdk}" + fi + done + if [[ ${result} -gt 0 ]]; then return 1 fi From ac9ef39cd0a2bff0d40280e17e3e5e6a76680cd1 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 15:51:48 -0700 Subject: [PATCH 085/130] fixes --- dev-support/test-patch.d/github.sh | 33 ++++-------------------------- dev-support/test-patch.d/jira.sh | 5 +++++ 2 files changed, 9 insertions(+), 29 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 72afee70f5372..33cc248002b96 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -22,7 +22,6 @@ GITHUB_REPO="apache/hadoop" GITHUB_PASSWD="" GITHUB_TOKEN="" GITHUB_USER="" -GITHUB_COMMITID="" GITHUB_ISSUE="" function github_usage @@ -100,12 +99,9 @@ function github_jira_bridge function github_determine_issue { declare input=$1 - declare patchnamechunk - declare maybeissue - if [[ ${input} =~ ^[0-9]+$ - && -n ${GITHUB_REPO} ]]; then + && -n ${GITHUB_REPO} ]]; then ISSUE=${input} return 0 fi @@ -146,7 +142,6 @@ function github_locate_patch # base->sha? GITHUB_ISSUE=${input} - GITHUB_COMMITID="" add_footer_table "GITHUB PR" "${GITHUB_URL}/${GITHUB_REPO}/pull/${input}" @@ -186,37 +181,17 @@ function github_write_comment ${CURL} -X POST \ -H "Accept: application/json" \ -H "Content-Type: application/json" \ - -u "${GITHUB_USER}:${GITHUB_PASSWD}" \ + ${githubauth} \ -d @"${PATCH_DIR}/jiracomment.$$" \ --silent --location \ "${JIRA_URL}/rest/api/2/issue/${ISSUE}/comment" \ >/dev/null - retval=$? - rm "${PATCH_DIR}/jiracomment.$$" - fi + retval=$? 
+ rm "${PATCH_DIR}/jiracomment.$$" return ${retval} } - -function github_write_comment -{ - declare -r commentfile=${1} - shift - - declare retval=1 - - if [[ "${OFFLINE}" == true ]]; then - return 0 - fi - - - - yetus_debug "${GITHUB_USER} ${GITHUB_PASSWD} ${GITHUB_TOKEN} ${GITHUB_COMMITID}" - return ${retval} -} - - ## @description Print out the finished details to the Github PR ## @audience private ## @stability evolving diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 066cca5da67b9..8fdd67915f303 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -63,6 +63,7 @@ function jira_determine_issue if [[ ${maybeissue} =~ ${JIRA_ISSUE_RE} ]]; then ISSUE=${maybeissue} + JIRA_ISSUE=${maybeissue} add_footer_table "JIRA Issue" "${ISSUE}" return 0 fi @@ -211,6 +212,10 @@ function jira_finalreport return 0 fi + if [[ -z "${JIRA_ISSUE}" ]]; then + return 0 + fi + big_console_header "Adding comment to JIRA" add_footer_table "Console output" "${BUILD_URL}console" From d50934b886050a04c2f25a8cf78e3128b3db1a96 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 15:55:33 -0700 Subject: [PATCH 086/130] more fixes --- dev-support/test-patch.d/github.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 33cc248002b96..9838f8b8cd0f3 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -165,8 +165,8 @@ function github_write_comment echo "{\"body\":\"" > "${PATCH_DIR}/ghcomment.$$" sed -e 's,\\,\\\\,g' \ - -e 's,\",\\\",g' \ - | tr -d '\n'>> "${PATCH_DIR}/ghcomment.$$" + -e 's,\",\\\",g' "${PATCH_DIR}/ghcomment.$$" \ + >> "${PATCH_DIR}/ghcomment.$$" echo "\"}" >> "${PATCH_DIR}/ghcomment.$$" if [[ -n ${GITHUB_USER} @@ -272,5 +272,5 @@ function github_finalreport printf "\n\nThis message was automatically generated.\n\n" >> "${commentfile}" - write_to_github "${commentfile}" + github_write_comment "${commentfile}" } From fb1e9c15ab1d457d2538a0eb29246fa71826c4a3 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 16:05:28 -0700 Subject: [PATCH 087/130] someday i will understand urls --- dev-support/test-patch.d/github.sh | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 9838f8b8cd0f3..d7641e3ef9a22 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -16,7 +16,8 @@ add_bugsystem github -GITHUB_URL="https://github.com" +GITHUB_BASE_URL="https://github.com" +GITHUB_API_URL="https://api.github.com" GITHUB_REPO="apache/hadoop" GITHUB_PASSWD="" @@ -27,7 +28,8 @@ GITHUB_ISSUE="" function github_usage { echo "GITHUB Options:" - echo "--github-base-url= The URL of the JIRA server (default:'${GITHUB_URL}')" + echo "--github-api-url= The URL of the API for github (default: '${GITHUB_API_URL}')" + echo "--github-base-url= The URL of the github server (default:'${GITHUB_BASE_URL}')" echo "--github-password= Github password" echo "--github-repo= github repo to use (default:'${GITHUB_REPO}')" echo "--github-token= The token to use to write to github" @@ -41,8 +43,11 @@ function github_parse_args for i in "$@"; do case ${i} in + --github-api-url=*) + GITHUB_API_URL=${i#*=} + ;; --github-base-url=*) - GITHUB_URL=${i#*=} + GITHUB_BASE_URL=${i#*=} ;; --github-repo=*) GITHUB_REPO=${i#*=} @@ -81,7 +86,7 @@ function github_jira_bridge 
((pos2=count-3)) ((pos1=pos2)) - GITHUB_URL=$(echo "${urlfromjira}" | cut -f1-${pos2} -d/) + GITHUB_BASE_URL=$(echo "${urlfromjira}" | cut -f1-${pos2} -d/) ((pos1=pos1+1)) ((pos2=pos1+1)) @@ -124,7 +129,7 @@ return 1 fi - PATCHURL="${GITHUB_URL}/${GITHUB_REPO}/pull/${input}.patch" + PATCHURL="${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}.patch" echo "GITHUB PR #${input} is being downloaded at $(date) from" echo "${PATCHURL}" @@ -143,7 +148,7 @@ GITHUB_ISSUE=${input} - add_footer_table "GITHUB PR" "${GITHUB_URL}/${GITHUB_REPO}/pull/${input}" + add_footer_table "GITHUB PR" "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" return 0 } From 555e97f8b7463070f8aa8e4926b70dbcaf8218c8 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 16:26:36 -0700 Subject: [PATCH 088/130] success! --- dev-support/test-patch.d/github.sh | 15 ++++++++------- dev-support/test-patch.d/jira.sh | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index d7641e3ef9a22..33bf04c1b6a55 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -169,16 +169,17 @@ function github_write_comment fi echo "{\"body\":\"" > "${PATCH_DIR}/ghcomment.$$" - sed -e 's,\\,\\\\,g' \ - -e 's,\",\\\",g' "${PATCH_DIR}/ghcomment.$$" \ - >> "${PATCH_DIR}/ghcomment.$$" + ${SED} -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + -e 's,$,\\r\\n,g' "${commentfile}" \ + | tr -d '\n'>> "${PATCH_DIR}/ghcomment.$$" echo "\"}" >> "${PATCH_DIR}/ghcomment.$$" if [[ -n ${GITHUB_USER} && -n ${GITHUB_PASSWD} ]]; then - githubauth="-u \"${GITHUB_USER}:${GITHUB_PASSWD}\"" + githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" elif [[ -n ${GITHUB_TOKEN} ]]; then - githubauth="-H \"Authorization: token ${GITHUB_TOKEN}\"" + githubauth="Authorization: token ${GITHUB_TOKEN}" else return 0 fi @@ -186,14 +187,14 @@ function github_write_comment ${CURL} -X POST \ -H "Accept: application/json" \ -H "Content-Type: application/json" \ - ${githubauth} \ + -H "${githubauth}" \ -d @"${PATCH_DIR}/ghcomment.$$" \ --silent --location \ "${GITHUB_API_URL}/repos/${GITHUB_REPO}/issues/${ISSUE}/comments" \ >/dev/null retval=$? 
- #rm "${PATCH_DIR}/ghcomment.$$" + rm "${PATCH_DIR}/ghcomment.$$" return ${retval} } diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 8fdd67915f303..80e94902adbcf 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -167,7 +167,7 @@ function jira_write_comment && -n ${JIRA_USER} ]]; then echo "{\"body\":\"" > "${PATCH_DIR}/jiracomment.$$" - sed -e 's,\\,\\\\,g' \ + ${SED} -e 's,\\,\\\\,g' \ -e 's,\",\\\",g' \ -e 's,$,\\r\\n,g' "${commentfile}" \ | tr -d '\n'>> "${PATCH_DIR}/jiracomment.$$" From 142643ca9d1ccb91d23a071062746fb1f2adad6a Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 16:31:01 -0700 Subject: [PATCH 089/130] add location to s-a-p --- dev-support/smart-apply-patch.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index c9221b932ecc8..50ce787fc3ac6 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -235,6 +235,7 @@ function locate_patch else ${CURL} --silent \ --output "${PATCH_DIR}/jira" \ + --location \ "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}" case $? in 0) @@ -278,7 +279,7 @@ function locate_patch fi fi if [[ -z "${PATCH_FILE}" ]]; then - ${CURL} -q -O "${PATCH_DIR}/patch" "${PATCHURL}" + ${CURL} -q --location -O "${PATCH_DIR}/patch" "${PATCHURL}" if [[ $? != 0 ]];then yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." cleanup_and_exit 1 From b1307c67a64bdb135c3171da982c1549de52b546 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 16:32:01 -0700 Subject: [PATCH 090/130] add location to s-a-p --- dev-support/smart-apply-patch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh index 50ce787fc3ac6..e7ac9eb19f8cf 100755 --- a/dev-support/smart-apply-patch.sh +++ b/dev-support/smart-apply-patch.sh @@ -279,7 +279,7 @@ function locate_patch fi fi if [[ -z "${PATCH_FILE}" ]]; then - ${CURL} -q --location -O "${PATCH_DIR}/patch" "${PATCHURL}" + ${CURL} -q --location --output "${PATCH_DIR}/patch" "${PATCHURL}" if [[ $? != 0 ]];then yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded." 
cleanup_and_exit 1 From 3a01c9de55ecf42336f3bcb3af25fd013e5e23b2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Mon, 10 Aug 2015 16:38:30 -0700 Subject: [PATCH 091/130] drop the debug --- dev-support/test-patch.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 6fc58b2a5ae4a..55f9d038450fa 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -23,8 +23,6 @@ if [[ -z "${BASH_VERSINFO}" ]] \ exit 1 fi -set -x - ### BUILD_URL is set by Hudson if it is run by patch process this="${BASH_SOURCE-$0}" From aa05c85338190dab55bde4fa0fc9f4f5212a5984 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 07:25:36 -0700 Subject: [PATCH 092/130] fix branch determination --- dev-support/test-patch.d/github.sh | 81 ++++++++++++++++++++++++--- dev-support/test-patch.d/jira.sh | 90 ++++++++++++++++++++++++++++++ dev-support/test-patch.sh | 87 ++++------------------------- 3 files changed, 173 insertions(+), 85 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 33bf04c1b6a55..e56f012baa1f4 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -71,9 +71,7 @@ function github_jira_bridge { declare fileloc=$1 declare urlfromjira - declare count - declare pos1 - declare pos2 + # the JIRA issue has already been downloaded. So let's # find the URL. This is currently hard-coded to github.com @@ -81,7 +79,19 @@ function github_jira_bridge # shellcheck disable=SC2016 urlfromjira=$(${AWK} 'match($0,"https://github.com/.*patch"){print $1}' "${PATCH_DIR}/jira" | tail -1) - count=${urlfromjira//[^\/]} + github_breakup_url "${urlfromjira}" + github_locate_patch "${GITHUB_ISSUE}" "${fileloc}" + +} + +function github_breakup_url +{ + declare url=$1 + declare count + declare pos1 + declare pos2 + + count=${url//[^\/]} count=${#count} ((pos2=count-3)) ((pos1=pos2)) @@ -97,8 +107,6 @@ function github_jira_bridge unset pos2 GITHUB_ISSUE=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) - - github_locate_patch "${GITHUB_ISSUE}" "${fileloc}" } function github_determine_issue @@ -114,16 +122,51 @@ function github_determine_issue return 1 } +## @description Try to guess the branch being tested using a variety of heuristics +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success, with PATCH_BRANCH updated appropriately +function github_determine_branch +{ + declare reflist + declare ref + + if [[ ! -f "${PATCH_DIR}/github-pull.json" ]]; then + return 1 + fi + + reflist=$(${AWK} 'match($0,"\"ref\": \""){print $2}' "${PATCH_DIR}/github-pull.json"\ + | cut -f2 -d\" ) + + for PATCH_BRANCH in ${reflist}; do + yetus_debug "Determine branch: starting with ${PATCH_BRANCH}" + + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return 0 + fi + done + return 1 +} + + function github_locate_patch { declare input=$1 declare output=$2 + declare githubauth if [[ "${OFFLINE}" == true ]]; then yetus_debug "github_locate_patch: offline, skipping" return 1 fi + if [[ ${input} =~ ^${GITHUB_BASE_URL}.*patch$ ]]; then + github_breakup_url "${GITHUB_BASE_URL}" + input=${GITHUB_ISSUE} + fi + if [[ ! 
${input} =~ ^[0-9]+$ ]]; then yetus_debug "github: ${input} is not a pull request #" return 1 @@ -131,11 +174,34 @@ function github_locate_patch PATCHURL="${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}.patch" echo "GITHUB PR #${input} is being downloaded at $(date) from" + echo "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" + + if [[ -n ${GITHUB_USER} + && -n ${GITHUB_PASSWD} ]]; then + githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" + elif [[ -n ${GITHUB_TOKEN} ]]; then + githubauth="Authorization: token ${GITHUB_TOKEN}" + else + githubauth="X-ignore-me: fake" + fi + + # Let's pull the PR JSON for later use + ${CURL} --silent --fail \ + -H "Accept: application/json" \ + -H "${githubauth}" \ + --output "${PATCH_DIR}/github-pull.json" \ + --location \ + "${GITHUB_API_URL}/repos/${GITHUB_REPO}/pulls/${input}" + + + echo "Patch from GITHUB PR #${input} is being downloaded at $(date) from" echo "${PATCHURL}" + # the actual patch file ${CURL} --silent --fail \ --output "${output}" \ --location \ + -H "${githubauth}" \ "${PATCHURL}" if [[ $? != 0 ]]; then @@ -143,9 +209,6 @@ function github_locate_patch return 1 fi - # https://api.github.com/repos/apache/hadoop/pulls/25 - # base->sha? - GITHUB_ISSUE=${input} add_footer_table "GITHUB PR" "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 80e94902adbcf..3791003718c06 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -147,6 +147,96 @@ function jira_locate_patch return 0 } +## @description Try to guess the branch being tested using a variety of heuristics +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success, with PATCH_BRANCH updated appropriately +function jira_determine_branch +{ + declare patchnamechunk + declare total + declare count + declare hinttype + + for hinttype in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do + if [[ -z "${hinttype}" ]]; then + continue + fi + yetus_debug "Determine branch: starting with ${hinttype}" + patchnamechunk=$(echo "${hinttype}" \ + | ${SED} -e 's,.*/\(.*\)$,\1,' \ + -e 's,\.txt,.,' \ + -e 's,.patch,.,g' \ + -e 's,.diff,.,g' \ + -e 's,\.\.,.,g' \ + -e 's,\.$,,g' ) + + # ISSUE-branch-## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) + yetus_debug "Determine branch: ISSUE-branch-## = ${PATCH_BRANCH}" + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return 0 + fi + fi + + # ISSUE-##[.##].branch + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -lt 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.) + yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return 0 + fi + fi + done + + # ISSUE.branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 3 )) + until [[ ${total} -lt 2 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.) + yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? 
== 0 ]]; then + return 0 + fi + fi + done + + # ISSUE-branch.## + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1- -d. ) + count="${PATCH_BRANCH//[^.]}" + total=${#count} + ((total = total + 1 )) + until [[ ${total} -eq 1 ]]; do + PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1-${total} -d. ) + yetus_debug "Determine branch: ISSUE-branch[.##] = ${PATCH_BRANCH}" + ((total=total-1)) + if [[ -n "${PATCH_BRANCH}" ]]; then + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return 0 + fi + fi + done + done + + return 1 +} + ## @description Write the contents of a file to JIRA ## @params filename ## @stability stable diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 55f9d038450fa..6fc2f7b105fcf 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1386,9 +1386,8 @@ function verify_valid_branch ## @return 1 on failure, with PATCH_BRANCH updated to PATCH_BRANCH_DEFAULT function determine_branch { - local patchnamechunk - local total - local count + declare bugs + declare retval=1 # something has already set this, so move on if [[ -n ${PATCH_BRANCH} ]]; then @@ -1411,83 +1410,19 @@ function determine_branch return fi - for j in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do - if [[ -z "${j}" ]]; then - continue - fi - yetus_debug "Determine branch: starting with ${j}" - patchnamechunk=$(echo "${j}" \ - | ${SED} -e 's,.*/\(.*\)$,\1,' \ - -e 's,\.txt,.,' \ - -e 's,.patch,.,g' \ - -e 's,.diff,.,g' \ - -e 's,\.\.,.,g' \ - -e 's,\.$,,g' ) - - # ISSUE-branch-## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) - yetus_debug "Determine branch: ISSUE-branch-## = ${PATCH_BRANCH}" - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_determine_branch >/dev/null;then + "${bugs}_determine_branch" + retval=$? + if [[ ${retval} == 0 ]]; then + break fi fi - - # ISSUE-##[.##].branch - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 3 )) - until [[ ${total} -lt 2 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.) - yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi - fi - done - - # ISSUE.branch.## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 3 )) - until [[ ${total} -lt 2 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.) - yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi - fi - done - - # ISSUE-branch.## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 1 )) - until [[ ${total} -eq 1 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1-${total} -d. ) - yetus_debug "Determine branch: ISSUE-branch[.##] = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? 
== 0 ]]; then - return - fi - fi - done - done - PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" + if [[ ${retval} != 0 ]]; then + PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" + fi popd >/dev/null } From 96f27450cc603053e4af54a7a2eec30bae3d6fc5 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 07:49:15 -0700 Subject: [PATCH 093/130] HADOOP-12248. Add native support for TAP (aw) --- dev-support/test-patch.d/tap.sh | 80 +++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100755 dev-support/test-patch.d/tap.sh diff --git a/dev-support/test-patch.d/tap.sh b/dev-support/test-patch.d/tap.sh new file mode 100755 index 0000000000000..c6796a8692b59 --- /dev/null +++ b/dev-support/test-patch.d/tap.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_test_format tap + +TAP_FAILED_TESTS="" +TAP_LOG_DIR="target/tap" + +function tap_process_args +{ + declare i + + for i in "$@"; do + case ${i} in + --tap-log-dir=*) + TAP_LOG_DIR=${i#=*} + ;; + esac + done +} + +function tap_usage +{ + echo "TAP Options:" + echo "--tap-log-dir=
<dir>
Directory relative to the module for tap output (default: \"target/tap\")" +} + +function tap_process_tests +{ + # shellcheck disable=SC2034 + declare module=$1 + # shellcheck disable=SC2034 + declare buildlogfile=$2 + declare filefrag=$3 + declare result=0 + declare module_failed_tests + declare filenames + + filenames=$(find "${TAP_LOG_DIR}" -type f -exec "${GREP}" -l -E "not ok " {} \;) + + if [[ -n "${filenames}" ]]; then + module_failed_tests=$(echo "${filenames}" \ + | sed -e "s,${TAP_LOG_DIR},,g" -e s,^/,,g ) + # shellcheck disable=SC2086 + cat ${filenames} >> "${PATCH_DIR}/patch-${filefrag}.tap" + TAP_LOGS="${TAP_LOGS} @@BASE@@/patch-${filefrag}.tap" + TAP_FAILED_TESTS="${TAP_FAILED_TESTS} ${module_failed_tests}" + ((result=result+1)) + fi + + if [[ ${result} -gt 0 ]]; then + return 1 + fi + return 0 +} + +function tap_finalize_results +{ + declare jdk=$1 + + if [[ -n "${TAP_FAILED_TESTS}" ]] ; then + # shellcheck disable=SC2086 + populate_test_table "${jdk}Failed junit tests" ${TAP_FAILED_TESTS} + TAP_FAILED_TESTS="" + add_footer_table "TAP logs" "${TAP_LOGS}" + fi +} From d552abec703cc22e4d452aecbbb15e19087ca0b2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 07:57:32 -0700 Subject: [PATCH 094/130] fix shellcheck errors --- dev-support/test-patch.d/github.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index e56f012baa1f4..7d0073f4823d5 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -130,7 +130,6 @@ function github_determine_issue function github_determine_branch { declare reflist - declare ref if [[ ! -f "${PATCH_DIR}/github-pull.json" ]]; then return 1 @@ -139,6 +138,7 @@ function github_determine_branch reflist=$(${AWK} 'match($0,"\"ref\": \""){print $2}' "${PATCH_DIR}/github-pull.json"\ | cut -f2 -d\" ) + # shellcheck disable=SC2016 for PATCH_BRANCH in ${reflist}; do yetus_debug "Determine branch: starting with ${PATCH_BRANCH}" From a2c964e3ec2c1d7dee60b6b20b55b47ebec07121 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 09:53:15 -0700 Subject: [PATCH 095/130] fix some shellcheck errors; github to jira support --- dev-support/test-patch.d/github.sh | 70 ++++++++++++++++++++++-------- dev-support/test-patch.d/jira.sh | 4 +- 2 files changed, 55 insertions(+), 19 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 7d0073f4823d5..b4e70db886d1e 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -25,6 +25,8 @@ GITHUB_TOKEN="" GITHUB_USER="" GITHUB_ISSUE="" +GITHUB_BRIDGED=false + function github_usage { echo "GITHUB Options:" @@ -34,7 +36,6 @@ function github_usage echo "--github-repo= github repo to use (default:'${GITHUB_REPO}')" echo "--github-token= The token to use to write to github" echo "--github-user= Github user" - } function github_parse_args @@ -72,16 +73,16 @@ function github_jira_bridge declare fileloc=$1 declare urlfromjira - # the JIRA issue has already been downloaded. So let's # find the URL. This is currently hard-coded to github.com # Sorry Github Enterprise users. 
:( + GITHUB_BRIDGED=true + # shellcheck disable=SC2016 urlfromjira=$(${AWK} 'match($0,"https://github.com/.*patch"){print $1}' "${PATCH_DIR}/jira" | tail -1) github_breakup_url "${urlfromjira}" github_locate_patch "${GITHUB_ISSUE}" "${fileloc}" - } function github_breakup_url @@ -109,6 +110,40 @@ function github_breakup_url GITHUB_ISSUE=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) } +function github_find_jira_title +{ + declare title + declare maybe + declare retval + + if [[ -f "${PATCH_DIR}/github-pull.json" ]]; then + return 1 + fi + + title=$(GREP title "${PATCH_DIR}/github-pull.json" \ + | cut -f4 -d\") + + # people typically do two types: JIRA-ISSUE: and [JIRA-ISSUE] + # JIRA_ISSUE_RE is pretty strict so we need to chop that stuff + # out first + + maybe=$(echo "${title}" | cut -f2 -d\[ | cut -f1 -d\]) + jira_determine_issue "${maybe}" + retval=$? + + if [[ ${retval} == 0 ]]; then + return 0 + fi + + maybe=$(echo "${title}" | cut -f1 -d:) + jira_determine_issue "${maybe}" + retval=$? + + if [[ ${retval} == 0 ]]; then + return 0 + fi +} + function github_determine_issue { declare input=$1 @@ -119,6 +154,12 @@ function github_determine_issue return 0 fi + if [[ ${GITHUB_BRIDGED} == false ]]; then + github_find_jira_title + if [[ $? == 0 ]]; then + return 0 + fi + fi return 1 } @@ -129,28 +170,24 @@ function github_determine_issue ## @return 0 on success, with PATCH_BRANCH updated appropriately function github_determine_branch { - declare reflist - if [[ ! -f "${PATCH_DIR}/github-pull.json" ]]; then return 1 fi - reflist=$(${AWK} 'match($0,"\"ref\": \""){print $2}' "${PATCH_DIR}/github-pull.json"\ - | cut -f2 -d\" ) - # shellcheck disable=SC2016 - for PATCH_BRANCH in ${reflist}; do - yetus_debug "Determine branch: starting with ${PATCH_BRANCH}" + PATCH_BRANCH=$(${AWK} 'match($0,"\"ref\": \""){print $2}' "${PATCH_DIR}/github-pull.json"\ + | cut -f2 -d\"\ + | tail -1 ) - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return 0 - fi - done + yetus_debug "Github determine branch: starting with ${PATCH_BRANCH}" + + verify_valid_branch "${PATCH_BRANCH}" + if [[ $? == 0 ]]; then + return 0 + fi return 1 } - function github_locate_patch { declare input=$1 @@ -193,7 +230,6 @@ function github_locate_patch --location \ "${GITHUB_API_URL}/repos/${GITHUB_REPO}/pulls/${input}" - echo "Patch from GITHUB PR #${input} is being downloaded at $(date) from" echo "${PATCHURL}" diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 3791003718c06..f58b90f7913b1 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -111,8 +111,8 @@ function jira_locate_patch return 1 fi - # Sorry enterprise github users. Currently hard-coded to github.com - if [[ $(${GREP} -c 'https://github.com/.*patch' "${PATCH_DIR}/jira") != 0 ]]; then + if [[ -n "${GITHUB_BASE_URL}" + && $(${GREP} -c "${GITHUB_BASE_URL}"'.*patch' "${PATCH_DIR}/jira") != 0 ]]; then echo "${input} appears to be a Github PR. Switching Modes." github_jira_bridge "${fileloc}" return $? 
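The title heuristics added by github_find_jira_title in the patch above are easy to exercise in isolation. The following is a minimal sketch, assuming the YETUS issue regex used elsewhere in this series; extract_candidates is a made-up name for illustration, and the real function feeds each candidate to jira_determine_issue for validation rather than printing it:

```bash
#!/usr/bin/env bash
# Minimal sketch of the PR-title heuristics in github_find_jira_title.
# "extract_candidates" is a hypothetical name used only for this example;
# the real code validates each candidate via jira_determine_issue instead
# of printing it.
function extract_candidates
{
  declare title=$1

  # people typically do two types: [JIRA-ISSUE] and JIRA-ISSUE:
  echo "${title}" | cut -f2 -d\[ | cut -f1 -d\]    # bracket form
  echo "${title}" | cut -f1 -d:                    # colon form
}

extract_candidates "[YETUS-15] fix branch determination"
# YETUS-15                             <- bracket form matches
# [YETUS-15] fix branch determination  <- colon form misses; the issue regex rejects it

extract_candidates "YETUS-15: fix branch determination"
# YETUS-15: fix branch determination   <- bracket form misses; rejected
# YETUS-15                             <- colon form matches
```

Candidates that fail the project's JIRA_ISSUE_RE check are simply discarded, which is why the stray full-title candidates in the sketch are harmless.
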
From a5e41a33e577edbb79aa1fabee56fc28908b823b Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 11:22:01 -0700 Subject: [PATCH 096/130] docs, minor fixes --- dev-support/docs/precommit-advanced.md | 37 ++++++++++++++++++++++-- dev-support/docs/precommit-basic.md | 40 ++++++++++++++++++++++++-- dev-support/test-patch.d/github.sh | 24 ++++++++++++---- dev-support/test-patch.sh | 2 +- 4 files changed, 91 insertions(+), 12 deletions(-) diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md index a424199e7f5a6..ca6fc68e21ad4 100644 --- a/dev-support/docs/precommit-advanced.md +++ b/dev-support/docs/precommit-advanced.md @@ -50,8 +50,9 @@ test-patch always passes -noinput to Ant. This force ant to be non-interactive. test-patch allows one to add to its basic feature set via plug-ins. There is a directory called test-patch.d off of the directory where test-patch.sh lives. Inside this directory one may place some bash shell fragments that, if setup with proper functions, will allow for test-patch to call it as necessary. +## Test Plug-ins -Every plugin must have one line in order to be recognized: +Every test plugin must have one line in order to be recognized: ```bash add_plugin @@ -94,6 +95,38 @@ If the plug-in has some specific options, one can use following functions: HINT: It is recommended to make the pluginname relatively small, 10 characters at the most. Otherwise, the ASCII output table may be skewed. +## Bug System Plug-ins + +Similar to tests, the ability to add support for bug tracking systems is also handled via a plug-in mechanism. + +* pluginname_usage + + - executed when the help message is displayed. This is used to display the plug-in specific options for the user. + +* pluginname_parse_args + + - executed prior to any other above functions except for pluginname_usage. This is useful for parsing the arguments passed from the user and setting up the execution environment. + + +* pluginname\_locate\_patch + +Given input from the user, download the patch if possible. + +* pluginname\_determine\_branch + +Using any heuristics available, return the branch to process, if possible. + +* pluginname\_determine\_issue + +Using any heuristics available, set the issue, bug number, etc, for this bug system, if possible. This is typically used to fill in supplementary information in the final output table. + +* pluginname_writecomment + +Given text input, write this output to the bug system as a comment. NOTE: It is the bug system's responsibility to format appropriately. + +* pluginname_finalreport + +Write the final result table to the bug system. # Configuring for Other Projects @@ -193,7 +226,7 @@ There are a handful of extremely important variables that make life easier for p * HOW\_TO\_CONTRIBUTE should be a URL that points to a project's on-boarding documentation for new users. Currently, it is used to suggest a review of patch naming guidelines. Since this should be project specific information, it is useful to set in a project's personality. -* ISSUE\_RE is to help test-patch when talking to JIRA. It helps determine if the given project is appropriate for the given JIRA issue. +* JIRA\_ISSUE\_RE is to help test-patch when talking to JIRA. It helps determine if the given project is appropriate for the given JIRA issue. There are similar variables for GITHUB. * MODULE and other MODULE\_\* are arrays that contain which modules, the status, etc, to be operated upon. These should be treated as read-only by plug-ins. 
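To make the bug system hook list documented above concrete, a minimal plug-in could be sketched as follows. This is illustrative only: the mybug name, the MYBUG-#### issue format, and the example.com URL are invented, and only a few of the hooks are implemented:

```bash
#!/usr/bin/env bash
# Hypothetical bug system plug-in skeleton; "mybug" is not a real plug-in
# and example.com is a placeholder.

add_bugsystem mybug

MYBUG_BASE_URL="https://example.com/tracker"

function mybug_usage
{
  echo "MYBUG Options:"
  echo "--mybug-base-url=<url>  Base URL for the tracker (default: '${MYBUG_BASE_URL}')"
}

function mybug_parse_args
{
  declare i

  for i in "$@"; do
    case ${i} in
      --mybug-base-url=*)
        MYBUG_BASE_URL=${i#*=}
      ;;
    esac
  done
}

## @description  Given input from the user, download the patch if possible
## @audience     private
## @stability    evolving
## @replaceable  no
## @return       0 on success, with the patch in the requested location
function mybug_locate_patch
{
  declare input=$1
  declare output=$2

  # MYBUG-#### is this imaginary tracker's issue format
  if [[ ! ${input} =~ ^MYBUG-[0-9]+$ ]]; then
    return 1
  fi

  ${CURL} --silent --fail \
    --output "${output}" \
    --location \
    "${MYBUG_BASE_URL}/${input}/latest.patch"
}
```

Because the driver probes for each hook with declare -f before calling it, a plug-in only needs to define the hooks that make sense for its tracker.
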
diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md index e8121056036df..0558384609014 100644 --- a/dev-support/docs/precommit-basic.md +++ b/dev-support/docs/precommit-basic.md @@ -119,6 +119,8 @@ will tell test-patch to use ant instead of maven to drive the project. # Providing Patch Files +## JIRA + It is a fairly common practice within the Apache community to use Apache's JIRA instance to store potential patches. As a result, test-patch supports providing just a JIRA issue number. test-patch will find the *last* attachment, download it, then process it. For example: @@ -129,15 +131,47 @@ $ test-patch.sh (other options) HADOOP-9905 ... will process the patch file associated with this JIRA issue. -A new practice is to use a service such as GitHub and its Pull Request (PR) feature. Luckily, test-patch supports URLs and many services like GitHub provide ways to provide unified diffs via URLs. +If the Apache JIRA system is not in use, then override options may be provided on the command line (or via the Personality. See the advanced guide.) + +```bash +$ test-patch.sh --jira-issue-re='^PROJECT-[0-9]+$' --jira-base-url='https://example.com/jira' PROJECT-90 +``` + +... will process the patch file attached to PROJECT-90 on the JIRA instance located on the example.com server. + +## GITHUB + +A new practice within the ASF is to use a service such as GitHub and its Pull Request (PR) feature. test-patch supports many forms of providing PR support. + +```bash +$ test-patch.sh --github-repo=apache/pig 99 +``` + +or + +```bash +$ test-patch.sh https://github.com/apache/pig/pulls/99 +``` + +or + +```bash +$ test-patch.sh https://github.com/apache/pig/pulls/99.patch +``` + +... will process PR #99 on the apache/pig repo. + +## Generic URLs + +Luckily, test-patch supports provide ways to provide unified diffs via URLs. For example: ```bash -$ test-patch.sh (other options) https://github.com/apache/flink/pull/773.patch +$ test-patch.sh (other options) https://example.com/webserver/file.patch ``` -... will grab a unified diff of PR #773 and process it. +... will download and process the file.patch from the example.com webserver. # Project-specific Capabilities diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index b4e70db886d1e..b8f2574cbb8c3 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -97,17 +97,17 @@ function github_breakup_url ((pos2=count-3)) ((pos1=pos2)) - GITHUB_BASE_URL=$(echo "${urlfromjira}" | cut -f1-${pos2} -d/) + GITHUB_BASE_URL=$(echo "${url}" | cut -f1-${pos2} -d/) ((pos1=pos1+1)) ((pos2=pos1+1)) - GITHUB_REPO=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/) + GITHUB_REPO=$(echo "${url}" | cut -f${pos1}-${pos2} -d/) ((pos1=pos2+2)) unset pos2 - GITHUB_ISSUE=$(echo "${urlfromjira}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) + GITHUB_ISSUE=$(echo "${url}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) } function github_find_jira_title @@ -116,7 +116,7 @@ function github_find_jira_title declare maybe declare retval - if [[ -f "${PATCH_DIR}/github-pull.json" ]]; then + if [[ ! 
-f "${PATCH_DIR}/github-pull.json" ]]; then return 1 fi @@ -151,7 +151,9 @@ function github_determine_issue if [[ ${input} =~ ^[0-9]+$ && -n ${GITHUB_REPO} ]]; then ISSUE=${input} - return 0 + if [[ -z ${GITHUB_ISSUE} ]]; then + GITHUB_ISSUE=${input} + fi fi if [[ ${GITHUB_BRIDGED} == false ]]; then @@ -160,6 +162,11 @@ function github_determine_issue return 0 fi fi + + if [[ -n ${ISSUE} ]]; then + return 0 + fi + return 1 } @@ -199,8 +206,13 @@ function github_locate_patch return 1 fi + if [[ ${input} =~ ^${GITHUB_BASE_URL}.*/pulls/[0-9]+$ ]]; then + github_breakup_url "${input}.patch" + input=${GITHUB_ISSUE} + fi + if [[ ${input} =~ ^${GITHUB_BASE_URL}.*patch$ ]]; then - github_breakup_url "${GITHUB_BASE_URL}" + github_breakup_url "${input}" input=${GITHUB_ISSUE} fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 6fc2f7b105fcf..f53ac809cd6d7 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1561,7 +1561,7 @@ function locate_patch guess_patch_file "${PATCH_DIR}/patch" if [[ $? != 0 ]]; then - yetus_error "ERROR: ${PATCHURL} is not a patch file." + yetus_error "ERROR: Unsure how to process ${PATCH_OR_ISSUE}." cleanup_and_exit 1 fi } From 55457b0d416744f97d2b04889054829e1ab70a33 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 11:31:51 -0700 Subject: [PATCH 097/130] remove a todo that is tadone --- dev-support/test-patch.d/github.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index b8f2574cbb8c3..6142f365610f5 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -321,8 +321,6 @@ function github_finalreport declare commentfile=${PATCH_DIR}/gitcommentfile.$$ declare comment - # TODO: There really should be a reference to the JIRA issue, as needed - rm "${commentfile}" 2>/dev/null if [[ ${JENKINS} != "true" From d86f031cbde59e1a39d327dddb052dca5b52d80a Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 13:04:14 -0700 Subject: [PATCH 098/130] fix shellcheck errors; github issue determination fix --- dev-support/test-patch.d/github.sh | 2 +- dev-support/test-patch.d/jira.sh | 2 +- dev-support/test-patch.sh | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 6142f365610f5..994392ebca95c 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -163,7 +163,7 @@ function github_determine_issue fi fi - if [[ -n ${ISSUE} ]]; then + if [[ -n ${GITHUB_ISSUE} ]]; then return 0 fi diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index f58b90f7913b1..77d9e7a6a4a61 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -297,7 +297,7 @@ function jira_finalreport rm "${commentfile}" 2>/dev/null - if [[ ${JENKINS} != "true" + if [[ ${JENKINS} == "false" || ${OFFLINE} == true ]] ; then return 0 fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index f53ac809cd6d7..112d9ad8c79ce 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -99,7 +99,7 @@ function setup_defaults RESETREPO=false ISSUE="" TIMER=$(date +"%s") - PATCHURL="" + PATCH_URL="" OSTYPE=$(uname -s) BUILDTOOL=maven TESTFORMATS="" @@ -278,8 +278,10 @@ function add_vote_table fi if [[ -z ${value} ]]; then + # shellcehck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | ${subsystem} | | ${*:-} |" else + # 
shellcehck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| ${value} | ${subsystem} | ${calctime} | $* |" fi ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1)) From 6abf64bf3b438b49f63ced1c0bfb19adfdfe7e68 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 13:05:32 -0700 Subject: [PATCH 099/130] github issue determination fix --- dev-support/test-patch.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 112d9ad8c79ce..6a2b79c5982b7 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1440,8 +1440,6 @@ function determine_issue yetus_debug "Determine issue" - ISSUE="Unknown" - for bugsys in ${BUGSYSTEMS}; do if declare -f ${bugsys}_determine_issue >/dev/null; then "${bugsys}_determine_issue" "${PATCH_OR_ISSUE}" From 076f84ce1dc2573217d3974d063cf24e98856958 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 13:43:07 -0700 Subject: [PATCH 100/130] use github issue, not issue for github write --- dev-support/test-patch.d/github.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 994392ebca95c..8965c9a8dce05 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -301,7 +301,7 @@ function github_write_comment -H "${githubauth}" \ -d @"${PATCH_DIR}/ghcomment.$$" \ --silent --location \ - "${GITHUB_API_URL}/repos/${GITHUB_REPO}/issues/${ISSUE}/comments" \ + "${GITHUB_API_URL}/repos/${GITHUB_REPO}/issues/${GITHUB_ISSUE}/comments" \ >/dev/null retval=$? From aa74fc84b7a9d4cbcc2d9d316b6f89dac9514427 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 13:43:39 -0700 Subject: [PATCH 101/130] use jira issue, not issue for jira write --- dev-support/test-patch.d/jira.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 77d9e7a6a4a61..d139bd9fd5252 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -269,7 +269,7 @@ function jira_write_comment -u "${JIRA_USER}:${JIRA_PASSWD}" \ -d @"${PATCH_DIR}/jiracomment.$$" \ --silent --location \ - "${JIRA_URL}/rest/api/2/issue/${ISSUE}/comment" \ + "${JIRA_URL}/rest/api/2/issue/${JIRA_ISSUE}/comment" \ >/dev/null retval=$? 
rm "${PATCH_DIR}/jiracomment.$$" From e51de2612297df120d4857cf89213515f1ca8339 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Tue, 11 Aug 2015 13:49:41 -0700 Subject: [PATCH 102/130] fix some shellcheck errors --- dev-support/test-patch.d/github.sh | 1 + dev-support/test-patch.sh | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 8965c9a8dce05..ef8b400ef43f6 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -150,6 +150,7 @@ function github_determine_issue if [[ ${input} =~ ^[0-9]+$ && -n ${GITHUB_REPO} ]]; then + # shellcheck disable=SC2034 ISSUE=${input} if [[ -z ${GITHUB_ISSUE} ]]; then GITHUB_ISSUE=${input} diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 6a2b79c5982b7..b90525b728c50 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -99,7 +99,6 @@ function setup_defaults RESETREPO=false ISSUE="" TIMER=$(date +"%s") - PATCH_URL="" OSTYPE=$(uname -s) BUILDTOOL=maven TESTFORMATS="" From fac99f1247362fa1fb03d4c8061cd752b07c750e Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 10:38:34 -0700 Subject: [PATCH 103/130] 1st pass at calculating the uni diff lines --- dev-support/test-patch.d/github.sh | 2 ++ dev-support/test-patch.sh | 45 ++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index ef8b400ef43f6..6e90b29270517 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -27,6 +27,8 @@ GITHUB_ISSUE="" GITHUB_BRIDGED=false +GITHUB_COMMITSHA="" + function github_usage { echo "GITHUB Options:" diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index b90525b728c50..e5acb8b3af15f 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -105,6 +105,7 @@ function setup_defaults JDK_TEST_LIST="javac javadoc unit" GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" + GITUNIDIFFLINES="${PATCH_DIR}/gitdiffunilines.txt" # Solaris needs POSIX, not SVID case ${OSTYPE} in @@ -662,9 +663,53 @@ function compute_gitdiff touch "${GITDIFFCONTENT}" fi + if [[ -s "${GITDIFFLINES}" ]] + compute_unidiff + fi + popd >/dev/null } +## @description generate an index of unified diff lines vs. modified/added lines +## @description ${GITDIFFLINES} must exist. +## @audience private +## @stability stable +## @replaceable no +function compute_unidiff +{ + declare fn + declare tmpfile=${PATCH_DIR}/gitdifflinenumber.$$ + + # now that we know what lines are where, we can deal + # with github's pain-in-the-butt API. It requires + # that the client provides the line number of the + # unified diff on a per file basis. + + # First, build a per-file unified diff, pulling + # out the 'extra' lines, grabbing the adds with + # the line number in the diff file along the way, + # finally rewriting the line so that it is in + # './filename:diff line:content' format. + for fn in ${CHANGED_FILES}; do + ${GIT} diff ${file} > ${PATCH_DIR}/${file}.$$ \ + | ${GREP} -vE '^(@|\+\+\+|\-\-\-|diff|index)' \ + | ${GREP} -n '^+' \ + | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ + -e s,^,./${fn}:,g >> "${tmpfile}" + done + + # at this point, tmpfile should be in the same format + # as gitdiffcontent, just with different line numbers. 
+ # let's do a merge: + + # ./filename:real number:diff number + paste -d: "${GITDIFFLINES}" "${tmpflie}" \ + | ${AWK} -F: '{print $1":"$2":"$5}' \ + > "${GITUNIDIFFLINES}" + + rm "${tmpfile}" +} + ## @description Print the command to be executing to the screen. Then ## @description run the command, sending stdout and stderr to the given filename ## @description This will also ensure that any directories in ${BASEDIR} have From 9e52c30777205728ed74173b041bfd2a4b8245b2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 10:39:14 -0700 Subject: [PATCH 104/130] 1st pass at calculating the uni diff lines --- dev-support/test-patch.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index e5acb8b3af15f..00ce6cf12e2e8 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -659,11 +659,12 @@ function compute_gitdiff if [[ ! -f ${GITDIFFLINES} ]]; then touch "${GITDIFFLINES}" fi + if [[ ! -f ${GITDIFFCONTENT} ]]; then touch "${GITDIFFCONTENT}" fi - if [[ -s "${GITDIFFLINES}" ]] + if [[ -s "${GITDIFFLINES}" ]]; then compute_unidiff fi From 2e7c6dafe62128f32d386b84e5934f7d07f56bae Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 10:41:17 -0700 Subject: [PATCH 105/130] 1st pass at calculating the uni diff lines --- dev-support/test-patch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 00ce6cf12e2e8..dcd68b2ffa8a5 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -692,7 +692,7 @@ function compute_unidiff # finally rewriting the line so that it is in # './filename:diff line:content' format. for fn in ${CHANGED_FILES}; do - ${GIT} diff ${file} > ${PATCH_DIR}/${file}.$$ \ + ${GIT} diff ${fn} > ${PATCH_DIR}/${fn}.$$ \ | ${GREP} -vE '^(@|\+\+\+|\-\-\-|diff|index)' \ | ${GREP} -n '^+' \ | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ From 1c70a7201740b4def62ad6dbd9798f70fdb0c714 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 11:35:14 -0700 Subject: [PATCH 106/130] 2nd pass: line comments on github --- dev-support/test-patch.d/github.sh | 50 ++++++++++++++++++++++++++ dev-support/test-patch.d/shellcheck.sh | 6 +++- dev-support/test-patch.sh | 48 +++++++++++++++++-------- 3 files changed, 89 insertions(+), 15 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 6e90b29270517..2bf0f74fd1963 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -267,6 +267,56 @@ function github_locate_patch return 0 } +function github_linecomments +{ + declare file=$1 + declare realline=$2 + declare uniline=$3 + declare text=$4 + declare commitsha + + if [[ ${file} =~ ^./ ]]; then + file=${file##./} + fi + + commitsha=$(${GREP} \"sha\" "${PATCH_DIR}/github-pull.json" 2>/dev/null \ + | head -1 \ + | cut -f4 -d\") + + { + printf "{\"body\":\"" + echo "${text}" \ + | ${SED} -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + -e 's,$,\\r\\n,g' \ + | tr -d '\n' + echo "\"," + echo "\"commit_id\":\"${commitsha}\"," + echo "\"path\":\"${file}\"," + echo "\"position\":${uniline}," + echo "}" + } > "${PATCH_DIR}/ghcomment.$$" + + if [[ -n ${GITHUB_USER} + && -n ${GITHUB_PASSWD} ]]; then + githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" + elif [[ -n ${GITHUB_TOKEN} ]]; then + githubauth="Authorization: token ${GITHUB_TOKEN}" + else + return 0 + fi + + ${CURL} -X POST \ + -H "Accept: application/json" \ + -H 
"Content-Type: application/json" \ + -H "${githubauth}" \ + -d @"${PATCH_DIR}/ghcomment.$$" \ + --silent --location \ + "${GITHUB_API_URL}/repos/${GITHUB_REPO}/pulls/${GITHUB_ISSUE}/comments" \ + >/dev/null + rm "${PATCH_DIR}/ghcomment.$$" +} + ## @description Write the contents of a file to github ## @params filename ## @stability stable diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 4d177685fe344..47ea33380912b 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -137,7 +137,10 @@ function shellcheck_postapply fi add_footer_table shellcheck "${msg}" - calcdiffs "${PATCH_DIR}/branch-shellcheck-result.txt" "${PATCH_DIR}/patch-shellcheck-result.txt" > "${PATCH_DIR}/diff-patch-shellcheck.txt" + calcdiffs \ + "${PATCH_DIR}/branch-shellcheck-result.txt" \ + "${PATCH_DIR}/patch-shellcheck-result.txt" \ + > "${PATCH_DIR}/diff-patch-shellcheck.txt" # shellcheck disable=SC2016 diffPostpatch=$(wc -l "${PATCH_DIR}/diff-patch-shellcheck.txt" | ${AWK} '{print $1}') @@ -151,6 +154,7 @@ function shellcheck_postapply add_vote_table -1 shellcheck "The applied patch generated "\ "${diffPostpatch} new shellcheck issues (total was ${numPrepatch}, now ${numPostpatch})." add_footer_table shellcheck "@@BASE@@/diff-patch-shellcheck.txt" + bugsystem_linecomments "${PATCH_DIR}/diff-patch-shellcheck.txt" return 1 fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index dcd68b2ffa8a5..b0f5ef25d40f9 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -103,9 +103,6 @@ function setup_defaults BUILDTOOL=maven TESTFORMATS="" JDK_TEST_LIST="javac javadoc unit" - GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" - GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" - GITUNIDIFFLINES="${PATCH_DIR}/gitdiffunilines.txt" # Solaris needs POSIX, not SVID case ${OSTYPE} in @@ -692,7 +689,7 @@ function compute_unidiff # finally rewriting the line so that it is in # './filename:diff line:content' format. for fn in ${CHANGED_FILES}; do - ${GIT} diff ${fn} > ${PATCH_DIR}/${fn}.$$ \ + ${GIT} diff ${fn} \ | ${GREP} -vE '^(@|\+\+\+|\-\-\-|diff|index)' \ | ${GREP} -n '^+' \ | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ @@ -701,10 +698,10 @@ function compute_unidiff # at this point, tmpfile should be in the same format # as gitdiffcontent, just with different line numbers. - # let's do a merge: + # let's do a merge (using gitdifflines because it's easier): # ./filename:real number:diff number - paste -d: "${GITDIFFLINES}" "${tmpflie}" \ + paste -d: "${GITDIFFLINES}" "${tmpfile}" \ | ${AWK} -F: '{print $1":"$2":"$5}' \ > "${GITUNIDIFFLINES}" @@ -1080,6 +1077,8 @@ function parse_args GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" + GITUNIDIFFLINES="${PATCH_DIR}/gitdiffunilines.txt" + } ## @description Locate the build file for a given directory @@ -1189,7 +1188,7 @@ function find_changed_modules ;; *) yetus_error "ERROR: Unsupported build tool." - output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 ;; esac @@ -1207,7 +1206,7 @@ function find_changed_modules builddir=$(find_buildfile_dir ${buildfile} "${i}") if [[ -z ${builddir} ]]; then yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." 
- output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 fi builddirs="${builddirs} ${builddir}" @@ -1674,7 +1673,7 @@ function apply_patch_file echo "PATCH APPLICATION FAILED" ((RESULT = RESULT + 1)) add_vote_table -1 patch "The patch command could not apply the patch." - output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 fi return 0 @@ -2965,11 +2964,32 @@ function check_unittests return 0 } +function bugsystem_linecomments +{ + declare fn=$1 + declare line + declare bugs + + while read line;do + file=$(echo ${line} | cut -f1 -d:) + realline=$(echo ${line} | cut -f2 -d:) + text=$(echo ${line} | cut -f3- -d:) + idxline="${file}:${realline}:" + uniline=$(${GREP} "${idxline}" "${GITUNIDIFFLINES}" | cut -f3 -d:) + + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_linecomments >/dev/null;then + "${bugs}_linecomments" "${file}" "${realline}" "${uniline}" "${text}" + fi + done + done < "${fn}" +} + ## @description Write the final output to the selected bug system ## @audience private ## @stability evolving ## @replaceable no -function output_to_bugsystem +function bugsystem_output { declare bugs @@ -3024,7 +3044,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 fi done @@ -3040,7 +3060,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 fi fi @@ -3096,7 +3116,7 @@ function postapply check_patch_javac retval=$? if [[ ${retval} -gt 1 ]] ; then - output_to_bugsystem 1 + bugsystem_output 1 cleanup_and_exit 1 fi @@ -3371,5 +3391,5 @@ finish_vote_table finish_footer_table -output_to_bugsystem ${RESULT} +bugsystem_output ${RESULT} cleanup_and_exit ${RESULT} From 2a274ba86e148dc5d099dfc84d673d7997f85ce7 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 17:50:11 -0700 Subject: [PATCH 107/130] this might be working --- dev-support/test-patch.d/github.sh | 9 +++--- dev-support/test-patch.sh | 51 ++++++++++++++++++++---------- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 2bf0f74fd1963..5ca07ba70eebb 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -274,6 +274,7 @@ function github_linecomments declare uniline=$3 declare text=$4 declare commitsha + declare tempfile="${PATCH_DIR}/ghcomment.$$.${RANDOM}" if [[ ${file} =~ ^./ ]]; then file=${file##./} @@ -293,9 +294,9 @@ function github_linecomments echo "\"," echo "\"commit_id\":\"${commitsha}\"," echo "\"path\":\"${file}\"," - echo "\"position\":${uniline}," + echo "\"position\":${uniline}" echo "}" - } > "${PATCH_DIR}/ghcomment.$$" + } > "${tempfile}" if [[ -n ${GITHUB_USER} && -n ${GITHUB_PASSWD} ]]; then @@ -310,11 +311,11 @@ function github_linecomments -H "Accept: application/json" \ -H "Content-Type: application/json" \ -H "${githubauth}" \ - -d @"${PATCH_DIR}/ghcomment.$$" \ + -d @"${tempfile}" \ --silent --location \ "${GITHUB_API_URL}/repos/${GITHUB_REPO}/pulls/${GITHUB_ISSUE}/comments" \ >/dev/null - rm "${PATCH_DIR}/ghcomment.$$" + #rm "${tempfile}" } ## @description Write the contents of a file to github diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index b0f5ef25d40f9..c3635699c7a8a 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -275,10 +275,10 @@ function add_vote_table fi if [[ -z ${value} ]]; then - # shellcehck 
disable=SC2034 + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | ${subsystem} | | ${*:-} |" else - # shellcehck disable=SC2034 + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| ${value} | ${subsystem} | ${calctime} | $* |" fi ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1)) @@ -676,36 +676,55 @@ function compute_gitdiff function compute_unidiff { declare fn - declare tmpfile=${PATCH_DIR}/gitdifflinenumber.$$ + declare tmpfile1="${PATCH_DIR}/tmp.$$.${RANDOM}" + declare tmpfile2="${PATCH_DIR}/tmp.$$.${RANDOM}" + declare linepos + declare offset # now that we know what lines are where, we can deal # with github's pain-in-the-butt API. It requires # that the client provides the line number of the # unified diff on a per file basis. - # First, build a per-file unified diff, pulling + + # Now build a per-file unified diff, pulling # out the 'extra' lines, grabbing the adds with # the line number in the diff file along the way, # finally rewriting the line so that it is in - # './filename:diff line:content' format. + # './filename:diff line:content' format + # while also dealing with offsets... + for fn in ${CHANGED_FILES}; do - ${GIT} diff ${fn} \ - | ${GREP} -vE '^(@|\+\+\+|\-\-\-|diff|index)' \ + filen=${fn##./} + + ${GIT} diff ${filen} \ | ${GREP} -n '^+' \ + | ${GREP} -vE '^[0-9]*:\+\+\+' \ | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ - -e s,^,./${fn}:,g >> "${tmpfile}" - done + > "${tmpfile1}" - # at this point, tmpfile should be in the same format - # as gitdiffcontent, just with different line numbers. - # let's do a merge (using gitdifflines because it's easier): + # now rewrite the file with the offset + while read -r line; do + ll=$(echo ${line} | cut -f1 -d:) + content=$(echo ${line} | cut -f2- -d:) + ((ll=ll-5)) + echo "${fn}:${ll}:${content}" >> "${tmpfile2}" + done < "${tmpfile1}" + + # at this point, tmpfile should be in the same format + # as gitdiffcontent, just with different line numbers. + # let's do a merge (using gitdifflines because it's easier): + + + + done # ./filename:real number:diff number - paste -d: "${GITDIFFLINES}" "${tmpfile}" \ - | ${AWK} -F: '{print $1":"$2":"$5}' \ - > "${GITUNIDIFFLINES}" + paste -d: "${GITDIFFLINES}" "${tmpfile2}" \ + | ${AWK} -F: '{print $1":"$2":"$5":"$6}' \ + >> "${GITUNIDIFFLINES}" + rm "${tmpfile1}" "${tmpfile2}" - rm "${tmpfile}" } ## @description Print the command to be executing to the screen. Then From cb48f6bfdc453a31e8443954923611c915fca8da Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:13:16 -0700 Subject: [PATCH 108/130] optimize --- dev-support/test-patch.sh | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index c3635699c7a8a..62090bc1b20b5 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -676,55 +676,43 @@ function compute_gitdiff function compute_unidiff { declare fn - declare tmpfile1="${PATCH_DIR}/tmp.$$.${RANDOM}" - declare tmpfile2="${PATCH_DIR}/tmp.$$.${RANDOM}" - declare linepos - declare offset + declare filen + declare tmpfile="${PATCH_DIR}/tmp.$$.${RANDOM}" # now that we know what lines are where, we can deal # with github's pain-in-the-butt API. It requires # that the client provides the line number of the # unified diff on a per file basis. 
- - # Now build a per-file unified diff, pulling + # First, build a per-file unified diff, pulling # out the 'extra' lines, grabbing the adds with # the line number in the diff file along the way, # finally rewriting the line so that it is in # './filename:diff line:content' format - # while also dealing with offsets... for fn in ${CHANGED_FILES}; do filen=${fn##./} ${GIT} diff ${filen} \ + | tail -n +6 \ | ${GREP} -n '^+' \ | ${GREP} -vE '^[0-9]*:\+\+\+' \ | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ - > "${tmpfile1}" - - # now rewrite the file with the offset - while read -r line; do - ll=$(echo ${line} | cut -f1 -d:) - content=$(echo ${line} | cut -f2- -d:) - ((ll=ll-5)) - echo "${fn}:${ll}:${content}" >> "${tmpfile2}" - done < "${tmpfile1}" - - # at this point, tmpfile should be in the same format - # as gitdiffcontent, just with different line numbers. - # let's do a merge (using gitdifflines because it's easier): - - - + -e s,^,./${filen}:,g \ + >> "${tmpfile}" done + # at this point, tmpfile should be in the same format + # as gitdiffcontent, just with different line numbers. + # let's do a merge (using gitdifflines because it's easier) + # ./filename:real number:diff number - paste -d: "${GITDIFFLINES}" "${tmpfile2}" \ + paste -d: "${GITDIFFLINES}" "${tmpfile}" \ | ${AWK} -F: '{print $1":"$2":"$5":"$6}' \ >> "${GITUNIDIFFLINES}" - rm "${tmpfile1}" "${tmpfile2}" + rm "${tmpfile1}" + exit } ## @description Print the command to be executing to the screen. Then From 0de5edbec94285c6af28491dfb2059055fc48d08 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:14:23 -0700 Subject: [PATCH 109/130] optimize --- dev-support/test-patch.d/github.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 5ca07ba70eebb..5954fe052ae9c 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -270,6 +270,7 @@ function github_locate_patch function github_linecomments { declare file=$1 + # shellcheck disable=SC2034 declare realline=$2 declare uniline=$3 declare text=$4 From a9434e9161ebcd737c2e079d8648ea8d70790fce Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:15:16 -0700 Subject: [PATCH 110/130] enable everything again --- dev-support/test-patch.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 62090bc1b20b5..901371d29ebcf 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -711,8 +711,7 @@ function compute_unidiff | ${AWK} -F: '{print $1":"$2":"$5":"$6}' \ >> "${GITUNIDIFFLINES}" - rm "${tmpfile1}" - exit + rm "${tmpfile}" } ## @description Print the command to be executing to the screen. 
Then From 6536df57478e6375cf3bff853b2568b461c118d8 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:19:42 -0700 Subject: [PATCH 111/130] enable everything again --- dev-support/test-patch.d/github.sh | 10 ++++++---- dev-support/test-patch.sh | 9 +++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 5954fe052ae9c..fc64005fd310d 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -281,9 +281,11 @@ function github_linecomments file=${file##./} fi - commitsha=$(${GREP} \"sha\" "${PATCH_DIR}/github-pull.json" 2>/dev/null \ - | head -1 \ - | cut -f4 -d\") + if [[ -z "${GITHUB_COMMITSHA}" ]]; then + GITHUB_COMMITSHA=$(${GREP} \"sha\" "${PATCH_DIR}/github-pull.json" 2>/dev/null \ + | head -1 \ + | cut -f4 -d\") + fi { printf "{\"body\":\"" @@ -293,7 +295,7 @@ function github_linecomments -e 's,$,\\r\\n,g' \ | tr -d '\n' echo "\"," - echo "\"commit_id\":\"${commitsha}\"," + echo "\"commit_id\":\"${GITHUB_COMMITSHA}\"," echo "\"path\":\"${file}\"," echo "\"position\":${uniline}" echo "}" diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 901371d29ebcf..b2dbb2230b203 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -707,6 +707,7 @@ function compute_unidiff # let's do a merge (using gitdifflines because it's easier) # ./filename:real number:diff number + # shellcheck disable=SC2016 paste -d: "${GITDIFFLINES}" "${tmpfile}" \ | ${AWK} -F: '{print $1":"$2":"$5":"$6}' \ >> "${GITUNIDIFFLINES}" @@ -2976,10 +2977,10 @@ function bugsystem_linecomments declare line declare bugs - while read line;do - file=$(echo ${line} | cut -f1 -d:) - realline=$(echo ${line} | cut -f2 -d:) - text=$(echo ${line} | cut -f3- -d:) + while read -r line;do + file=$(echo "${line}" | cut -f1 -d:) + realline=$(echo "${line}" | cut -f2 -d:) + text=$(echo "${line}" | cut -f3- -d:) idxline="${file}:${realline}:" uniline=$(${GREP} "${idxline}" "${GITUNIDIFFLINES}" | cut -f3 -d:) From 7f0a22860d42bcc048b272616db34e736be72c9d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:21:40 -0700 Subject: [PATCH 112/130] fix shellcheck error --- dev-support/test-patch.d/github.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index fc64005fd310d..ed39cdbf91212 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -274,7 +274,6 @@ function github_linecomments declare realline=$2 declare uniline=$3 declare text=$4 - declare commitsha declare tempfile="${PATCH_DIR}/ghcomment.$$.${RANDOM}" if [[ ${file} =~ ^./ ]]; then From 1da7cdd3b3725ff7315534467b51cd9ef2d8577d Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Wed, 12 Aug 2015 18:38:12 -0700 Subject: [PATCH 113/130] make sure that jira to github bridge works on something like a contiguous url --- dev-support/test-patch.d/jira.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index d139bd9fd5252..b3613b632173d 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -112,7 +112,7 @@ function jira_locate_patch fi if [[ -n "${GITHUB_BASE_URL}" - && $(${GREP} -c "${GITHUB_BASE_URL}"'.*patch' "${PATCH_DIR}/jira") != 0 ]]; then + && $(${GREP} -c "${GITHUB_BASE_URL}"'[^ ]*patch' "${PATCH_DIR}/jira") != 0 ]]; then echo "${input} appears to be a 
Github PR. Switching Modes." github_jira_bridge "${fileloc}" return $? From d065d968d55c96afd0e1865e32a817c1b8d06f55 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 07:40:46 -0700 Subject: [PATCH 114/130] add linecomments to docs; add a title to line comments; remove a temp file --- dev-support/docs/precommit-advanced.md | 4 ++++ dev-support/test-patch.d/github.sh | 17 +++++++++++------ dev-support/test-patch.d/shellcheck.sh | 2 +- dev-support/test-patch.sh | 17 ++++++++++++++--- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md index ca6fc68e21ad4..80d999ca5fd71 100644 --- a/dev-support/docs/precommit-advanced.md +++ b/dev-support/docs/precommit-advanced.md @@ -124,6 +124,10 @@ Using any heuristics available, set the issue, bug number, etc, for this bug sys Given text input, write this output to the bug system as a comment. NOTE: It is the bug system's responsibility to format appropriately. +* pluginname\_linecomments + +This function allows for the system to write specific comments on specific lines if the bug system supports code review comments. + * pluginname_finalreport Write the final result table to the bug system. diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index ed39cdbf91212..5cbec7ebc39a8 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -269,11 +269,12 @@ function github_locate_patch function github_linecomments { - declare file=$1 + declare plugin=$1 + declare file=$2 # shellcheck disable=SC2034 - declare realline=$2 - declare uniline=$3 - declare text=$4 + declare realline=$3 + declare uniline=$4 + declare text=$5 declare tempfile="${PATCH_DIR}/ghcomment.$$.${RANDOM}" if [[ ${file} =~ ^./ ]]; then @@ -286,9 +287,13 @@ function github_linecomments | cut -f4 -d\") fi + if [[ -z "${uniline}" ]]; then + return + fi + { printf "{\"body\":\"" - echo "${text}" \ + echo "${plugin}: ${text}" \ | ${SED} -e 's,\\,\\\\,g' \ -e 's,\",\\\",g' \ -e 's,$,\\r\\n,g' \ @@ -317,7 +322,7 @@ function github_linecomments --silent --location \ "${GITHUB_API_URL}/repos/${GITHUB_REPO}/pulls/${GITHUB_ISSUE}/comments" \ >/dev/null - #rm "${tempfile}" + rm "${tempfile}" } ## @description Write the contents of a file to github diff --git a/dev-support/test-patch.d/shellcheck.sh b/dev-support/test-patch.d/shellcheck.sh index 47ea33380912b..0c198db19a01b 100755 --- a/dev-support/test-patch.d/shellcheck.sh +++ b/dev-support/test-patch.d/shellcheck.sh @@ -154,7 +154,7 @@ function shellcheck_postapply add_vote_table -1 shellcheck "The applied patch generated "\ "${diffPostpatch} new shellcheck issues (total was ${numPrepatch}, now ${numPostpatch})." add_footer_table shellcheck "@@BASE@@/diff-patch-shellcheck.txt" - bugsystem_linecomments "${PATCH_DIR}/diff-patch-shellcheck.txt" + bugsystem_linecomments "shellcheck" "${PATCH_DIR}/diff-patch-shellcheck.txt" return 1 fi diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index b2dbb2230b203..678169e5dad8c 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -2971,22 +2971,33 @@ function check_unittests return 0 } +## @description Write comments onto bug systems that have code review support. 
+## @description File should be in the form of "file:line:comment" +## @audience public +## @stability evolving +## @replaceable no +## @param filename function bugsystem_linecomments { - declare fn=$1 + declare title=$1 + declare fn=$2 declare line declare bugs + if [[ ! -f "${GITUNIDIFFLINES}" ]]; then + return + fi + while read -r line;do file=$(echo "${line}" | cut -f1 -d:) realline=$(echo "${line}" | cut -f2 -d:) text=$(echo "${line}" | cut -f3- -d:) idxline="${file}:${realline}:" - uniline=$(${GREP} "${idxline}" "${GITUNIDIFFLINES}" | cut -f3 -d:) + uniline=$(${GREP} "${idxline}" "${GITUNIDIFFLINES}" | cut -f3 -d: ) for bugs in ${BUGSYSTEMS}; do if declare -f ${bugs}_linecomments >/dev/null;then - "${bugs}_linecomments" "${file}" "${realline}" "${uniline}" "${text}" + "${bugs}_linecomments" "${title}" "${file}" "${realline}" "${uniline}" "${text}" fi done done < "${fn}" From 0802d72767b92080b24d18a65e2fabce0b867283 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 08:23:28 -0700 Subject: [PATCH 115/130] doc updates; code comments; rest cleanup --- dev-support/docs/precommit-advanced.md | 16 ++--- dev-support/docs/precommit-basic.md | 6 +- dev-support/test-patch.d/builtin-bugsystem.sh | 3 + dev-support/test-patch.d/github.sh | 60 +++++++++++++++---- dev-support/test-patch.d/jira.sh | 31 ++++++++-- 5 files changed, 87 insertions(+), 29 deletions(-) diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md index 80d999ca5fd71..00ea45fd109b4 100644 --- a/dev-support/docs/precommit-advanced.md +++ b/dev-support/docs/precommit-advanced.md @@ -103,34 +103,34 @@ Similar to tests, the ability to add support for bug tracking systems is also ha - executed when the help message is displayed. This is used to display the plug-in specific options for the user. -* pluginname_parse_args +* pluginname\_parse\_args - executed prior to any other above functions except for pluginname_usage. This is useful for parsing the arguments passed from the user and setting up the execution environment. * pluginname\_locate\_patch -Given input from the user, download the patch if possible. + - Given input from the user, download the patch if possible. * pluginname\_determine\_branch -Using any heuristics available, return the branch to process, if possible. + - Using any heuristics available, return the branch to process, if possible. * pluginname\_determine\_issue -Using any heuristics available, set the issue, bug number, etc, for this bug system, if possible. This is typically used to fill in supplementary information in the final output table. + - Using any heuristics available, set the issue, bug number, etc, for this bug system, if possible. This is typically used to fill in supplementary information in the final output table. * pluginname_writecomment -Given text input, write this output to the bug system as a comment. NOTE: It is the bug system's responsibility to format appropriately. + - Given text input, write this output to the bug system as a comment. NOTE: It is the bug system's responsibility to format appropriately. * pluginname\_linecomments -This function allows for the system to write specific comments on specific lines if the bug system supports code review comments. + - This function allows for the system to write specific comments on specific lines if the bug system supports code review comments. * pluginname_finalreport -Write the final result table to the bug system. + - Write the final result table to the bug system. 
# Configuring for Other Projects @@ -216,7 +216,7 @@ This function will tell test-patch that when the javadoc test is being run, do t # Important Variables -There are a handful of extremely important variables that make life easier for personality and plug-in writers: +There are a handful of extremely important system variables that make life easier for personality and plug-in writers. Other variables may be provided by individual plug-ins. Check their development documentation for more information. * BUILD\_NATIVE will be set to true if the system has requested that non-JVM-based code be built (e.g., JNI or other compiled C code). Under Jenkins, this is always true. diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md index 0558384609014..07af634fc509c 100644 --- a/dev-support/docs/precommit-basic.md +++ b/dev-support/docs/precommit-basic.md @@ -50,7 +50,7 @@ test-patch has the following requirements: * POSIX sed * curl * file command -* smart-apply-patch.sh +* smart-apply-patch.sh (included!) Maven plugins requirements: @@ -131,7 +131,7 @@ $ test-patch.sh (other options) HADOOP-9905 ... will process the patch file associated with this JIRA issue. -If the Apache JIRA system is not in use, then override options may be provided on the command line (or via the Personality. See the advanced guide.) +If the Apache JIRA system is not in use, then override options may be provided on the command line to point to a different JIRA instance. ```bash $ test-patch.sh --jira-issue-re='^PROJECT-[0-9]+$' --jira-base-url='https://example.com/jira' PROJECT-90 @@ -215,8 +215,6 @@ $ test-patch.sh (other options) --docker This will do some preliminary setup and then re-execute itself inside a Docker container. For more information on how to provide a custom Dockerfile, see the advanced guide. - - ## In Closing test-patch has many other features and command line options for the basic user. Many of these are self-explanatory. To see the list of options, run test-patch.sh without any options or with --help. diff --git a/dev-support/test-patch.d/builtin-bugsystem.sh b/dev-support/test-patch.d/builtin-bugsystem.sh index 118d7212f525c..9a9ee05b9b95a 100755 --- a/dev-support/test-patch.d/builtin-bugsystem.sh +++ b/dev-support/test-patch.d/builtin-bugsystem.sh @@ -14,6 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. + +# This bug system handles the output on the screen. + add_bugsystem console # we always call this one last diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 5cbec7ebc39a8..fb3cc6cd18b19 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -14,19 +14,29 @@ # See the License for the specific language governing permissions and # limitations under the License. +# This bug system provides github integration + add_bugsystem github +# personalities can override the following settings: + +# Web interface URL. GITHUB_BASE_URL="https://github.com" + +# API interface URL. GITHUB_API_URL="https://api.github.com" -GITHUB_REPO="apache/hadoop" +# user/repo +GITHUB_REPO="apache/yetus" + +# user settings GITHUB_PASSWD="" GITHUB_TOKEN="" GITHUB_USER="" GITHUB_ISSUE="" +# private globals... 
GITHUB_BRIDGED=false - GITHUB_COMMITSHA="" function github_usage @@ -70,6 +80,7 @@ function github_parse_args ## @description this gets called when JIRA thinks this ## @description issue is just a pointer to github +## @description WARNING: Called from JIRA plugin! function github_jira_bridge { declare fileloc=$1 @@ -87,6 +98,9 @@ function github_jira_bridge github_locate_patch "${GITHUB_ISSUE}" "${fileloc}" } +## @description given a URL, break it up into github plugin globals +## @description this will *override* any personality or yetus defaults +## @params url function github_breakup_url { declare url=$1 @@ -112,6 +126,8 @@ function github_breakup_url GITHUB_ISSUE=$(echo "${url}" | cut -f${pos1}-${pos2} -d/ | cut -f1 -d.) } + +## @description based upon a github PR, attempt to link back to JIRA function github_find_jira_title { declare title @@ -159,6 +175,7 @@ function github_determine_issue fi fi + # if JIRA didn't call us, should we call it? if [[ ${GITHUB_BRIDGED} == false ]]; then github_find_jira_title if [[ $? == 0 ]]; then @@ -178,6 +195,7 @@ function github_determine_issue ## @stability evolving ## @replaceable no ## @return 0 on success, with PATCH_BRANCH updated appropriately +## @return 1 on failure function github_determine_branch { if [[ ! -f "${PATCH_DIR}/github-pull.json" ]]; then @@ -209,21 +227,36 @@ function github_locate_patch return 1 fi + + # https://github.com/your/repo/pulls/## if [[ ${input} =~ ^${GITHUB_BASE_URL}.*/pulls/[0-9]+$ ]]; then github_breakup_url "${input}.patch" input=${GITHUB_ISSUE} fi + # https://github.com/your/repo/pulls/##.patch if [[ ${input} =~ ^${GITHUB_BASE_URL}.*patch$ ]]; then github_breakup_url "${input}" input=${GITHUB_ISSUE} fi + # https://github.com/your/repo/pulls/##.diff + if [[ ${input} =~ ^${GITHUB_BASE_URL}.*diff$ ]]; then + github_breakup_url "${input}" + input=${GITHUB_ISSUE} + fi + + # if it isn't a number at this point, no idea + # how to process if [[ ! ${input} =~ ^[0-9]+$ ]]; then yetus_debug "github: ${input} is not a pull request #" return 1 fi + # we always pull the .patch version (even if .diff was given) + # with the assumption that this way binary files work. + # The downside of this is that the patch files are + # significantly larger and therefore take longer to process PATCHURL="${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}.patch" echo "GITHUB PR #${input} is being downloaded at $(date) from" echo "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" @@ -262,6 +295,7 @@ function github_locate_patch GITHUB_ISSUE=${input} + # github will translate this to be #(xx) ! 
add_footer_table "GITHUB PR" "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" return 0 @@ -291,6 +325,7 @@ function github_linecomments return fi + # build our REST post { printf "{\"body\":\"" echo "${plugin}: ${text}" \ @@ -332,20 +367,21 @@ function github_linecomments function github_write_comment { declare -r commentfile=${1} - shift - declare retval=0 + declare restfile="${PATCH_DIR}/ghcomment.$$" if [[ "${OFFLINE}" == true ]]; then return 0 fi - echo "{\"body\":\"" > "${PATCH_DIR}/ghcomment.$$" - ${SED} -e 's,\\,\\\\,g' \ - -e 's,\",\\\",g' \ - -e 's,$,\\r\\n,g' "${commentfile}" \ - | tr -d '\n'>> "${PATCH_DIR}/ghcomment.$$" - echo "\"}" >> "${PATCH_DIR}/ghcomment.$$" + { + printf "{\"body\":\"" + ${SED} -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + -e 's,$,\\r\\n,g' "${commentfile}" \ + | tr -d '\n' + echo "\"}" + } > "${restfile}" if [[ -n ${GITHUB_USER} && -n ${GITHUB_PASSWD} ]]; then @@ -360,13 +396,13 @@ function github_write_comment -H "Accept: application/json" \ -H "Content-Type: application/json" \ -H "${githubauth}" \ - -d @"${PATCH_DIR}/ghcomment.$$" \ + -d @"${restfile}" \ --silent --location \ "${GITHUB_API_URL}/repos/${GITHUB_REPO}/issues/${GITHUB_ISSUE}/comments" \ >/dev/null retval=$? - rm "${PATCH_DIR}/ghcomment.$$" + rm "${restfile}" return ${retval} } diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index b3613b632173d..551ec7f6a0172 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -14,7 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +# this bug system handles JIRA. Personalities +# can override the following variables: + +# base JIRA URL JIRA_URL=${JIRA_URL:-"https://issues.apache.org/jira"} + +# Issue regex to help identify the project JIRA_ISSUE_RE='^(YETUS)-[0-9]+$' add_bugsystem jira @@ -50,6 +56,8 @@ function jira_parse_args done } +## @description provides issue determination based upon the URL and more. +## @description WARNING: called from the github plugin! function jira_determine_issue { declare input=$1 @@ -78,11 +86,11 @@ function jira_http_fetch if [[ -n "${JIRA_USER}" && -n "${JIRA_PASSWD}" ]]; then - ${CURL} --silent --fail \ - --user "${JIRA_USER}:${JIRA_PASSWD}" \ - --output "${output}" \ - --location \ - "${JIRA_URL}/${input}" + ${CURL} --silent --fail \ + --user "${JIRA_USER}:${JIRA_PASSWD}" \ + --output "${output}" \ + --location \ + "${JIRA_URL}/${input}" else ${CURL} --silent --fail \ --output "${output}" \ @@ -111,6 +119,8 @@ function jira_locate_patch return 1 fi + # if github is configured and we see what looks like a URL, + # send this to the github plugin to process. if [[ -n "${GITHUB_BASE_URL}" && $(${GREP} -c "${GITHUB_BASE_URL}"'[^ ]*patch' "${PATCH_DIR}/jira") != 0 ]]; then echo "${input} appears to be a Github PR. Switching Modes." @@ -163,6 +173,17 @@ function jira_determine_branch if [[ -z "${hinttype}" ]]; then continue fi + + # If one of these matches the JIRA issue regex + # then we don't want it to trigger the branch + # detection since that's almost certainly not + # intended. 
In other words, if ISSUE-99 is the + # name of a branch, you want to test ISSUE-99 + # against master, not ISSUE-99's branch + if [[ ${hinttype} =~ ${JIRA_ISSUE_RE} ]]; then + continue + fi + yetus_debug "Determine branch: starting with ${hinttype}" patchnamechunk=$(echo "${hinttype}" \ | ${SED} -e 's,.*/\(.*\)$,\1,' \ From d5053ef04c2da7d550669f743562e91aba7ebcd7 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 09:34:05 -0700 Subject: [PATCH 116/130] more cleanup, add a comment when we cannot write so people do not freak out --- dev-support/test-patch.d/github.sh | 2 ++ dev-support/test-patch.d/jira.sh | 18 ++++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index fb3cc6cd18b19..09f734f166c9e 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -371,6 +371,7 @@ function github_write_comment declare restfile="${PATCH_DIR}/ghcomment.$$" if [[ "${OFFLINE}" == true ]]; then + echo "Github Plugin: Running in offline, comment skipped." return 0 fi @@ -389,6 +390,7 @@ function github_write_comment elif [[ -n ${GITHUB_TOKEN} ]]; then githubauth="Authorization: token ${GITHUB_TOKEN}" else + echo "Github Plugin: no credentials provided to write a comment." return 0 fi diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 551ec7f6a0172..8bb74fb7a1edb 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -271,18 +271,22 @@ function jira_write_comment declare retval=0 if [[ "${OFFLINE}" == true ]]; then + echo "JIRA Plugin: Running in offline, comment skipped." return 0 fi if [[ -n ${JIRA_PASSWD} && -n ${JIRA_USER} ]]; then - echo "{\"body\":\"" > "${PATCH_DIR}/jiracomment.$$" - ${SED} -e 's,\\,\\\\,g' \ - -e 's,\",\\\",g' \ - -e 's,$,\\r\\n,g' "${commentfile}" \ - | tr -d '\n'>> "${PATCH_DIR}/jiracomment.$$" - echo "\"}" >> "${PATCH_DIR}/jiracomment.$$" + # RESTify the comment + { + echo "{\"body\":\"" + ${SED} -e 's,\\,\\\\,g' \ + -e 's,\",\\\",g' \ + -e 's,$,\\r\\n,g' "${commentfile}" \ + | tr -d '\n' + echo "\"}" + } > "${PATCH_DIR}/jiracomment.$$" ${CURL} -X POST \ -H "Accept: application/json" \ @@ -294,6 +298,8 @@ function jira_write_comment >/dev/null retval=$? rm "${PATCH_DIR}/jiracomment.$$" + else + echo "JIRA Plugin: no credentials provided to write a comment." fi return ${retval} } From 90c91fef86457e807688a07e812b98b34a9a5ece Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 10:06:58 -0700 Subject: [PATCH 117/130] many cleanups --- dev-support/test-patch.d/github.sh | 39 +++++++++++++++------------- dev-support/test-patch.d/jira.sh | 4 +-- dev-support/test-patch.sh | 41 +++++++++++++++++++----------- 3 files changed, 48 insertions(+), 36 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 09f734f166c9e..c8eaa6fd0a67a 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -86,12 +86,13 @@ function github_jira_bridge declare fileloc=$1 declare urlfromjira + # we use this to prevent loops later on + GITHUB_BRIDGED=true + # the JIRA issue has already been downloaded. So let's # find the URL. This is currently hard-coded to github.com # Sorry Github Enterprise users. 
:( - GITHUB_BRIDGED=true - # shellcheck disable=SC2016 urlfromjira=$(${AWK} 'match($0,"https://github.com/.*patch"){print $1}' "${PATCH_DIR}/jira" | tail -1) github_breakup_url "${urlfromjira}" @@ -261,10 +262,10 @@ function github_locate_patch echo "GITHUB PR #${input} is being downloaded at $(date) from" echo "${GITHUB_BASE_URL}/${GITHUB_REPO}/pull/${input}" - if [[ -n ${GITHUB_USER} - && -n ${GITHUB_PASSWD} ]]; then + if [[ -n "${GITHUB_USER}" + && -n "${GITHUB_PASSWD}" ]]; then githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" - elif [[ -n ${GITHUB_TOKEN} ]]; then + elif [[ -n "${GITHUB_TOKEN}" ]]; then githubauth="Authorization: token ${GITHUB_TOKEN}" else githubauth="X-ignore-me: fake" @@ -272,7 +273,7 @@ function github_locate_patch # Let's pull the PR JSON for later use ${CURL} --silent --fail \ - -H "Accept: application/json" \ + -H "Accept: application/vnd.github.v3.full+json" \ -H "${githubauth}" \ --output "${PATCH_DIR}/github-pull.json" \ --location \ @@ -310,8 +311,9 @@ function github_linecomments declare uniline=$4 declare text=$5 declare tempfile="${PATCH_DIR}/ghcomment.$$.${RANDOM}" + declare githubauth - if [[ ${file} =~ ^./ ]]; then + if [[ "${file}" =~ ^./ ]]; then file=${file##./} fi @@ -340,17 +342,17 @@ function github_linecomments echo "}" } > "${tempfile}" - if [[ -n ${GITHUB_USER} - && -n ${GITHUB_PASSWD} ]]; then + if [[ -n "${GITHUB_USER}" + && -n "${GITHUB_PASSWD}" ]]; then githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" - elif [[ -n ${GITHUB_TOKEN} ]]; then + elif [[ -n "${GITHUB_TOKEN}" ]]; then githubauth="Authorization: token ${GITHUB_TOKEN}" else return 0 fi ${CURL} -X POST \ - -H "Accept: application/json" \ + -H "Accept: application/vnd.github.v3.full+json" \ -H "Content-Type: application/json" \ -H "${githubauth}" \ -d @"${tempfile}" \ @@ -369,6 +371,7 @@ function github_write_comment declare -r commentfile=${1} declare retval=0 declare restfile="${PATCH_DIR}/ghcomment.$$" + declare githubauth if [[ "${OFFLINE}" == true ]]; then echo "Github Plugin: Running in offline, comment skipped." @@ -384,10 +387,10 @@ function github_write_comment echo "\"}" } > "${restfile}" - if [[ -n ${GITHUB_USER} - && -n ${GITHUB_PASSWD} ]]; then + if [[ -n "${GITHUB_USER}" + && -n "${GITHUB_PASSWD}" ]]; then githubauth="${GITHUB_USER}:${GITHUB_PASSWD}" - elif [[ -n ${GITHUB_TOKEN} ]]; then + elif [[ -n "${GITHUB_TOKEN}" ]]; then githubauth="Authorization: token ${GITHUB_TOKEN}" else echo "Github Plugin: no credentials provided to write a comment." 
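
The credential selection above now appears nearly verbatim in github_locate_patch, github_linecomments, and github_write_comment. A later cleanup could hoist it into a shared helper along these lines (a sketch only, not part of this patch):

function github_auth_header
{
  # sets ${githubauth} for the caller; non-zero means no credentials
  if [[ -n "${GITHUB_USER}"
     && -n "${GITHUB_PASSWD}" ]]; then
    githubauth="${GITHUB_USER}:${GITHUB_PASSWD}"
  elif [[ -n "${GITHUB_TOKEN}" ]]; then
    githubauth="Authorization: token ${GITHUB_TOKEN}"
  else
    return 1
  fi
  return 0
}

The commenting functions would then call github_auth_header || return 0, while github_locate_patch would substitute its "X-ignore-me: fake" header on failure.
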
@@ -395,7 +398,7 @@ function github_write_comment fi ${CURL} -X POST \ - -H "Accept: application/json" \ + -H "Accept: application/vnd.github.v3.full+json" \ -H "Content-Type: application/json" \ -H "${githubauth}" \ -d @"${restfile}" \ @@ -440,7 +443,7 @@ function github_finalreport printf "\n\n\n\n" >> "${commentfile}" i=0 - until [[ $i -eq ${#TP_HEADER[@]} ]]; do + until [[ ${i} -eq ${#TP_HEADER[@]} ]]; do printf "%s\n\n" "${TP_HEADER[${i}]}" >> "${commentfile}" ((i=i+1)) done @@ -452,7 +455,7 @@ function github_finalreport } >> "${commentfile}" i=0 - until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do + until [[ ${i} -eq ${#TP_VOTE_TABLE[@]} ]]; do echo "${TP_VOTE_TABLE[${i}]}" >> "${commentfile}" ((i=i+1)) done @@ -464,7 +467,7 @@ function github_finalreport echo "|-------:|:------|" } >> "${commentfile}" i=0 - until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do + until [[ ${i} -eq ${#TP_TEST_TABLE[@]} ]]; do echo "${TP_TEST_TABLE[${i}]}" >> "${commentfile}" ((i=i+1)) done diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index 8bb74fb7a1edb..f3f2016d65fc4 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -142,7 +142,7 @@ function jira_locate_patch if [[ ! ${PATCHURL} =~ \.patch$ ]]; then guess_patch_file "${PATCH_DIR}/patch" if [[ $? == 0 ]]; then - yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. proceeding, but issue/branch matching might go awry." + yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. Proceeding, but issue/branch matching might go awry." add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions." fi fi @@ -266,8 +266,6 @@ function jira_determine_branch function jira_write_comment { declare -r commentfile=${1} - shift - declare retval=0 if [[ "${OFFLINE}" == true ]]; then diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 678169e5dad8c..93012f32fcc62 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -547,11 +547,7 @@ function find_java_home function write_comment { local -r commentfile=${1} - shift declare bug - declare retval - - local retval=0 for bug in ${BUGSYSTEMS}; do if declare -f ${bug}_write_comment >/dev/null; then @@ -613,7 +609,7 @@ function compute_gitdiff pushd "${BASEDIR}" >/dev/null ${GIT} add --all --intent-to-add - while read line; do + while read -r line; do if [[ ${line} =~ ^\+\+\+ ]]; then file="./"$(echo "${line}" | cut -f2- -d/) continue @@ -653,16 +649,18 @@ function compute_gitdiff fi done < <("${GIT}" diff --unified=0 --no-color) - if [[ ! -f ${GITDIFFLINES} ]]; then + if [[ ! -f "${GITDIFFLINES}" ]]; then touch "${GITDIFFLINES}" fi - if [[ ! -f ${GITDIFFCONTENT} ]]; then + if [[ ! -f "${GITDIFFCONTENT}" ]]; then touch "${GITDIFFCONTENT}" fi if [[ -s "${GITDIFFLINES}" ]]; then compute_unidiff + else + touch "${GITUNIDIFFLINES}" fi popd >/dev/null @@ -1195,7 +1193,7 @@ function find_changed_modules ;; *) yetus_error "ERROR: Unsupported build tool." - bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 ;; esac @@ -1213,7 +1211,7 @@ function find_changed_modules builddir=$(find_buildfile_dir ${buildfile} "${i}") if [[ -z ${builddir} ]]; then yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." 
- bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi builddirs="${builddirs} ${builddir}" @@ -1290,6 +1288,7 @@ function git_checkout { local currentbranch local exemptdir + local status big_console_header "Confirming git environment" @@ -1548,6 +1547,7 @@ function verify_needed_test function determine_needed_tests { local i + local plugin for i in ${CHANGED_FILES}; do yetus_debug "Determining needed tests for ${i}" @@ -1578,9 +1578,11 @@ function locate_patch yetus_debug "locate patch" + # it's a locally provided file if [[ -f ${PATCH_OR_ISSUE} ]]; then patchfile="${PATCH_OR_ISSUE}" else + # run through the bug systems. maybe they know? for bugsys in ${BUGSYSTEMS}; do if declare -f ${bugsys}_locate_patch >/dev/null 2>&1; then "${bugsys}_locate_patch" "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch" @@ -1594,6 +1596,7 @@ function locate_patch fi done + # ok, none of the bug systems know. let's see how smart we are if [[ ${gotit} == false ]]; then generic_locate_patch "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch" fi @@ -1628,6 +1631,10 @@ function guess_patch_file local patch=$1 local fileOutput + if [[ ! -f ${patch} ]]; then + return 1 + fi + yetus_debug "Trying to guess is ${patch} is a patch file." fileOutput=$("${FILE}" "${patch}") if [[ $fileOutput =~ \ diff\ ]]; then @@ -1680,7 +1687,7 @@ function apply_patch_file echo "PATCH APPLICATION FAILED" ((RESULT = RESULT + 1)) add_vote_table -1 patch "The patch command could not apply the patch." - bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi return 0 @@ -2983,6 +2990,10 @@ function bugsystem_linecomments declare fn=$2 declare line declare bugs + declare realline + declare text + declare idxline + declare uniline if [[ ! -f "${GITUNIDIFFLINES}" ]]; then return @@ -3007,7 +3018,7 @@ function bugsystem_linecomments ## @audience private ## @stability evolving ## @replaceable no -function bugsystem_output +function bugsystem_finalreport { declare bugs @@ -3062,7 +3073,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi done @@ -3078,7 +3089,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi fi @@ -3134,7 +3145,7 @@ function postapply check_patch_javac retval=$? 
if [[ ${retval} -gt 1 ]] ; then - bugsystem_output 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi @@ -3409,5 +3420,5 @@ finish_vote_table finish_footer_table -bugsystem_output ${RESULT} +bugsystem_finalreport ${RESULT} cleanup_and_exit ${RESULT} From 451a78ef5d412a6076a228d1b8bd900c7e8065d8 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 10:18:06 -0700 Subject: [PATCH 118/130] set some defaults, remove some defaults --- dev-support/personality/flink.sh | 2 ++ dev-support/personality/hadoop.sh | 2 ++ dev-support/personality/hbase.sh | 2 ++ dev-support/personality/pig.sh | 2 ++ dev-support/personality/tajo.sh | 2 ++ dev-support/personality/tez.sh | 2 ++ dev-support/test-patch.d/github.sh | 2 +- dev-support/test-patch.d/jira.sh | 2 +- 8 files changed, 14 insertions(+), 2 deletions(-) diff --git a/dev-support/personality/flink.sh b/dev-support/personality/flink.sh index 9f59233ee0302..4b6c3906c81d6 100755 --- a/dev-support/personality/flink.sh +++ b/dev-support/personality/flink.sh @@ -19,6 +19,8 @@ PATCH_BRANCH_DEFAULT=master #shellcheck disable=SC2034 JIRA_ISSUE_RE='^FLINK-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/flink" +#shellcheck disable=SC2034 HOW_TO_CONTRIBUTE="" add_plugin flinklib diff --git a/dev-support/personality/hadoop.sh b/dev-support/personality/hadoop.sh index 83aae5ef436d6..b3eb04a13d78d 100755 --- a/dev-support/personality/hadoop.sh +++ b/dev-support/personality/hadoop.sh @@ -23,6 +23,8 @@ HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute" #shellcheck disable=SC2034 JIRA_ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/hadoop" +#shellcheck disable=SC2034 PYLINT_OPTIONS="--indent-string=' '" HADOOP_MODULES="" diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh index 7becfddffefa5..84e6c8703b4f1 100755 --- a/dev-support/personality/hbase.sh +++ b/dev-support/personality/hbase.sh @@ -19,6 +19,8 @@ PATCH_BRANCH_DEFAULT=master #shellcheck disable=SC2034 JIRA_ISSUE_RE='^HBASE-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/hbase" +#shellcheck disable=SC2034 HOW_TO_CONTRIBUTE="" # All supported Hadoop versions that we want to test the compilation with diff --git a/dev-support/personality/pig.sh b/dev-support/personality/pig.sh index 2d562d326beb4..d67b2276bf5c9 100755 --- a/dev-support/personality/pig.sh +++ b/dev-support/personality/pig.sh @@ -19,6 +19,8 @@ PATCH_BRANCH_DEFAULT=trunk #shellcheck disable=SC2034 JIRA_ISSUE_RE='^PIG-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/pig" +#shellcheck disable=SC2034 HOW_TO_CONTRIBUTE="" #shellcheck disable=SC2034 BUILDTOOL=ant diff --git a/dev-support/personality/tajo.sh b/dev-support/personality/tajo.sh index a03efadec04d8..7e7ea97873d48 100755 --- a/dev-support/personality/tajo.sh +++ b/dev-support/personality/tajo.sh @@ -19,6 +19,8 @@ PATCH_BRANCH_DEFAULT=master #shellcheck disable=SC2034 JIRA_ISSUE_RE='^TAJO-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/tajo" +#shellcheck disable=SC2034 HOW_TO_CONTRIBUTE="https://cwiki.apache.org/confluence/display/TAJO/How+to+Contribute+to+Tajo" function personality_modules diff --git a/dev-support/personality/tez.sh b/dev-support/personality/tez.sh index d2f2e58fa2f5d..9b4575916d4e1 100755 --- a/dev-support/personality/tez.sh +++ b/dev-support/personality/tez.sh @@ -19,6 +19,8 @@ PATCH_BRANCH_DEFAULT=master #shellcheck disable=SC2034 JIRA_ISSUE_RE='^TEZ-[0-9]+$' #shellcheck disable=SC2034 +GITHUB_REPO="apache/tez" +#shellcheck 
disable=SC2034 HOW_TO_CONTRIBUTE="https://cwiki.apache.org/confluence/display/TEZ/How+to+Contribute+to+Tez" function personality_modules diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index c8eaa6fd0a67a..34c98b518cece 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -27,7 +27,7 @@ GITHUB_BASE_URL="https://github.com" GITHUB_API_URL="https://api.github.com" # user/repo -GITHUB_REPO="apache/yetus" +GITHUB_REPO="" # user settings GITHUB_PASSWD="" diff --git a/dev-support/test-patch.d/jira.sh b/dev-support/test-patch.d/jira.sh index f3f2016d65fc4..d708c6c37b1bd 100755 --- a/dev-support/test-patch.d/jira.sh +++ b/dev-support/test-patch.d/jira.sh @@ -21,7 +21,7 @@ JIRA_URL=${JIRA_URL:-"https://issues.apache.org/jira"} # Issue regex to help identify the project -JIRA_ISSUE_RE='^(YETUS)-[0-9]+$' +JIRA_ISSUE_RE='' add_bugsystem jira From 066ca427dbfd0a0c1a8eed5ea3edf88e763df9b2 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 10:23:09 -0700 Subject: [PATCH 119/130] fix manual pull --- dev-support/test-patch.d/github.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/test-patch.d/github.sh b/dev-support/test-patch.d/github.sh index 34c98b518cece..36c7e519ab7e0 100755 --- a/dev-support/test-patch.d/github.sh +++ b/dev-support/test-patch.d/github.sh @@ -229,8 +229,8 @@ function github_locate_patch fi - # https://github.com/your/repo/pulls/## - if [[ ${input} =~ ^${GITHUB_BASE_URL}.*/pulls/[0-9]+$ ]]; then + # https://github.com/your/repo/pull/## + if [[ ${input} =~ ^${GITHUB_BASE_URL}.*/pull/[0-9]+$ ]]; then github_breakup_url "${input}.patch" input=${GITHUB_ISSUE} fi From 9a26498b5d3586adf73b8797d4c3d749dde94394 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 11:02:34 -0700 Subject: [PATCH 120/130] test whitespace linecomments --- dev-support/create-release.sh | 5 ++++- dev-support/test-patch.d/whitespace.sh | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/dev-support/create-release.sh b/dev-support/create-release.sh index 7a3dd70334704..eaae86b95b888 100755 --- a/dev-support/create-release.sh +++ b/dev-support/create-release.sh @@ -141,4 +141,7 @@ echo "The artifacts for this run are available at ${ARTIFACTS_DIR}:" run ls -1 ${ARTIFACTS_DIR} echo echo "Remember to sign them before staging them on the open" -echo +echo + +#tab -^ + diff --git a/dev-support/test-patch.d/whitespace.sh b/dev-support/test-patch.d/whitespace.sh index 6fc033b3f31f9..5a1369bc45fd0 100755 --- a/dev-support/test-patch.d/whitespace.sh +++ b/dev-support/test-patch.d/whitespace.sh @@ -16,6 +16,24 @@ add_plugin whitespace + +function whitespace_linecomment_reporter +{ + local file=$1 + local comment=$@ + local tmpfile="${PATCH_DIR}/wlr.$$.${RANDOM}" + + while read -r line; do + { + printf "${line}" | cut -f1-2 -d: + echo ":${comment}" + } >> "${tmpfile}" + done < "${file}" + + bugsystem_linecomments "whitespace:" "${tmpfile}" + rm "${tmpfile}" +} + function whitespace_postapply { local count @@ -40,6 +58,8 @@ function whitespace_postapply if [[ ${count} -gt 0 ]]; then add_vote_table -1 whitespace "The patch has ${count}"\ " line(s) that end in whitespace. Use git apply --whitespace=fix." 
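      # each record in whitespace-eol.txt is presumably ./file:line:content;
      # the reporter below keeps the first two fields and swaps in the
      # short message before handing off to bugsystem_linecomments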
+ + whitespace_linecomment_reporter "${PATCH_DIR}/whitespace-eol.txt" "end of line" add_footer_table whitespace "@@BASE@@/whitespace-eol.txt" ((result=result+1)) fi @@ -51,6 +71,7 @@ function whitespace_postapply add_vote_table -1 whitespace "The patch has ${count}"\ " line(s) with tabs." add_footer_table whitespace "@@BASE@@/whitespace-tabs.txt" + whitespace_linecomment_reporter "${PATCH_DIR}/whitespace-tabs.txt" "tabs in line" ((result=result+1)) fi From 8951b815b590b95be0e163395a1034ff4a1c83c7 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 11:05:13 -0700 Subject: [PATCH 121/130] test whitespace linecomments --- dev-support/test-patch.d/whitespace.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/test-patch.d/whitespace.sh b/dev-support/test-patch.d/whitespace.sh index 5a1369bc45fd0..4047525306a62 100755 --- a/dev-support/test-patch.d/whitespace.sh +++ b/dev-support/test-patch.d/whitespace.sh @@ -20,12 +20,12 @@ add_plugin whitespace function whitespace_linecomment_reporter { local file=$1 - local comment=$@ + local comment=$* local tmpfile="${PATCH_DIR}/wlr.$$.${RANDOM}" while read -r line; do { - printf "${line}" | cut -f1-2 -d: + printf "%s" $(echo "${line}" | cut -f1-2 -d:) echo ":${comment}" } >> "${tmpfile}" done < "${file}" From a171780525640ba9bd3580d8f5457b372c05ee75 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 11:06:57 -0700 Subject: [PATCH 122/130] test whitespace linecomments --- dev-support/test-patch.d/whitespace.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-support/test-patch.d/whitespace.sh b/dev-support/test-patch.d/whitespace.sh index 4047525306a62..c2377715b53e0 100755 --- a/dev-support/test-patch.d/whitespace.sh +++ b/dev-support/test-patch.d/whitespace.sh @@ -20,6 +20,7 @@ add_plugin whitespace function whitespace_linecomment_reporter { local file=$1 + shift local comment=$* local tmpfile="${PATCH_DIR}/wlr.$$.${RANDOM}" @@ -31,7 +32,7 @@ function whitespace_linecomment_reporter done < "${file}" bugsystem_linecomments "whitespace:" "${tmpfile}" - rm "${tmpfile}" + #rm "${tmpfile}" } function whitespace_postapply From 49432eb7fca22ac2865e7593ca67fc93c4abb177 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 11:08:39 -0700 Subject: [PATCH 123/130] minor whitespace cleanup --- dev-support/test-patch.d/whitespace.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/test-patch.d/whitespace.sh b/dev-support/test-patch.d/whitespace.sh index c2377715b53e0..e6676e3f7b04d 100755 --- a/dev-support/test-patch.d/whitespace.sh +++ b/dev-support/test-patch.d/whitespace.sh @@ -27,12 +27,12 @@ function whitespace_linecomment_reporter while read -r line; do { printf "%s" $(echo "${line}" | cut -f1-2 -d:) - echo ":${comment}" + echo "${comment}" } >> "${tmpfile}" done < "${file}" bugsystem_linecomments "whitespace:" "${tmpfile}" - #rm "${tmpfile}" + rm "${tmpfile}" } function whitespace_postapply From 19d02c9bc4c4fcce6dfcf07f775358c117cbd211 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 11:10:42 -0700 Subject: [PATCH 124/130] fix create-release.sh --- dev-support/create-release.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/dev-support/create-release.sh b/dev-support/create-release.sh index eaae86b95b888..7a3dd70334704 100755 --- a/dev-support/create-release.sh +++ b/dev-support/create-release.sh @@ -141,7 +141,4 @@ echo "The artifacts for this run are available at 
${ARTIFACTS_DIR}:" run ls -1 ${ARTIFACTS_DIR} echo echo "Remember to sign them before staging them on the open" -echo - -#tab -^ - +echo From abc46dd67c618ac5d5e6748544eb1661351b4eca Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 12:30:41 -0700 Subject: [PATCH 125/130] HADOOP-12244. recover broken rebase during precommit (aw) --- dev-support/test-patch.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 4dd15e69e1b6b..13332b7e361b1 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -1290,6 +1290,12 @@ function git_checkout # we need to explicitly fetch in case the # git ref hasn't been brought in tree yet if [[ ${OFFLINE} == false ]]; then + + if [[ -f .git/rebase-apply ]]; then + yetus_error "ERROR: previous rebase failed. Aborting it." + ${GIT} rebase --abort + fi + ${GIT} pull --rebase if [[ $? != 0 ]]; then yetus_error "ERROR: git pull is failing" From 8be9c3b68d5bd1768b53b5e0acea6355653f34a5 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 12:51:00 -0700 Subject: [PATCH 126/130] HADOOP-12315. hbaseprotoc_postapply in the test-patch hbase personality can return a wrong status (Kengo Seki via aw) --- dev-support/personality/hbase.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/personality/hbase.sh b/dev-support/personality/hbase.sh index d8ca9010af5b9..97490965561e9 100755 --- a/dev-support/personality/hbase.sh +++ b/dev-support/personality/hbase.sh @@ -146,7 +146,7 @@ function hbaseprotoc_postapply local module local logfile local count - local results + local result big_console_header "Patch HBase protoc plugin" @@ -159,7 +159,7 @@ function hbaseprotoc_postapply fi personality_modules patch hbaseprotoc - modules_workers patch hbaseprotoc -DskipTests -Pcompile-protobuf -X -DHBasePatchProcess + modules_workers patch hbaseprotoc compile -DskipTests -Pcompile-protobuf -X -DHBasePatchProcess # shellcheck disable=SC2153 until [[ $i -eq ${#MODULE[@]} ]]; do @@ -177,13 +177,13 @@ function hbaseprotoc_postapply if [[ ${count} -gt 0 ]]; then module_status ${i} -1 "patch-hbaseprotoc-${fn}.txt" "Patch generated "\ "${count} new protoc errors in ${module}." - ((results=results+1)) + ((result=result+1)) fi ((i=i+1)) done modules_messages patch hbaseprotoc true - if [[ ${results} -gt 0 ]]; then + if [[ ${result} -gt 0 ]]; then return 1 fi return 0 From 3e25a683958f37375d037f975eaf1237805316ba Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Thu, 13 Aug 2015 12:56:33 -0700 Subject: [PATCH 127/130] HADOOP-12297. test-patch's basedir and patch-dir must be directories under the user's home in docker mode if using boot2docker (Kengo Seki via aw) --- dev-support/docs/precommit-advanced.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md index a424199e7f5a6..31855121bafed 100644 --- a/dev-support/docs/precommit-advanced.md +++ b/dev-support/docs/precommit-advanced.md @@ -28,6 +28,8 @@ By default, test-patch runs in the same shell where it was launched. It can alt The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile. The Dockerfile should contain all of the necessary binaries and tooling needed to run the test. However be aware that test-patch will copy this file and append its necessary hooks to re-launch itself prior to executing docker. 
+NOTE: If you are using Boot2Docker, you must use directories under /Users (OSX) or C:\Users (Windows) as the base and patchprocess directories (specified by the --basedir and --patch-dir options respectively), because automatically mountable directories are limited to them. See [the Docker documentation](https://docs.docker.com/userguide/dockervolumes/#mount-a-host-directory-as-a-data-volume). + Dockerfile images will be named with a test-patch prefix and suffix with either a date or a git commit hash. By using this information, test-patch will automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode. In this way, if Docker fails to build the image, the disk space should eventually be cleaned and returned back to the system. # Maven Specific From 9a5f980738f33dac75ac9bea5f6a806c692a6fe9 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 14 Aug 2015 10:12:26 -0700 Subject: [PATCH 128/130] re-org buildtools --- dev-support/personality/samza.sh | 54 ++ dev-support/test-patch.d/ant.sh | 65 ++ .../test-patch.d/builtin-personality.sh | 12 +- dev-support/test-patch.d/gradle.sh | 89 ++ dev-support/test-patch.d/maven.sh | 236 +++++ dev-support/test-patch.sh | 873 +++++------------- 6 files changed, 656 insertions(+), 673 deletions(-) create mode 100755 dev-support/personality/samza.sh create mode 100644 dev-support/test-patch.d/ant.sh create mode 100644 dev-support/test-patch.d/gradle.sh create mode 100644 dev-support/test-patch.d/maven.sh diff --git a/dev-support/personality/samza.sh b/dev-support/personality/samza.sh new file mode 100755 index 0000000000000..a21be5f539f1f --- /dev/null +++ b/dev-support/personality/samza.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#shellcheck disable=SC2034 +PATCH_BRANCH_DEFAULT=master +#shellcheck disable=SC2034 +ISSUE_RE='^SAMZA-[0-9]+$' +#shellcheck disable=SC2034 +HOW_TO_CONTRIBUTE="https://cwiki.apache.org/confluence/display/SAMZA/Contributor's+Corner" +# shellcheck disable=SC2034 +BUILDTOOL=gradle + +function personality_modules +{ + local repostatus=$1 + local testtype=$2 + local extra="" + + yetus_debug "Personality: ${repostatus} ${testtype}" + + clear_personality_queue + + case ${testtype} in + asflicense) + # this is very fast and provides the full path if we do it from + # the root of the source + personality_enqueue_module . 
+ return + ;; + unit) + ;; + *) + extra="-DskipTests" + ;; + esac + + for module in ${CHANGED_MODULES}; do + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${extra} + done +} diff --git a/dev-support/test-patch.d/ant.sh b/dev-support/test-patch.d/ant.sh new file mode 100644 index 0000000000000..1208d9c9b2d70 --- /dev/null +++ b/dev-support/test-patch.d/ant.sh @@ -0,0 +1,65 @@ + +function ant_buildfile +{ + echo "build.xml" +} + +function ant_executor +{ + echo "${ANT}" "${ANT_ARGS[@]}" +} + +function ant_modules_worker +{ + declare branch=$1 + declare tst=$2 + shift 2 + + case ${tst} in + javac) + modules_workers ${branch} javac + ;; + javadoc) + modules_workers ${branch} javadoc clean javadoc + ;; + unit) + modules_workers ${branch} unit + ;; + *) + yetus_error "WARNING: ${tst} is unsupported by ${BUILDTOOL}" + return 1 + ;; + esac +} + +function ant_count_javac_probs +{ + declare warningfile=$1 + declare val1 + declare val2 + + #shellcheck disable=SC2016 + val1=$(${GREP} -E "\[javac\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + #shellcheck disable=SC2016 + val2=$(${GREP} -E "\[javac\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + echo $((val1+val2)) +} + +## @description Helper for check_patch_javadoc +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function ant_count_javadoc_probs +{ + local warningfile=$1 + local val1 + local val2 + + #shellcheck disable=SC2016 + val1=$(${GREP} -E "\[javadoc\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + #shellcheck disable=SC2016 + val2=$(${GREP} -E "\[javadoc\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}') + echo $((val1+val2)) +} \ No newline at end of file diff --git a/dev-support/test-patch.d/builtin-personality.sh b/dev-support/test-patch.d/builtin-personality.sh index 4be3bfa3c2b2d..6e87ff44e3fd8 100755 --- a/dev-support/test-patch.d/builtin-personality.sh +++ b/dev-support/test-patch.d/builtin-personality.sh @@ -59,13 +59,6 @@ function builtin_mvn_personality_file_tests || ${filename} =~ src/test/scripts ]]; then yetus_debug "tests/shell: ${filename}" - elif [[ ${filename} =~ \.md$ - || ${filename} =~ \.md\.vm$ - || ${filename} =~ src/site - || ${filename} =~ src/main/docs - ]]; then - yetus_debug "tests/site: ${filename}" - add_test site elif [[ ${filename} =~ \.c$ || ${filename} =~ \.cc$ || ${filename} =~ \.h$ @@ -78,9 +71,9 @@ function builtin_mvn_personality_file_tests add_test cc add_test unit elif [[ ${filename} =~ \.scala$ ]]; then - add_test javac + add_test scalac + add_test scaladoc add_test unit - add_test mvninstall elif [[ ${filename} =~ build.xml$ || ${filename} =~ pom.xml$ || ${filename} =~ \.java$ @@ -89,7 +82,6 @@ function builtin_mvn_personality_file_tests yetus_debug "tests/javadoc+units: ${filename}" add_test javac add_test javadoc - add_test mvninstall add_test unit fi diff --git a/dev-support/test-patch.d/gradle.sh b/dev-support/test-patch.d/gradle.sh new file mode 100644 index 0000000000000..3def21dad931e --- /dev/null +++ b/dev-support/test-patch.d/gradle.sh @@ -0,0 +1,89 @@ + +function gradle_executor +{ + echo "${GRADLE}" "${GRADLE_ARGS[@]}" +} + +## @description Bootstrap gradle +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function gradle_precheck_install +{ + local result=0 + + if [[ ${BUILDTOOL} != gradle ]]; then + return 0 + fi + + 
personality_modules branch gradleboot + modules_workers branch gradleboot + result=$? + modules_messages branch gradleboot true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} + +function gradle_count_javac_probs +{ + echo 0 +} + +function gradle_count_javadoc_probs +{ + echo 0 +} + +function gradle_modules_worker +{ + declare branch=$1 + declare tst=$2 + shift 2 + + case ${tst} in + javac) + modules_workers ${branch} javac + ;; + javadoc) + modules_workers ${branch} javadoc javadoc + ;; + scaladoc) + modules_workers ${branch} scaladoc scaladoc + ;; + unit) + modules_workers ${branch} unit test + ;; + *) + yetus_error "WARNING: ${tst} is unsupported by ${BUILDTOOL}" + return 1 + ;; + esac +} + +## @description Bootstrap gradle +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function gradle_postapply_install +{ + local result=0 + + if [[ ${BUILDTOOL} != gradle ]]; then + return 0 + fi + + personality_modules patch gradleboot + modules_workers branch gradleboot + result=$? + modules_messages patch gradleboot true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} \ No newline at end of file diff --git a/dev-support/test-patch.d/maven.sh b/dev-support/test-patch.d/maven.sh new file mode 100644 index 0000000000000..3db51c03c0cc8 --- /dev/null +++ b/dev-support/test-patch.d/maven.sh @@ -0,0 +1,236 @@ + +add_plugin mvnsite +add_plugin mvneclipse + +function maven_buildfile +{ + echo "pom.xml" +} + +function maven_executor +{ + echo "${MAVEN}" "${MAVEN_ARGS[@]}" +} + +# if it ends in an explicit .sh, then this is shell code. +# if it doesn't have an extension, we assume it is shell code too +function mvnsite_filefilter +{ + local filename=$1 + + if [[ ${BUILDTOOL} = maven ]]; then + if [[ ${filename} =~ src/site ]]; then + yetus_debug "tests/mvnsite: ${filename}" + add_test mvnsite + fi + fi +} + +function maven_modules_worker +{ + declare branch=$1 + declare tst=$2 + + case ${tst} in + javac) + modules_workers ${branch} javac clean test-compile + ;; + javadoc) + modules_workers ${branch} javadoc clean javadoc:javadoc + ;; + unit) + modules_workers ${branch} unit clean test -fae + ;; + *) + yetus_error "WARNING: ${tst} is unsupported by ${BUILDTOOL}" + return 1 + ;; + esac +} + +function maven_count_javac_probs +{ + local warningfile=$1 + + #shellcheck disable=SC2016,SC2046 + ${GREP} '\[WARNING\]' "${warningfile}" | ${AWK} '{sum+=1} END {print sum}' +} + +## @description Helper for check_patch_javadoc +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function maven_count_javadoc_probs +{ + local warningfile=$1 + + #shellcheck disable=SC2016,SC2046 + ${GREP} -E "^[0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$1} END {print sum}' +} + +## @description Confirm site pre-patch +## @audience private +## @stability stable +## @replaceable no +## @return 0 on success +## @return 1 on failure +function mvnsite_preapply +{ + local result=0 + + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + verify_needed_test mvnsite + if [[ $? == 0 ]];then + return 0 + fi + big_console_header "Pre-patch ${PATCH_BRANCH} site verification" + + + personality_modules branch mvnsite + modules_workers branch mvnsite clean site site:stage + result=$? 
+ modules_messages branch mvnsite true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} + +## @description Make sure site still compiles +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function mvnsite_postapply +{ + local result=0 + + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + verify_needed_test mvnsite + if [[ $? == 0 ]]; then + return 0 + fi + + big_console_header "Determining number of patched site errors" + + personality_modules patch mvnsite + modules_workers patch mvnsite clean site site:stage -Dmaven.javadoc.skip=true + result=$? + modules_messages patch mvnsite true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} + + +## @description Make sure Maven's eclipse generation works. +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function mvneclipse_postapply +{ + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + big_console_header "Verifying mvn eclipse:eclipse still works" + + verify_needed_test javac + if [[ $? == 0 ]]; then + echo "Patch does not touch any java files. Skipping mvn eclipse:eclipse" + return 0 + fi + + personality_modules patch mvneclipse + modules_workers patch mvneclipse eclipse:eclipse + result=$? + modules_messages patch mvneclipse true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} + +## @description Verify mvn install works +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function maven_precheck_install +{ + local result=0 + + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + big_console_header "Verifying mvn install works" + + verify_needed_test javadoc + retval=$? + + verify_needed_test javac + ((retval = retval + $? )) + if [[ ${retval} == 0 ]]; then + echo "This patch does not appear to need mvn install checks." + return 0 + fi + + personality_modules branch mvninstall + modules_workers branch mvninstall -fae clean install -Dmaven.javadoc.skip=true + result=$? + modules_messages branch mvninstall true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} + +## @description Verify mvn install works +## @audience private +## @stability evolving +## @replaceable no +## @return 0 on success +## @return 1 on failure +function maven_postapply_install +{ + local result=0 + + if [[ ${BUILDTOOL} != maven ]]; then + return 0 + fi + + big_console_header "Verifying mvn install still works" + + verify_needed_test javadoc + retval=$? + + verify_needed_test javac + ((retval = retval + $? )) + if [[ ${retval} == 0 ]]; then + echo "This patch does not appear to need mvn install checks." + return 0 + fi + + personality_modules patch mvninstall + modules_workers patch mvninstall clean install -Dmaven.javadoc.skip=true + result=$? 
+ modules_messages patch mvninstall true + if [[ ${result} != 0 ]]; then + return 1 + fi + return 0 +} diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 13332b7e361b1..b875800c2e859 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -98,23 +98,18 @@ function setup_defaults REEXECED=false RESETREPO=false ISSUE="" - ISSUE_RE='^(YETUS)-[0-9]+$' TIMER=$(date +"%s") - PATCHURL="" OSTYPE=$(uname -s) BUILDTOOL=maven - BUGSYSTEM=jira TESTFORMATS="" JDK_TEST_LIST="javac javadoc unit" - GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" - GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" # Solaris needs POSIX, not SVID case ${OSTYPE} in SunOS) AWK=${AWK:-/usr/xpg4/bin/awk} SED=${SED:-/usr/xpg4/bin/sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-/usr/xpg4/bin/grep} PATCH=${PATCH:-/usr/gnu/bin/patch} @@ -124,7 +119,7 @@ function setup_defaults *) AWK=${AWK:-awk} SED=${SED:-sed} - WGET=${WGET:-wget} + CURL=${CURL:-curl} GIT=${GIT:-git} GREP=${GREP:-grep} PATCH=${PATCH:-patch} @@ -242,6 +237,7 @@ function offset_clock ## @param string function add_header_line { + # shellcheck disable=SC2034 TP_HEADER[${TP_HEADER_COUNTER}]="$*" ((TP_HEADER_COUNTER=TP_HEADER_COUNTER+1 )) } @@ -279,8 +275,10 @@ function add_vote_table fi if [[ -z ${value} ]]; then + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | ${subsystem} | | ${*:-} |" else + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| ${value} | ${subsystem} | ${calctime} | $* |" fi ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1)) @@ -423,6 +421,7 @@ function finish_vote_table echo "Total Elapsed time: ${calctime}" echo "" + # shellcheck disable=SC2034 TP_VOTE_TABLE[${TP_VOTE_COUNTER}]="| | | ${calctime} | |" ((TP_VOTE_COUNTER=TP_VOTE_COUNTER+1 )) } @@ -440,6 +439,7 @@ function add_footer_table local subsystem=$1 shift 1 + # shellcheck disable=SC2034 TP_FOOTER_TABLE[${TP_FOOTER_COUNTER}]="| ${subsystem} | $* |" ((TP_FOOTER_COUNTER=TP_FOOTER_COUNTER+1 )) } @@ -455,6 +455,7 @@ function add_test_table local failure=$1 shift 1 + # shellcheck disable=SC2034 TP_TEST_TABLE[${TP_TEST_COUNTER}]="| ${failure} | $* |" ((TP_TEST_COUNTER=TP_TEST_COUNTER+1 )) } @@ -538,24 +539,21 @@ function find_java_home return 0 } -## @description Write the contents of a file to jenkins +## @description Write the contents of a file to all of the bug systems +## @description (so content should avoid special formatting) ## @params filename ## @stability stable ## @audience public -## @returns ${JIRACLI} exit code function write_comment { local -r commentfile=${1} - shift + declare bug - local retval=0 - - if [[ ${OFFLINE} == false - && ${JENKINS} == true ]]; then - ${BUGSYSTEM}_write_comment "${commentfile}" - retval=$? - fi - return ${retval} + for bug in ${BUGSYSTEMS}; do + if declare -f ${bug}_write_comment >/dev/null; then + "${bug}_write_comment" "${commentfile}" + fi + done } ## @description Verify that the patch directory is still in working order @@ -611,7 +609,7 @@ function compute_gitdiff pushd "${BASEDIR}" >/dev/null ${GIT} add --all --intent-to-add - while read line; do + while read -r line; do if [[ ${line} =~ ^\+\+\+ ]]; then file="./"$(echo "${line}" | cut -f2- -d/) continue @@ -651,16 +649,70 @@ function compute_gitdiff fi done < <("${GIT}" diff --unified=0 --no-color) - if [[ ! -f ${GITDIFFLINES} ]]; then + if [[ ! -f "${GITDIFFLINES}" ]]; then touch "${GITDIFFLINES}" fi - if [[ ! -f ${GITDIFFCONTENT} ]]; then + + if [[ ! 
-f "${GITDIFFCONTENT}" ]]; then touch "${GITDIFFCONTENT}" fi + if [[ -s "${GITDIFFLINES}" ]]; then + compute_unidiff + else + touch "${GITUNIDIFFLINES}" + fi + popd >/dev/null } +## @description generate an index of unified diff lines vs. modified/added lines +## @description ${GITDIFFLINES} must exist. +## @audience private +## @stability stable +## @replaceable no +function compute_unidiff +{ + declare fn + declare filen + declare tmpfile="${PATCH_DIR}/tmp.$$.${RANDOM}" + + # now that we know what lines are where, we can deal + # with github's pain-in-the-butt API. It requires + # that the client provides the line number of the + # unified diff on a per file basis. + + # First, build a per-file unified diff, pulling + # out the 'extra' lines, grabbing the adds with + # the line number in the diff file along the way, + # finally rewriting the line so that it is in + # './filename:diff line:content' format + + for fn in ${CHANGED_FILES}; do + filen=${fn##./} + + ${GIT} diff ${filen} \ + | tail -n +6 \ + | ${GREP} -n '^+' \ + | ${GREP} -vE '^[0-9]*:\+\+\+' \ + | ${SED} -e 's,^\([0-9]*:\)\+,\1,g' \ + -e s,^,./${filen}:,g \ + >> "${tmpfile}" + done + + # at this point, tmpfile should be in the same format + # as gitdiffcontent, just with different line numbers. + # let's do a merge (using gitdifflines because it's easier) + + # ./filename:real number:diff number + # shellcheck disable=SC2016 + paste -d: "${GITDIFFLINES}" "${tmpfile}" \ + | ${AWK} -F: '{print $1":"$2":"$5":"$6}' \ + >> "${GITUNIDIFFLINES}" + + rm "${tmpfile}" +} + ## @description Print the command to be executing to the screen. Then ## @description run the command, sending stdout and stderr to the given filename ## @description This will also ensure that any directories in ${BASEDIR} have @@ -729,16 +781,13 @@ function testpatch_usage echo "--basedir= The directory to apply the patch to (default current directory)" echo "--branch= Forcibly set the branch" echo "--branch-default= If the branch isn't forced and we don't detect one in the patch name, use this branch (default 'master')" - #not quite working yet - #echo "--bugsystem= The bug system in use ('jira', the default, or 'github')" echo "--build-native= If true, then build native components (default 'true')" - echo "--build-tool= Pick which build tool to focus around (maven, ant)" + echo "--build-tool= Pick which build tool to focus around (ant, gradle, maven)" echo "--contrib-guide= URL to point new users towards project conventions. (default: ${HOW_TO_CONTRIBUTE} )" echo "--debug If set, then output some extra stuff to stderr" echo "--dirty-workspace Allow the local git workspace to have uncommitted changes" echo "--docker Spawn a docker container" echo "--dockerfile= Dockerfile fragment to use as the base" - echo "--issue-re= Bash regular expression to use when trying to find a jira ref in the patch name (default: \'${ISSUE_RE}\')" echo "--java-home= Set JAVA_HOME (In Docker mode, this should be local to the image)" echo "--multijdkdirs= Comma delimited lists of JDK paths to use for multi-JDK tests" echo "--multijdktests= Comma delimited tests to use when multijdkdirs is used. 
(default: javac,javadoc,unit)" @@ -760,6 +809,7 @@ function testpatch_usage echo "Shell binary overrides:" echo "--ant-cmd= The 'ant' command to use (default \${ANT_HOME}/bin/ant, or 'ant')" echo "--awk-cmd= The 'awk' command to use (default 'awk')" + echo "--curl-cmd= The 'wget' command to use (default 'curl')" echo "--diff-cmd= The GNU-compatible 'diff' command to use (default 'diff')" echo "--file-cmd= The 'file' command to use (default 'file')" echo "--git-cmd= The 'git' command to use (default 'git')" @@ -774,7 +824,6 @@ function testpatch_usage echo "--build-url Set the build location web page" echo "--eclipse-home= Eclipse home directory (default ECLIPSE_HOME environment variable)" echo "--mv-patch-dir Move the patch-dir into the basedir during cleanup." - echo "--wget-cmd= The 'wget' command to use (default 'wget')" importplugins @@ -815,9 +864,6 @@ function parse_args --branch-default=*) PATCH_BRANCH_DEFAULT=${i#*=} ;; - --bugsystem=*) - BUGSYSTEM=${i#*=} - ;; --build-native=*) BUILD_NATIVE=${i#*=} ;; @@ -830,6 +876,9 @@ function parse_args --contrib-guide=*) HOW_TO_CONTRIBUTE=${i#*=} ;; + --curl-cmd=*) + CURL=${i#*=} + ;; --debug) TP_SHELL_SCRIPT_DEBUG=true ;; @@ -864,9 +913,6 @@ function parse_args testpatch_usage exit 0 ;; - --issue-re=*) - ISSUE_RE=${i#*=} - ;; --java-home=*) JAVA_HOME=${i#*=} ;; @@ -954,9 +1000,6 @@ function parse_args --tpreexectimer=*) REEXECLAUNCHTIMER=${i#*=} ;; - --wget-cmd=*) - WGET=${i#*=} - ;; --*) ## PATCH_OR_ISSUE can't be a --. So this is probably ## a plugin thing. @@ -1039,6 +1082,8 @@ function parse_args GITDIFFLINES="${PATCH_DIR}/gitdifflines.txt" GITDIFFCONTENT="${PATCH_DIR}/gitdiffcontent.txt" + GITUNIDIFFLINES="${PATCH_DIR}/gitdiffunilines.txt" + } ## @description Locate the build file for a given directory @@ -1139,20 +1184,13 @@ function find_changed_modules local dir local buildfile - case ${BUILDTOOL} in - maven) - buildfile=pom.xml - ;; - ant) - buildfile=build.xml - ;; - *) - yetus_error "ERROR: Unsupported build tool." - output_to_console 1 - output_to_bugsystem 1 - cleanup_and_exit 1 - ;; - esac + buildfile=$(${BUILDTOOL}_buildfile) + + if [[ $? != 0 ]]; then + yetus_error "ERROR: Unsupported build tool." + bugsystem_finalreport 1 + cleanup_and_exit 1 + fi changed_dirs=$(for i in ${CHANGED_FILES}; do dirname "${i}"; done | sort -u) @@ -1167,8 +1205,7 @@ function find_changed_modules builddir=$(find_buildfile_dir ${buildfile} "${i}") if [[ -z ${builddir} ]]; then yetus_error "ERROR: ${buildfile} is not found. Make sure the target is a ${BUILDTOOL}-based project." - output_to_console 1 - output_to_bugsystem 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi builddirs="${builddirs} ${builddir}" @@ -1245,6 +1282,7 @@ function git_checkout { local currentbranch local exemptdir + local status big_console_header "Confirming git environment" @@ -1342,8 +1380,6 @@ function git_checkout determine_issue GIT_REVISION=$(${GIT} rev-parse --verify --short HEAD) - # shellcheck disable=SC2034 - VERSION=${GIT_REVISION}_${ISSUE}_PATCH-${patchNum} if [[ "${ISSUE}" == 'Unknown' ]]; then echo "Testing patch on ${PATCH_BRANCH}." 
@@ -1402,9 +1438,8 @@ function verify_valid_branch ## @return 1 on failure, with PATCH_BRANCH updated to PATCH_BRANCH_DEFAULT function determine_branch { - local patchnamechunk - local total - local count + declare bugs + declare retval=1 # something has already set this, so move on if [[ -n ${PATCH_BRANCH} ]]; then @@ -1427,83 +1462,19 @@ function determine_branch return fi - for j in "${PATCHURL}" "${PATCH_OR_ISSUE}"; do - if [[ -z "${j}" ]]; then - continue - fi - yetus_debug "Determine branch: starting with ${j}" - patchnamechunk=$(echo "${j}" \ - | ${SED} -e 's,.*/\(.*\)$,\1,' \ - -e 's,\.txt,.,' \ - -e 's,.patch,.,g' \ - -e 's,.diff,.,g' \ - -e 's,\.\.,.,g' \ - -e 's,\.$,,g' ) - - # ISSUE-branch-## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1,2 -d-) - yetus_debug "Determine branch: ISSUE-branch-## = ${PATCH_BRANCH}" - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_determine_branch >/dev/null;then + "${bugs}_determine_branch" + retval=$? + if [[ ${retval} == 0 ]]; then + break fi fi - - # ISSUE-##[.##].branch - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 3 )) - until [[ ${total} -eq 2 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3-${total} -d.) - yetus_debug "Determine branch: ISSUE[.##].branch = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi - fi - done - - # ISSUE.branch.## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 3 )) - until [[ ${total} -eq 2 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f2-${total} -d.) - yetus_debug "Determine branch: ISSUE.branch[.##] = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi - fi - done - - # ISSUE-branch.## - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1- -d. ) - count="${PATCH_BRANCH//[^.]}" - total=${#count} - ((total = total + 1 )) - until [[ ${total} -eq 1 ]]; do - PATCH_BRANCH=$(echo "${patchnamechunk}" | cut -f3- -d- | cut -f1-${total} -d. ) - yetus_debug "Determine branch: ISSUE-branch[.##] = ${PATCH_BRANCH}" - ((total=total-1)) - if [[ -n "${PATCH_BRANCH}" ]]; then - verify_valid_branch "${PATCH_BRANCH}" - if [[ $? == 0 ]]; then - return - fi - fi - done - done - PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" + if [[ ${retval} != 0 ]]; then + PATCH_BRANCH="${PATCH_BRANCH_DEFAULT}" + fi popd >/dev/null } @@ -1515,28 +1486,19 @@ function determine_branch ## @return 1 on failure, with ISSUE updated to "Unknown" function determine_issue { - local patchnamechunk - local maybeissue + local bugsys yetus_debug "Determine issue" - # we can shortcut jenkins - if [[ ${JENKINS} == true ]]; then - ISSUE=${PATCH_OR_ISSUE} - return 0 - fi - - # shellcheck disable=SC2016 - patchnamechunk=$(echo "${PATCH_OR_ISSUE}" | ${AWK} -F/ '{print $NF}') - - maybeissue=$(echo "${patchnamechunk}" | cut -f1,2 -d-) - - if [[ ${maybeissue} =~ ${ISSUE_RE} ]]; then - ISSUE=${maybeissue} - return 0 - fi - - ISSUE="Unknown" + for bugsys in ${BUGSYSTEMS}; do + if declare -f ${bugsys}_determine_issue >/dev/null; then + "${bugsys}_determine_issue" "${PATCH_OR_ISSUE}" + if [[ $? 
+        yetus_debug "${bugsys} says ${ISSUE}"
+        return 0
+      fi
+    fi
+  done

   return 1
 }

@@ -1585,6 +1547,7 @@ function verify_needed_test
 function determine_needed_tests
 {
   local i
+  local plugin

   for i in ${CHANGED_FILES}; do
     yetus_debug "Determining needed tests for ${i}"

@@ -1609,100 +1572,51 @@ function determine_needed_tests
 ## @return 1 on failure, may exit
 function locate_patch
 {
-  local notSureIfPatch=false
+  local bugsys
+  local patchfile=""
+  local gotit=false
+
+  yetus_debug "locate patch"
+
+  # it's a locally provided file
   if [[ -f ${PATCH_OR_ISSUE} ]]; then
-    PATCH_FILE="${PATCH_OR_ISSUE}"
+    patchfile="${PATCH_OR_ISSUE}"
   else
-    if [[ ${PATCH_OR_ISSUE} =~ ^http ]]; then
-      echo "Patch is being downloaded at $(date) from"
-      PATCHURL="${PATCH_OR_ISSUE}"
-    else
-      ${WGET} -q -O "${PATCH_DIR}/jira" "http://issues.apache.org/jira/browse/${PATCH_OR_ISSUE}"
-
-      case $? in
-        0)
-        ;;
-        2)
-          yetus_error "ERROR: .wgetrc/.netrc parsing error."
-          cleanup_and_exit 1
-        ;;
-        3)
-          yetus_error "ERROR: File IO error."
-          cleanup_and_exit 1
-        ;;
-        4)
-          yetus_error "ERROR: URL ${PATCH_OR_ISSUE} is unreachable."
-          cleanup_and_exit 1
-        ;;
-        *)
-          # we want to try and do as much as we can in docker mode,
-          # but if the patch was passed as a file, then we may not
-          # be able to continue.
-          if [[ ${REEXECED} == true
-            && -f "${PATCH_DIR}/patch" ]]; then
-            PATCH_FILE="${PATCH_DIR}/patch"
-          else
-            yetus_error "ERROR: Unable to fetch ${PATCH_OR_ISSUE}."
-            cleanup_and_exit 1
-          fi
-        ;;
-      esac
-
-      if [[ -z "${PATCH_FILE}" ]]; then
-        if [[ $(${GREP} -c 'Patch Available' "${PATCH_DIR}/jira") == 0 ]] ; then
-          if [[ ${JENKINS} == true ]]; then
-            yetus_error "ERROR: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
-            cleanup_and_exit 1
-          else
-            yetus_error "WARNING: ${PATCH_OR_ISSUE} is not \"Patch Available\"."
+    # run through the bug systems. maybe they know?
+    for bugsys in ${BUGSYSTEMS}; do
+      if declare -f ${bugsys}_locate_patch >/dev/null 2>&1; then
+        "${bugsys}_locate_patch" "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch"
+        if [[ $? == 0 ]]; then
+          guess_patch_file "${PATCH_DIR}/patch"
+          if [[ $? == 0 ]]; then
+            gotit=true
+            break;
           fi
         fi
-
-        #shellcheck disable=SC2016
-        relativePatchURL=$(${AWK} 'match($0,"\"/jira/secure/attachment/[0-9]*/[^\"]*"){print substr($0,RSTART+1,RLENGTH-1)}' "${PATCH_DIR}/jira" |
-          ${GREP} -v -e 'htm[l]*$' | sort | tail -1)
-        PATCHURL="http://issues.apache.org${relativePatchURL}"
-        if [[ ! ${PATCHURL} =~ \.patch$ ]]; then
-          notSureIfPatch=true
-        fi
-        #shellcheck disable=SC2016
-        patchNum=$(echo "${PATCHURL}" | ${AWK} 'match($0,"[0-9]*/"){print substr($0,RSTART,RLENGTH-1)}')
-        echo "${ISSUE} patch is being downloaded at $(date) from"
-      fi
-    fi
-    if [[ -z "${PATCH_FILE}" ]]; then
-      echo "${PATCHURL}"
-      add_footer_table "Patch URL" "${PATCHURL}"
-      ${WGET} -q -O "${PATCH_DIR}/patch" "${PATCHURL}"
-      if [[ $? != 0 ]];then
-        yetus_error "ERROR: ${PATCH_OR_ISSUE} could not be downloaded."
-        cleanup_and_exit 1
       fi
-      PATCH_FILE="${PATCH_DIR}/patch"
+    done
+
+    # ok, none of the bug systems know. let's see how smart we are
+    if [[ ${gotit} == false ]]; then
+      generic_locate_patch "${PATCH_OR_ISSUE}" "${PATCH_DIR}/patch"
     fi
   fi

-  if [[ ! -f "${PATCH_DIR}/patch" ]]; then
-    cp "${PATCH_FILE}" "${PATCH_DIR}/patch"
+  if [[ ! -f "${PATCH_DIR}/patch"
+    && -f "${patchfile}" ]]; then
+    cp "${patchfile}" "${PATCH_DIR}/patch"
     if [[ $? == 0 ]] ; then
-      echo "Patch file ${PATCH_FILE} copied to ${PATCH_DIR}"
+      echo "Patch file ${patchfile} copied to ${PATCH_DIR}"
     else
-      yetus_error "ERROR: Could not copy ${PATCH_FILE} to ${PATCH_DIR}"
+      yetus_error "ERROR: Could not copy ${patchfile} to ${PATCH_DIR}"
       cleanup_and_exit 1
     fi
   fi

-  if [[ ${notSureIfPatch} == "true" ]]; then
-    guess_patch_file "${PATCH_DIR}/patch"
-    if [[ $? != 0 ]]; then
-      yetus_error "ERROR: ${PATCHURL} is not a patch file."
-      cleanup_and_exit 1
-    else
-      yetus_debug "The patch ${PATCHURL} was not named properly, but it looks like a patch file. proceeding, but issue/branch matching might go awry."
-      add_vote_table 0 patch "The patch file was not named according to ${PROJECT_NAME}'s naming conventions. Please see ${HOW_TO_CONTRIBUTE} for instructions."
-    fi
+  guess_patch_file "${PATCH_DIR}/patch"
+  if [[ $? != 0 ]]; then
+    yetus_error "ERROR: Unsure how to process ${PATCH_OR_ISSUE}."
+    cleanup_and_exit 1
   fi
 }

@@ -1717,6 +1631,10 @@ function guess_patch_file
   local patch=$1
   local fileOutput

+  if [[ ! -f ${patch} ]]; then
+    return 1
+  fi
+
   yetus_debug "Trying to guess if ${patch} is a patch file."
   fileOutput=$("${FILE}" "${patch}")
   if [[ $fileOutput =~ \ diff\ ]]; then
@@ -1769,8 +1687,7 @@ function apply_patch_file
     echo "PATCH APPLICATION FAILED"
     ((RESULT = RESULT + 1))
     add_vote_table -1 patch "The patch command could not apply the patch."
-    output_to_console 1
-    output_to_bugsystem 1
+    bugsystem_finalreport 1
     cleanup_and_exit 1
   fi
   return 0
@@ -2097,7 +2014,7 @@ function module_status
   fi
 }

-## @description run the maven tests for the queued modules
+## @description run the tests for the queued modules
 ## @audience public
 ## @stability evolving
 ## @replaceable no
@@ -2154,26 +2071,10 @@ function modules_workers
       continue
     fi

-    case ${BUILDTOOL} in
-      maven)
-        #shellcheck disable=SC2086
-        echo_and_redirect "${PATCH_DIR}/${repostatus}-${testtype}-${fn}.txt" \
-          ${MVN} "${MAVEN_ARGS[@]}" \
-          "${@//@@@MODULEFN@@@/${fn}}" \
-          ${MODULEEXTRAPARAM[${modindex}]//@@@MODULEFN@@@/${fn}} -Ptest-patch
-      ;;
-      ant)
-        #shellcheck disable=SC2086
-        echo_and_redirect "${PATCH_DIR}/${repostatus}-${testtype}-${fn}.txt" \
-          "${ANT}" "${ANT_ARGS[@]}" \
-          ${MODULEEXTRAPARAM[${modindex}]//@@@MODULEFN@@@/${fn}} \
-          "${@//@@@MODULEFN@@@/${fn}}"
-      ;;
-      *)
-        yetus_error "ERROR: Unsupported build tool."
-        return 1
-      ;;
-    esac
+    echo_and_redirect "${PATCH_DIR}/${repostatus}-${testtype}-${fn}.txt" \
+      $("${BUILDTOOL}_executor") \
+      ${MODULEEXTRAPARAM[${modindex}]//@@@MODULEFN@@@/${fn}} \
+      "${@//@@@MODULEFN@@@/${fn}}"

     if [[ $? == 0 ]] ; then
       module_status \
@@ -2265,18 +2166,7 @@ function precheck_javac
   fi

   personality_modules branch javac
-  case ${BUILDTOOL} in
-    maven)
-      modules_workers branch javac clean test-compile
-    ;;
-    ant)
-      modules_workers branch javac
-    ;;
-    *)
-      yetus_error "ERROR: Unsupported build tool."
-      return 1
-    ;;
-  esac
+  "${BUILDTOOL}_modules_worker" branch javac

   ((result=result + $?))
   modules_messages branch javac true
@@ -2322,18 +2212,7 @@ function precheck_javadoc
   fi

   personality_modules branch javadoc
-  case ${BUILDTOOL} in
-    maven)
-      modules_workers branch javadoc clean javadoc:javadoc
-    ;;
-    ant)
-      modules_workers branch javadoc clean javadoc
-    ;;
-    *)
-      yetus_error "ERROR: Unsupported build tool."
-      return 1
-    ;;
-  esac
+  ${BUILDTOOL}_modules_worker branch javadoc

   ((result=result + $?))
   modules_messages branch javadoc true
@@ -2347,38 +2226,6 @@ function precheck_javadoc
   return 0
 }

-## @description Confirm site pre-patch
-## @audience private
-## @stability stable
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function precheck_site
-{
-  local result=0
-
-  if [[ ${BUILDTOOL} != maven ]]; then
-    return 0
-  fi
-
-  big_console_header "Pre-patch ${PATCH_BRANCH} site verification"
-
-  verify_needed_test site
-  if [[ $? == 0 ]];then
-    echo "Patch does not appear to need site tests."
-    return 0
-  fi
-
-  personality_modules branch site
-  modules_workers branch site clean site site:stage
-  result=$?
-  modules_messages branch site true
-  if [[ ${result} != 0 ]]; then
-    return 1
-  fi
-  return 0
-}
-
 ## @description Confirm the source environment pre-patch
 ## @audience private
 ## @stability stable
@@ -2389,10 +2236,11 @@ function precheck_without_patch
 {
   local result=0

-  precheck_mvninstall
-
-  if [[ $? -gt 0 ]]; then
-    ((result = result +1 ))
+  if declare -f ${BUILDTOOL}_precheck_install >/dev/null; then
+    "${BUILDTOOL}_precheck_install"
+    if [[ $? -gt 0 ]]; then
+      ((result = result +1 ))
+    fi
   fi

   precheck_javac
@@ -2494,33 +2342,6 @@ function check_modified_unittests
   return 0
 }

-## @description Helper for check_patch_javac
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function count_javac_probs
-{
-  local warningfile=$1
-  local val1
-  local val2
-
-  case ${BUILDTOOL} in
-    maven)
-      #shellcheck disable=SC2016,SC2046
-      ${GREP} '\[WARNING\]' "${warningfile}" | ${AWK} '{sum+=1} END {print sum}'
-    ;;
-    ant)
-      #shellcheck disable=SC2016
-      val1=$(${GREP} -E "\[javac\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}')
-      #shellcheck disable=SC2016
-      val2=$(${GREP} -E "\[javac\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}')
-      echo $((val1+val2))
-    ;;
-  esac
-}
-
 ## @description Count and compare the number of javac warnings pre- and post- patch
 ## @audience private
 ## @stability evolving
@@ -2565,19 +2386,7 @@ function check_patch_javac
   fi

   personality_modules patch javac
-
-  case ${BUILDTOOL} in
-    maven)
-      modules_workers patch javac clean test-compile
-    ;;
-    ant)
-      modules_workers patch javac
-    ;;
-    *)
-      yetus_error "ERROR: Unsupported build tool."
-      return 1
-    ;;
-  esac
+  ${BUILDTOOL}_modules_worker patch javac

   i=0
   until [[ ${i} -eq ${#MODULE[@]} ]]; do
@@ -2611,8 +2420,8 @@ function check_patch_javac
       "${PATCH_DIR}/patch-javac-${fn}-warning.txt"
   fi

-  numbranch=$(count_javac_probs "${PATCH_DIR}/branch-javac-${fn}-warning.txt")
-  numpatch=$(count_javac_probs "${PATCH_DIR}/patch-javac-${fn}-warning.txt")
+  numbranch=$(${BUILDTOOL}_count_javac_probs "${PATCH_DIR}/branch-javac-${fn}-warning.txt")
+  numpatch=$(${BUILDTOOL}_count_javac_probs "${PATCH_DIR}/patch-javac-${fn}-warning.txt")

   if [[ -n ${numbranch}
     && -n ${numpatch}
@@ -2641,33 +2450,6 @@ function check_patch_javac
   return 0
 }

-## @description Helper for check_patch_javadoc
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function count_javadoc_probs
-{
-  local warningfile=$1
-  local val1
-  local val2
-
-  case ${BUILDTOOL} in
-    maven)
-      #shellcheck disable=SC2016,SC2046
-      ${GREP} -E "^[0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$1} END {print sum}'
-    ;;
-    ant)
-      #shellcheck disable=SC2016
-      val1=$(${GREP} -E "\[javadoc\] [0-9]+ errors?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}')
-      #shellcheck disable=SC2016
-      val2=$(${GREP} -E "\[javadoc\] [0-9]+ warnings?$" "${warningfile}" | ${AWK} '{sum+=$2} END {print sum}')
-      echo $((val1+val2))
-    ;;
-  esac
-}
-
 ## @description Count and compare the number of JavaDoc warnings pre- and post- patch
 ## @audience private
 ## @stability evolving
@@ -2711,18 +2493,7 @@ function check_patch_javadoc
   fi

   personality_modules patch javadoc
-  case ${BUILDTOOL} in
-    maven)
-      modules_workers patch javadoc clean javadoc:javadoc
-    ;;
-    ant)
-      modules_workers patch javadoc clean javadoc
-    ;;
-    *)
-      yetus_error "ERROR: Unsupported build tool."
-      return 1
-    ;;
-  esac
+  ${BUILDTOOL}_modules_workers patch javadoc

   i=0
   until [[ ${i} -eq ${#MODULE[@]} ]]; do
@@ -2755,8 +2526,8 @@ function check_patch_javadoc
       "${PATCH_DIR}/patch-javadoc-${fn}-warning.txt"
   fi

-  numbranch=$(count_javadoc_probs "${PATCH_DIR}/branch-javadoc-${fn}.txt")
-  numpatch=$(count_javadoc_probs "${PATCH_DIR}/patch-javadoc-${fn}.txt")
+  numbranch=$(${BUILDTOOL}_count_javadoc_probs "${PATCH_DIR}/branch-javadoc-${fn}.txt")
+  numpatch=$(${BUILDTOOL}_count_javadoc_probs "${PATCH_DIR}/patch-javadoc-${fn}.txt")

   if [[ -n ${numbranch}
     && -n ${numpatch}
@@ -2785,140 +2556,6 @@ function check_patch_javadoc
   return 0
 }

-## @description Make sure site still compiles
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function check_site
-{
-  local result=0
-
-  if [[ ${BUILDTOOL} != maven ]]; then
-    return 0
-  fi
-
-  big_console_header "Determining number of patched site errors"
-
-  verify_needed_test site
-  if [[ $? == 0 ]]; then
-    echo "Patch does not appear to need site tests."
-    return 0
-  fi
-
-  personality_modules patch site
-  modules_workers patch site clean site site:stage -Dmaven.javadoc.skip=true
-  result=$?
-  modules_messages patch site true
-  if [[ ${result} != 0 ]]; then
-    return 1
-  fi
-  return 0
-}
-
-## @description Verify mvn install works
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function precheck_mvninstall
-{
-  local result=0
-
-  if [[ ${BUILDTOOL} != maven ]]; then
-    return 0
-  fi
-
-  big_console_header "Verifying mvn install works"
-
-  verify_needed_test javadoc
-  retval=$?
-
-  verify_needed_test javac
-  ((retval = retval + $? ))
-  if [[ ${retval} == 0 ]]; then
-    echo "This patch does not appear to need mvn install checks."
-    return 0
-  fi
-
-  personality_modules branch mvninstall
-  modules_workers branch mvninstall -fae clean install -Dmaven.javadoc.skip=true
-  result=$?
-  modules_messages branch mvninstall true
-  if [[ ${result} != 0 ]]; then
-    return 1
-  fi
-  return 0
-}
-
-## @description Verify mvn install works
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function check_mvninstall
-{
-  local result=0
-
-  if [[ ${BUILDTOOL} != maven ]]; then
-    return 0
-  fi
-
-  big_console_header "Verifying mvn install still works"
-
-  verify_needed_test javadoc
-  retval=$?
-
-  verify_needed_test javac
-  ((retval = retval + $? ))
-  if [[ ${retval} == 0 ]]; then
-    echo "This patch does not appear to need mvn install checks."
-    return 0
-  fi
-
-  personality_modules patch mvninstall
-  modules_workers patch mvninstall clean install -Dmaven.javadoc.skip=true
-  result=$?
-  modules_messages patch mvninstall true
-  if [[ ${result} != 0 ]]; then
-    return 1
-  fi
-  return 0
-}
-
-## @description Make sure Maven's eclipse generation works.
-## @audience private
-## @stability evolving
-## @replaceable no
-## @return 0 on success
-## @return 1 on failure
-function check_mvn_eclipse
-{
-  if [[ ${BUILDTOOL} != maven ]]; then
-    return 0
-  fi
-
-  big_console_header "Verifying mvn eclipse:eclipse still works"
-
-  verify_needed_test javac
-  if [[ $? == 0 ]]; then
-    echo "Patch does not touch any java files. Skipping mvn eclipse:eclipse"
-    return 0
-  fi
-
-  personality_modules patch eclipse
-  modules_workers patch eclipse eclipse:eclipse
-  result=$?
-  modules_messages patch eclipse true
-  if [[ ${result} != 0 ]]; then
-    return 1
-  fi
-  return 0
-}
-
 ## @description Utility to push many tests into the failure list
 ## @audience private
 ## @stability evolving
@@ -2987,18 +2624,8 @@ function check_unittests
   fi

   personality_modules patch unit
-  case ${BUILDTOOL} in
-    maven)
-      modules_workers patch unit clean test -fae
-    ;;
-    ant)
-      modules_workers patch unit
-    ;;
-    *)
-      yetus_error "ERROR: Unsupported build tool."
-      return 1
-    ;;
-  esac
+  ${BUILDTOOL}_modules_workers patch unit
+
   ((result=result+$?))
   modules_messages patch unit false

@@ -3061,134 +2688,55 @@ function check_unittests
   return 0
 }

-## @description Print out the finished details on the console
-## @audience private
+## @description Write comments onto bug systems that have code review support.
+## @description File should be in the form of "file:line:comment"
+## @audience public
 ## @stability evolving
 ## @replaceable no
-## @param runresult
-## @return 0 on success
-## @return 1 on failure
-function output_to_console
-{
-  local result=$1
-  shift
-  local i=0
-  local ourstring
-  local vote
-  local subs
-  local ela
-  local comment
-  local commentfile1="${PATCH_DIR}/comment.1"
-  local commentfile2="${PATCH_DIR}/comment.2"
-  local normaltop
-  local line
-  local seccoladj=0
-  local spcfx=${PATCH_DIR}/spcl.txt
-
-  if [[ ${result} == 0 ]]; then
-    if [[ ${JENKINS} == false ]]; then
-      {
-        printf "IF9fX19fX19fX18gCjwgU3VjY2VzcyEgPgogLS0tLS0tLS0tLSAKIFwgICAg";
-        printf "IC9cICBfX18gIC9cCiAgXCAgIC8vIFwvICAgXC8gXFwKICAgICAoKCAgICBP";
-        printf "IE8gICAgKSkKICAgICAgXFwgLyAgICAgXCAvLwogICAgICAgXC8gIHwgfCAg";
-        printf "XC8gCiAgICAgICAgfCAgfCB8ICB8ICAKICAgICAgICB8ICB8IHwgIHwgIAog";
-        printf "ICAgICAgIHwgICBvICAgfCAgCiAgICAgICAgfCB8ICAgfCB8ICAKICAgICAg";
-        printf "ICB8bXwgICB8bXwgIAo"
-      } > "${spcfx}"
-    fi
-    printf "\n\n+1 overall\n\n"
-  else
-    if [[ ${JENKINS} == false ]]; then
-      {
-        printf "IF9fX19fICAgICBfIF8gICAgICAgICAgICAgICAgXyAKfCAgX19ffF8gXyhf";
-        printf "KSB8XyAgIF8gXyBfXyBfX198IHwKfCB8XyAvIF9gIHwgfCB8IHwgfCB8ICdf";
-        printf "Xy8gXyBcIHwKfCAgX3wgKF98IHwgfCB8IHxffCB8IHwgfCAgX18vX3wKfF98";
-        printf "ICBcX18sX3xffF98XF9fLF98X3wgIFxfX18oXykKICAgICAgICAgICAgICAg";
-        printf "ICAgICAgICAgICAgICAgICAK"
-      } > "${spcfx}"
-    fi
-    printf "\n\n-1 overall\n\n"
-  fi
-
-  if [[ -f ${spcfx} ]]; then
-    if which base64 >/dev/null 2>&1; then
-      base64 --decode "${spcfx}" 2>/dev/null
-    elif which openssl >/dev/null 2>&1; then
-      openssl enc -A -d -base64 -in "${spcfx}" 2>/dev/null
-    fi
-    echo
-    echo
-    rm "${spcfx}"
-  fi
-
-  seccoladj=$(findlargest 2 "${TP_VOTE_TABLE[@]}")
-  if [[ ${seccoladj} -lt 10 ]]; then
-    seccoladj=10
+## @param filename
+function bugsystem_linecomments
+{
+  declare title=$1
+  declare fn=$2
+  declare line
+  declare bugs
+  declare realline
+  declare text
+  declare idxline
+  declare uniline
+
+  if [[ ! -f "${GITUNIDIFFLINES}" ]]; then
-f "${GITUNIDIFFLINES}" ]]; then + return fi - seccoladj=$((seccoladj + 2 )) - i=0 - until [[ $i -eq ${#TP_HEADER[@]} ]]; do - printf "%s\n" "${TP_HEADER[${i}]}" - ((i=i+1)) - done - - printf "| %s | %*s | %s | %s\n" "Vote" ${seccoladj} Subsystem Runtime "Comment" - echo "============================================================================" - i=0 - until [[ $i -eq ${#TP_VOTE_TABLE[@]} ]]; do - ourstring=$(echo "${TP_VOTE_TABLE[${i}]}" | tr -s ' ') - vote=$(echo "${ourstring}" | cut -f2 -d\|) - subs=$(echo "${ourstring}" | cut -f3 -d\|) - ela=$(echo "${ourstring}" | cut -f4 -d\|) - comment=$(echo "${ourstring}" | cut -f5 -d\|) - - echo "${comment}" | fold -s -w $((78-seccoladj-22)) > "${commentfile1}" - normaltop=$(head -1 "${commentfile1}") - ${SED} -e '1d' "${commentfile1}" > "${commentfile2}" - - printf "| %4s | %*s | %-10s |%-s\n" "${vote}" ${seccoladj} \ - "${subs}" "${ela}" "${normaltop}" - while read line; do - printf "| | %*s | | %-s\n" ${seccoladj} " " "${line}" - done < "${commentfile2}" + while read -r line;do + file=$(echo "${line}" | cut -f1 -d:) + realline=$(echo "${line}" | cut -f2 -d:) + text=$(echo "${line}" | cut -f3- -d:) + idxline="${file}:${realline}:" + uniline=$(${GREP} "${idxline}" "${GITUNIDIFFLINES}" | cut -f3 -d: ) - ((i=i+1)) - rm "${commentfile2}" "${commentfile1}" 2>/dev/null - done - - if [[ ${#TP_TEST_TABLE[@]} -gt 0 ]]; then - seccoladj=$(findlargest 1 "${TP_TEST_TABLE[@]}") - printf "\n\n%*s | Tests\n" "${seccoladj}" "Reason" - i=0 - until [[ $i -eq ${#TP_TEST_TABLE[@]} ]]; do - ourstring=$(echo "${TP_TEST_TABLE[${i}]}" | tr -s ' ') - vote=$(echo "${ourstring}" | cut -f2 -d\|) - subs=$(echo "${ourstring}" | cut -f3 -d\|) - printf "%*s | %s\n" "${seccoladj}" "${vote}" "${subs}" - ((i=i+1)) + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_linecomments >/dev/null;then + "${bugs}_linecomments" "${title}" "${file}" "${realline}" "${uniline}" "${text}" + fi done - fi - - printf "\n\n|| Subsystem || Report/Notes ||\n" - echo "============================================================================" - i=0 - - until [[ $i -eq ${#TP_FOOTER_TABLE[@]} ]]; do - comment=$(echo "${TP_FOOTER_TABLE[${i}]}" | - ${SED} -e "s,@@BASE@@,${PATCH_DIR},g") - printf "%s\n" "${comment}" - ((i=i+1)) - done + done < "${fn}" } ## @description Write the final output to the selected bug system ## @audience private ## @stability evolving ## @replaceable no -function output_to_bugsystem +function bugsystem_finalreport { - "${BUGSYSTEM}_finalreport" "${@}" + declare bugs + + for bugs in ${BUGSYSTEMS}; do + if declare -f ${bugs}_finalreport >/dev/null;then + "${bugs}_finalreport" "${@}" + fi + done } ## @description Clean the filesystem as appropriate and then exit @@ -3235,8 +2783,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_console 1 - output_to_bugsystem 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi done @@ -3252,8 +2799,7 @@ function postcheckout (( RESULT = RESULT + $? )) if [[ ${RESULT} != 0 ]] ; then - output_to_console 1 - output_to_bugsystem 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi fi @@ -3309,8 +2855,7 @@ function postapply check_patch_javac retval=$? if [[ ${retval} -gt 1 ]] ; then - output_to_console 1 - output_to_bugsystem 1 + bugsystem_finalreport 1 cleanup_and_exit 1 fi @@ -3345,6 +2890,11 @@ function postinstall local routine local plugin + if declare -f ${BUILDTOOL}_postapply_install >/dev/null; then + "${BUILDTOOL}_postapply_install" + (( RESULT = RESULT + $? 
+  fi
+
   verify_patchdir_still_exists
   for routine in check_patch_javadoc check_mvn_eclipse
   do
@@ -3575,8 +3125,6 @@
 find_changed_modules

 postapply

-check_mvninstall
-
 postinstall

 runtests

@@ -3585,6 +3133,5 @@
 finish_vote_table

 finish_footer_table

-output_to_console ${RESULT}
-output_to_bugsystem ${RESULT}
+bugsystem_finalreport ${RESULT}

 cleanup_and_exit ${RESULT}

From d5f22169cac7c6ae35f34665b9377ed1ef4a3c4d Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Fri, 14 Aug 2015 14:51:15 -0700
Subject: [PATCH 129/130] fixes

---
 dev-support/test-patch.d/ant.sh      | 15 +++++++++++++++
 dev-support/test-patch.d/gradle.sh   | 15 +++++++++++++++
 dev-support/test-patch.d/maven.sh    | 15 +++++++++++++++
 dev-support/test-patch.d/scaladoc.sh | 15 +++++++++++++++
 4 files changed, 60 insertions(+)
 mode change 100644 => 100755 dev-support/test-patch.d/ant.sh
 mode change 100644 => 100755 dev-support/test-patch.d/gradle.sh
 mode change 100644 => 100755 dev-support/test-patch.d/maven.sh
 mode change 100644 => 100755 dev-support/test-patch.d/scaladoc.sh

diff --git a/dev-support/test-patch.d/ant.sh b/dev-support/test-patch.d/ant.sh
old mode 100644
new mode 100755
index 98f6c6c54445b..ca5bf01fcf7c6
--- a/dev-support/test-patch.d/ant.sh
+++ b/dev-support/test-patch.d/ant.sh
@@ -1,3 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

 if [[ -z "${ANT_HOME:-}" ]]; then
   ANT=ant
diff --git a/dev-support/test-patch.d/gradle.sh b/dev-support/test-patch.d/gradle.sh
old mode 100644
new mode 100755
index 366e18a4b281d..06947b4dc085b
--- a/dev-support/test-patch.d/gradle.sh
+++ b/dev-support/test-patch.d/gradle.sh
@@ -1,3 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
if [[ -z "${GRADLE:-}" ]]; then GRADLE=gradle diff --git a/dev-support/test-patch.d/maven.sh b/dev-support/test-patch.d/maven.sh old mode 100644 new mode 100755 index 4ea26b6bec35e..952b66503a105 --- a/dev-support/test-patch.d/maven.sh +++ b/dev-support/test-patch.d/maven.sh @@ -1,3 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. declare -a MAVEN_ARGS=("--batch-mode") diff --git a/dev-support/test-patch.d/scaladoc.sh b/dev-support/test-patch.d/scaladoc.sh old mode 100644 new mode 100755 index 7eccc89e7b14f..646cfab8a92eb --- a/dev-support/test-patch.d/scaladoc.sh +++ b/dev-support/test-patch.d/scaladoc.sh @@ -1,3 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. add_plugin scaladoc From b5f16194e347fddc95588fa9a523d07b0aca0a05 Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 14 Aug 2015 19:20:37 -0700 Subject: [PATCH 130/130] fixes --- dev-support/test-patch.d/scaladoc.sh | 10 ++++------ dev-support/test-patch.sh | 9 ++++----- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/dev-support/test-patch.d/scaladoc.sh b/dev-support/test-patch.d/scaladoc.sh index 646cfab8a92eb..0853c9d4f0edb 100755 --- a/dev-support/test-patch.d/scaladoc.sh +++ b/dev-support/test-patch.d/scaladoc.sh @@ -37,14 +37,13 @@ function scaladoc_preapply { local result=0 - big_console_header "Pre-patch ${PATCH_BRANCH} Javadoc verification" - verify_needed_test scaladoc if [[ $? == 0 ]]; then - echo "Patch does not appear to need scaladoc tests." return 0 fi + big_console_header "Pre-patch ${PATCH_BRANCH} Javadoc verification" + personality_modules branch scaladoc ${BUILDTOOL}_modules_worker branch scaladoc @@ -72,14 +71,13 @@ function scaladoc_postinstall declare -i numbranch=0 declare -i numpatch=0 - big_console_header "Determining number of patched scaladoc warnings" - verify_needed_test scaladoc if [[ $? == 0 ]]; then - echo "Patch does not appear to need scaladoc tests." 
     return 0
   fi

+  big_console_header "Determining number of patched scaladoc warnings"
+
   personality_modules patch scaladoc
   ${BUILDTOOL}_modules_workers patch scaladoc

diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 2fe33bc840547..bd0a8ea063421 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -2465,7 +2465,7 @@ function check_patch_javadoc
   fi

   personality_modules patch javadoc
-  ${BUILDTOOL}_modules_workers patch javadoc
+  ${BUILDTOOL}_modules_worker patch javadoc

   i=0
   until [[ ${i} -eq ${#MODULE[@]} ]]; do
@@ -2572,15 +2572,14 @@ function check_unittests
   local needlog
   local unitlogs

-  big_console_header "Running unit tests"
-
   verify_needed_test unit
   if [[ $? == 0 ]]; then
-    echo "Existing unit tests do not test patched files. Skipping."
     return 0
   fi

+  big_console_header "Running unit tests"
+
   verify_multijdk_test unit
   if [[ $? == 1 ]]; then
     multijdkmode=true
@@ -2596,7 +2595,7 @@ function check_unittests
   fi

   personality_modules patch unit
-  ${BUILDTOOL}_modules_workers patch unit
+  ${BUILDTOOL}_modules_worker patch unit

   ((result=result+$?))
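The bugsystem and buildtool hooks used throughout this series all follow one dispatch idiom: probe for an optional shell function with declare -f and call it only if some plugin defined it. A minimal, self-contained sketch of that idiom, using a hypothetical jira plugin purely for illustration:

  #!/usr/bin/env bash

  BUGSYSTEMS="jira"

  # hypothetical plugin hook; a real plugin would post the report
  # to its issue tracker instead of echoing it
  function jira_finalreport
  {
    echo "jira: final result was $1"
  }

  # dispatcher: call every registered bug system that implements the hook
  function bugsystem_finalreport
  {
    declare bugs

    for bugs in ${BUGSYSTEMS}; do
      if declare -f "${bugs}_finalreport" >/dev/null; then
        "${bugs}_finalreport" "$@"
      fi
    done
  }

  bugsystem_finalreport 0

Because undefined hooks are simply skipped, a plugin only implements the callbacks it cares about, and test-patch no longer needs a central case statement per bug system or build tool.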