diff --git a/bin/chaos-daemon.sh b/bin/chaos-daemon.sh
new file mode 100644
index 000000000000..084e519321a2
--- /dev/null
+++ b/bin/chaos-daemon.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+
+usage="Usage: chaos-daemon.sh (start|stop) chaosagent"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+  echo "$usage"
+  exit 1
+fi
+
+# get arguments
+startStop=$1
+shift
+
+command=$1
+shift
+
+check_before_start(){
+  # check that the process is not already running
+  mkdir -p "$HBASE_PID_DIR"
+  if [ -f "$CHAOS_PID" ]; then
+    if kill -0 "$(cat "$CHAOS_PID")" > /dev/null 2>&1; then
+      echo "$command" running as process "$(cat "$CHAOS_PID")". Stop it first.
+      exit 1
+    fi
+  fi
+}
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=$(cd "$bin">/dev/null || exit; pwd)
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+CLASSPATH=$HBASE_CONF_DIR
+for f in ../lib/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f
+done
+
+# get log directory
+if [ "$HBASE_LOG_DIR" = "" ]; then
+  export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+
+if [ "$HBASE_PID_DIR" = "" ]; then
+  HBASE_PID_DIR=/tmp
+fi
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+  export HBASE_IDENT_STRING="$USER"
+fi
+
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME
+export CHAOS_LOGFILE=$HBASE_LOG_PREFIX.log
+
+if [ -z "${HBASE_ROOT_LOGGER}" ]; then
+export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"INFO,RFA"}
+fi
+
+if [ -z "${HBASE_SECURITY_LOGGER}" ]; then
+export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"INFO,RFAS"}
+fi
+
+CHAOS_LOGLOG=${CHAOS_LOGLOG:-"${HBASE_LOG_DIR}/${CHAOS_LOGFILE}"}
+CHAOS_PID=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid
+
+if [ -z "$CHAOS_JAVA_OPTS" ]; then
+  CHAOS_JAVA_OPTS="-Xms1024m -Xmx4096m"
+fi
+
+case $startStop in
+
+(start)
+  check_before_start
+  echo running $command
+  CMD="${JAVA_HOME}/bin/java -Dapp.home=${HBASE_CONF_DIR}/../ ${CHAOS_JAVA_OPTS} -cp ${CLASSPATH} org.apache.hadoop.hbase.chaos.ChaosService -$command start &>> ${CHAOS_LOGLOG} &"
+
+  eval $CMD
+  PID=$(echo $!)
+  echo ${PID} >${CHAOS_PID}
+
+  echo "Chaos ${1} process Started with ${PID} !"
+  now=$(date)
+  echo "${now} Chaos ${1} process Started with ${PID} !" >>${CHAOS_LOGLOG}
+  ;;
+
+(stop)
+  echo stopping $command
+  if [ -f $CHAOS_PID ]; then
+    pidToKill=`cat $CHAOS_PID`
+    # kill -0 == see if the PID exists
+    if kill -0 $pidToKill > /dev/null 2>&1; then
+      echo -n stopping $command
+      echo "`date` Terminating $command" >> $CHAOS_LOGLOG
+      kill $pidToKill > /dev/null 2>&1
+      waitForProcessEnd $pidToKill $command
+    else
+      retval=$?
+ echo no $command to stop because kill -0 of pid $pidToKill failed with status $retval + fi + else + echo no $command to stop because no pid file $CHAOS_PID + fi + rm -f $CHAOS_PID + ;; + +(*) + echo $usage + exit 1 + ;; + +esac diff --git a/bin/hbase b/bin/hbase index 127fa3c7fdd8..d2307c50c33a 100755 --- a/bin/hbase +++ b/bin/hbase @@ -258,7 +258,7 @@ if [ "${INTERNAL_CLASSPATH}" != "true" ]; then # If command needs our shaded mapreduce, use it # N.B "mapredcp" is not included here because in the shaded case it skips our built classpath - declare -a commands_in_mr_jar=("hbck" "snapshot" "canary" "regionsplitter" "pre-upgrade") + declare -a commands_in_mr_jar=("hbck" "snapshot" "regionsplitter" "pre-upgrade") for c in "${commands_in_mr_jar[@]}"; do if [ "${COMMAND}" = "${c}" ]; then # If we didn't find a jar above, this will just be blank and the @@ -509,13 +509,22 @@ fi # figure out which class to run if [ "$COMMAND" = "shell" ] ; then #find the hbase ruby sources + # assume we are in a binary install if lib/ruby exists if [ -d "$HBASE_HOME/lib/ruby" ]; then - HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby" + # We want jruby to consume these things rather than our bootstrap script; + # jruby will look for the env variable 'JRUBY_OPTS'. + JRUBY_OPTS="${JRUBY_OPTS} -X+O" + export JRUBY_OPTS + # hbase-shell.jar contains a 'jar-bootstrap.rb' + # for more info see + # https://github.com/jruby/jruby/wiki/StandaloneJarsAndClasses#standalone-executable-jar-files + CLASS="org.jruby.JarBootstrapMain" + # otherwise assume we are running in a source checkout else HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby" + CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/hbase-shell/src/main/ruby/jar-bootstrap.rb" fi HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS" - CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb" elif [ "$COMMAND" = "hbck" ] ; then # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck. case "${1}" in diff --git a/bin/hirb.rb b/bin/hirb.rb index 7b1b8f172c25..12353ca1a0ec 100644 --- a/bin/hirb.rb +++ b/bin/hirb.rb @@ -1,5 +1,3 @@ -# -# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,217 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports -# and hbase commands and then loads jirb. Outputs a banner that tells user -# where to find help, shell version, and loads up a custom hirb. -# -# In noninteractive mode, runs commands from stdin until completion or an error. -# On success will exit with status 0, on any problem will exit non-zero. Callers -# should only rely on "not equal to 0", because the current error exit code of 1 -# will likely be updated to diffentiate e.g. invalid commands, incorrect args, -# permissions, etc. - -# TODO: Interrupt a table creation or a connection to a bad master. Currently -# has to time out. Below we've set down the retries for rpc and hbase but -# still can be annoying (And there seem to be times when we'll retry for -# ever regardless) -# TODO: Add support for listing and manipulating catalog tables, etc. 
-# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes - -# Run the java magic include and import basic HBase types that will help ease -# hbase hacking. -include Java - -# Some goodies for hirb. Should these be left up to the user's discretion? -require 'irb/completion' -require 'pathname' - -# Add the directory names in hbase.jruby.sources commandline option -# to the ruby load path so I can load up my HBase ruby modules -sources = java.lang.System.getProperty('hbase.ruby.sources') -$LOAD_PATH.unshift Pathname.new(sources) - -# -# FIXME: Switch args processing to getopt -# -# See if there are args for this shell. If any, read and then strip from ARGV -# so they don't go through to irb. Output shell 'usage' if user types '--help' -cmdline_help = </dev/null; pwd` . "$bin"/hbase-config.sh . "$bin"/hbase-common.sh +show_usage() { + echo "$usage" +} + +if [ "--help" = "$1" ] || [ "-h" = "$1" ]; then + show_usage + exit 0 +fi + # variables needed for stop command if [ "$HBASE_LOG_DIR" = "" ]; then export HBASE_LOG_DIR="$HBASE_HOME/logs" @@ -50,7 +62,7 @@ if [[ -e $pid ]]; then nohup nice -n ${HBASE_NICENESS:-0} "$HBASE_HOME"/bin/hbase \ --config "${HBASE_CONF_DIR}" \ - master stop "$@" > "$logout" 2>&1 < /dev/null & + master stop --shutDownCluster "$@" > "$logout" 2>&1 < /dev/null & waitForProcessEnd `cat $pid` 'stop-master-command' diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 5aaefd80ff07..f3de8edffcbe 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { @@ -25,13 +25,13 @@ pipeline { } options { buildDiscarder(logRotator(numToKeepStr: '15')) - timeout (time: 9, unit: 'HOURS') + timeout (time: 16, unit: 'HOURS') timestamps() skipDefaultCheckout() disableConcurrentBuilds() } environment { - YETUS_RELEASE = '0.11.1' + YETUS_RELEASE = '0.12.0' // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure. OUTPUT_DIR_RELATIVE_GENERAL = 'output-general' OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7' @@ -49,7 +49,7 @@ pipeline { ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/excludes" // TODO does hadoopcheck need to be jdk specific? 
SHALLOW_CHECKS = 'all,-shadedjars,-unit' // run by the 'yetus general check' DEEP_CHECKS = 'compile,htmlout,javac,maven,mvninstall,shadedjars,unit' // run by 'yetus jdkX (HadoopY) checks' @@ -192,7 +192,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -257,7 +257,7 @@ pipeline { stage ('yetus jdk7 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -338,7 +338,7 @@ pipeline { stage ('yetus jdk8 hadoop2 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -419,7 +419,7 @@ pipeline { stage ('yetus jdk8 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -504,7 +504,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -593,9 +593,9 @@ pipeline { // TODO (HBASE-23870): replace this with invocation of the release tool stage ('packaging and integration') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } environment { BASEDIR = "${env.WORKSPACE}/component" diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d314ba45cd9c..a725f1dbce4f 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -37,14 +37,14 @@ pipeline { DOCKERFILE_REL = "${SRC_REL}/dev-support/docker/Dockerfile" YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION = 'rel/0.11.1' - GENERAL_CHECK_PLUGINS = 'all,-compile,-javac,-javadoc,-jira,-shadedjars,-unit' + YETUS_VERSION = 'rel/0.12.0' + GENERAL_CHECK_PLUGINS = 'all,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/output/excludes" // a global view of paths. parallel stages can land on the same host concurrently, so each // stage works in its own subdirectory. 
there is an "output" under each of these
@@ -168,6 +168,7 @@ pipeline {
             BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}"
             DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}"
             YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}"
+            SKIP_ERRORPRONE = true
           }
           steps {
             dir("${SOURCEDIR}") {
@@ -268,6 +269,7 @@ pipeline {
             BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}"
             DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}"
             YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}"
+            SKIP_ERRORPRONE = true
           }
           steps {
             dir("${SOURCEDIR}") {
diff --git a/dev-support/adhoc_run_tests/Jenkinsfile b/dev-support/adhoc_run_tests/Jenkinsfile
index e06fdba325a6..476795d50ca8 100644
--- a/dev-support/adhoc_run_tests/Jenkinsfile
+++ b/dev-support/adhoc_run_tests/Jenkinsfile
@@ -51,10 +51,10 @@ pipeline {
   stages {
     stage ('run tests') {
       tools {
-        maven 'Maven (latest)'
+        maven 'maven_latest'
        // this needs to be set to the jdk that ought to be used to build releases on the branch
        // the Jenkinsfile is stored in.
-        jdk "JDK 1.8 (latest)"
+        jdk "jdk_1.8_latest"
       }
       steps {
        sh """#!/bin/bash -e
diff --git a/dev-support/checkcompatibility.py b/dev-support/checkcompatibility.py
index b764aaaec17d..d39599aa3ea1 100755
--- a/dev-support/checkcompatibility.py
+++ b/dev-support/checkcompatibility.py
@@ -229,7 +229,7 @@ def compare_results(tool_results, known_issues, compare_warnings):
                              observed_count=tool_results[check][issue_type])
                          for check, known_issue_counts in known_issues.items()
                          for issue_type, known_count in known_issue_counts.items()
-                         if tool_results[check][issue_type] > known_count]
+                         if compare_tool_results_count(tool_results, check, issue_type, known_count)]
     if not compare_warnings:
         unexpected_issues = [tup for tup in unexpected_issues
@@ -241,6 +241,14 @@ def compare_results(tool_results, known_issues, compare_warnings):
     return bool(unexpected_issues)
+def compare_tool_results_count(tool_results, check, issue_type, known_count):
+    """ Check that problem counts are no more than the known count.
+    (This function exists just so we can add logging; previously this was an
+    inlined one-liner, which made it hard to debug.)
+    """
+    # logging.info("known_count=%s, check key=%s, tool_results=%s, issue_type=%s",
+    #              str(known_count), str(check), str(tool_results), str(issue_type))
+    return tool_results[check][issue_type] > known_count
 def process_java_acc_output(output):
     """ Process the output string to find the problems and warnings in both the
diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt
index 4a457ddc09ec..42959cd9da19 100644
--- a/dev-support/create-release/README.txt
+++ b/dev-support/create-release/README.txt
@@ -1,44 +1,67 @@
-Entrance script is _do-release-docker.sh_. Requires a local docker;
-for example, on mac os x, Docker for Desktop installed and running.
+Creates an HBase release candidate.
-For usage, pass '-h':
+The scripts in this directory came originally from spark
+(https://github.com/apache/spark/tree/master/dev/create-release). They were
+then modified to suit the hbase context. These scripts supersede the old
+_../make_rc.sh_ script for making release candidates because what is here is
+more comprehensive, doing more steps of the RM process, as well as running in
+a container so the RM build environment can be kept constant.
- $ ./do-release-docker.sh -h
+It:
-To run a build w/o invoking docker (not recommended!), use _do_release.sh_.
+ * Tags the release
+ * Sets version to the release version
+ * Sets version to the next SNAPSHOT version.
+ * Builds, signs, and hashes all artifacts.
+ * Pushes release tgzs to the dev dir in an apache dist.
+ * Pushes to repository.apache.org staging.
-Both scripts will query interactively for needed parameters and passphrases.
-For explanation of the parameters, execute:
- $ release-build.sh --help
-
-Before starting the RC build, run a reconciliation of what is in
-JIRA with what is in the commit log. Make sure they align and that
-anomalies are explained up in JIRA.
+The entry point is the do-release-docker.sh script. It requires a local
+docker; for example, on mac os x, Docker for Desktop installed and running.
-See http://hbase.apache.org/book.html#maven.release
+(To run a build w/o invoking docker (not recommended!), use _do_release.sh_.)
-Regardless of where your release build will run (locally, locally in docker, on a remote machine,
-etc) you will need a local gpg-agent with access to your secret keys. A quick way to tell gpg
-to clear out state and start a gpg-agent is via the following command phrase:
-
-$ gpgconf --kill all && gpg-connect-agent /bye
+The scripts will query interactively for needed parameters and passphrases.
+For explanation of the parameters, execute:
-Before starting an RC build, make sure your local gpg-agent has configs
-to properly handle your credentials, especially if you want to avoid
-typing the passphrase to your secret key.
+ $ release-build.sh --help
-e.g. if you are going to run and step away, best to increase the TTL
-on caching the unlocked secret via ~/.gnupg/gpg-agent.conf
+The scripts run in dry-run mode by default where only local builds are
+performed and nothing is uploaded to the ASF repos. Pass the '-f' flag
+to remove dry-run mode.
+
+Before starting the RC build, run a reconciliation of what is in JIRA with
+what is in the commit log. Make sure they align and that anomalies are
+explained up in JIRA. See http://hbase.apache.org/book.html#maven.release
+for how.
+
+Regardless of where your release build will run (locally, locally in docker,
+on a remote machine, etc) you will need a local gpg-agent with access to your
+secret keys. Before starting an RC build, make sure your local gpg-agent has
+configs to properly handle your credentials, especially if you want to avoid
+typing the passphrase to your secret key: e.g. if you are going to run
+and step away (the RC creation takes ~5 hours), best to increase the TTL on
+caching the unlocked secret by setting the following in your local
+~/.gnupg/gpg-agent.conf file:
 # in seconds, e.g. a day
 default-cache-ttl 86400
 max-cache-ttl 86400
+A quick way to tell gpg to clear out state, re-read the gpg-agent.conf file
+and start a new gpg-agent is via the following command phrase:
+
+ $ gpgconf --kill all && gpg-connect-agent /bye
+
+You can verify the options took hold with '$ gpg --list-options gpg-agent'.
+
+Similarly, run ssh-agent with your ssh key added if building with docker.
+
 Running a build on GCE is easy enough. Here are some notes if of use.
-Create an instance. 4CPU/15G/10G disk seems to work well enough.
+Create an instance. 4CPU/15G/20G disk seems to work well enough.
 Once up, run the below to make your machine fit for RC building:
-# Presuming debian-compatible OS, do these steps on the VM
-# your VM username should be your ASF id, because it will show up in build artifacts.
+# Presuming debian-compatible OS, do these steps on the VM.
+# Your VM username should be your ASF id, because it will show up in build artifacts.
# Follow the docker install guide: https://docs.docker.com/engine/install/debian/ $ sudo apt-get install -y \ apt-transport-https \ @@ -101,7 +124,3 @@ $ git clone https://github.com/apache/hbase.git $ mkdir ~/build $ cd hbase $ ./dev-support/create-release/do-release-docker.sh -d ~/build - -# for building the main repo specifically you can save an extra download by pointing the build -# to the local clone you just made -$ ./dev-support/create-release/do-release-docker.sh -d ~/build -r .git diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh index e863cb373a0c..cda814cfbf1b 100755 --- a/dev-support/create-release/do-release-docker.sh +++ b/dev-support/create-release/do-release-docker.sh @@ -76,7 +76,7 @@ Options: -s [step] runs a single step of the process; valid steps are: tag|publish-dist|publish-release. If none specified, runs tag, then publish-dist, and then publish-release. 'publish-snapshot' is also an allowed, less used, option. - -x debug. do less clean up. (env file, gpg forwarding on mac) + -x debug. Does less clean up (env file, gpg forwarding on mac) EOF exit 1 } @@ -147,7 +147,7 @@ done # We need to import that public key in the container in order to use the private key via the agent. GPG_KEY_FILE="$WORKDIR/gpg.key.public" -echo "Exporting public key for ${GPG_KEY}" +log "Exporting public key for ${GPG_KEY}" fcreate_secure "$GPG_KEY_FILE" $GPG "${GPG_ARGS[@]}" --export "${GPG_KEY}" > "${GPG_KEY_FILE}" @@ -155,10 +155,10 @@ function cleanup { local id banner "Release Cleanup" if is_debug; then - echo "skipping due to debug run" + log "skipping due to debug run" return 0 fi - echo "details in cleanup.log" + log "details in cleanup.log" if [ -f "${ENVFILE}" ]; then rm -f "$ENVFILE" fi @@ -186,7 +186,7 @@ function cleanup { trap cleanup EXIT -echo "Host OS: ${HOST_OS}" +log "Host OS: ${HOST_OS}" if [ "${HOST_OS}" == "DARWIN" ]; then run_silent "Building gpg-agent-proxy image with tag ${IMGTAG}..." "docker-proxy-build.log" \ docker build --build-arg "UID=${UID}" --build-arg "RM_USER=${USER}" \ @@ -198,7 +198,7 @@ run_silent "Building hbase-rm image with tag $IMGTAG..." "docker-build.log" \ --build-arg "RM_USER=${USER}" "$SELF/hbase-rm" banner "Final prep for container launch." -echo "Writing out environment for container." +log "Writing out environment for container." # Write the release information to a file with environment variables to be used when running the # image. ENVFILE="$WORKDIR/env.list" @@ -244,7 +244,7 @@ if [ -n "${GIT_REPO}" ]; then ;; # on the host but normally git wouldn't use the local optimization file://*) - echo "[INFO] converted file:// git repo to a local path, which changes git to assume --local." + log "Converted file:// git repo to a local path, which changes git to assume --local." GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO#file://},dst=/opt/hbase-repo,consistency=delegated") echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}" GIT_REPO="/opt/hbase-repo" @@ -286,8 +286,8 @@ fi GPG_PROXY_MOUNT=() if [ "${HOST_OS}" == "DARWIN" ]; then GPG_PROXY_MOUNT=(--mount "type=volume,src=gpgagent,dst=/home/${USER}/.gnupg/") - echo "Setting up GPG agent proxy container needed on OS X." - echo " we should clean this up for you. If that fails the container ID is below and in " \ + log "Setting up GPG agent proxy container needed on OS X." + log " we should clean this up for you. 
If that fails the container ID is below and in " \ "gpg-proxy.cid" #TODO the key pair used should be configurable docker run --rm -p 62222:22 \ @@ -301,8 +301,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then sort "${HOME}/.ssh/known_hosts" | comm -1 -3 - "${WORKDIR}/gpg-agent-proxy.ssh-keyscan" \ > "${WORKDIR}/gpg-agent-proxy.known_hosts" if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then - echo "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." - echo "The following entry(ies) arre missing:" + log "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." + log "The following entry(ies) are missing:" sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts" read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER if [ "$ANSWER" != "y" ]; then @@ -310,8 +310,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then fi cat "${WORKDIR}/gpg-agent-proxy.known_hosts" >> "${HOME}/.ssh/known_hosts" fi - echo "Launching ssh reverse tunnel from the container to gpg agent." - echo " we should clean this up for you. If that fails the PID is in gpg-proxy.ssh.pid" + log "Launching ssh reverse tunnel from the container to gpg agent." + log " we should clean this up for you. If that fails the PID is in gpg-proxy.ssh.pid" ssh -p 62222 -R "/home/${USER}/.gnupg/S.gpg-agent:$(gpgconf --list-dir agent-extra-socket)" \ -i "${HOME}/.ssh/id_rsa" -N -n localhost >gpg-proxy.ssh.log 2>&1 & echo $! > "${WORKDIR}/gpg-proxy.ssh.pid" @@ -326,10 +326,10 @@ else fi banner "Building $RELEASE_TAG; output will be at $WORKDIR/output" -echo "We should clean the container up when we are done. If that fails then the container ID " \ +log "We should clean the container up when we are done. If that fails then the container ID " \ "is in release.cid" echo -# Where possible we specifcy "consistency=delegated" when we do not need host access during the +# Where possible we specify "consistency=delegated" when we do not need host access during the # build run. On Mac OS X specifically this gets us a big perf improvement. cmd=(docker run --rm -ti \ --env-file "$ENVFILE" \ diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index ebab9335cc27..904d813fc3c6 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -17,6 +17,10 @@ # limitations under the License. # +# Make a tmp dir into which we put files cleaned-up on exit. +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR" EXIT + set -e # Use the adjacent do-release-docker.sh instead, if you can. # Otherwise, this runs core of the release creation. @@ -86,17 +90,19 @@ else get_release_info fi -GPG_TTY="$(tty)" -export GPG_TTY -echo "Testing gpg signing." -echo "foo" > gpg_test.txt -if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign gpg_test.txt ; then +# Check GPG +gpg_test_file="${TMPDIR}/gpg_test.$$.txt" +echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" +echo "foo" > "${gpg_test_file}" +if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then gpg_agent_help fi # In --batch mode we have to be explicit about what we are verifying -if ! "${GPG}" "${GPG_ARGS[@]}" --verify gpg_test.txt.asc gpg_test.txt ; then +if ! 
"${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then gpg_agent_help fi +GPG_TTY="$(tty)" +export GPG_TTY if [[ -z "$RELEASE_STEP" ]]; then # If doing all stages, leave out 'publish-snapshot' @@ -118,9 +124,9 @@ function should_build { if should_build "tag" && [ "$SKIP_TAG" = 0 ]; then if [ -z "${YETUS_HOME}" ] && [ "${RUNNING_IN_DOCKER}" != "1" ]; then - declare local_yetus="/opt/apache-yetus/0.11.1/" + declare local_yetus="/opt/apache-yetus/0.12.0/" if [ "$(get_host_os)" = "DARWIN" ]; then - local_yetus="/usr/local/Cellar/yetus/0.11.1/" + local_yetus="/usr/local/Cellar/yetus/0.12.0/" fi YETUS_HOME="$(read_config "YETUS_HOME not defined. Absolute path to local install of Apache Yetus" "${local_yetus}")" export YETUS_HOME diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 630b8f17332e..c43976f61dd1 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -21,6 +21,7 @@ # * Java 8 FROM ubuntu:18.04 + # Install extra needed repos and refresh. # # This is all in a single "RUN" command so that if anything changes, "apt update" is run to fetch @@ -33,8 +34,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ libcurl4-openssl-dev='7.58.0-*' \ libxml2-dev='2.9.4+dfsg1-*' \ lsof='4.89+dfsg-*' \ - maven='3.6.0-*' \ - openjdk-8-jdk='8u252-b09-*' \ + openjdk-8-jdk='8u*' \ python-pip='9.0.1-*' \ subversion='1.9.7-*' \ wget='1.19.4-*' \ @@ -43,10 +43,23 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java \ && pip install \ python-dateutil==2.8.1 -# Install Apache Yetus -ENV YETUS_VERSION 0.11.1 + SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ + +# Install mvn 3.6.3. +ARG MAVEN_VERSION=3.6.3 +ARG SHA=c35a1803a6e70a126e80b2b3ae33eed961f83ed74d18fcd16909b2d44d7dada3203f1ffe726c17ef8dcca2dcaa9fca676987befeadc9b9f759967a8cb77181c0 +ARG BASE_URL=https://apache.osuosl.org/maven/maven-3/${MAVEN_VERSION}/binaries +RUN mkdir -p /opt/maven \ + && curl -fsSL -o /tmp/apache-maven.tar.gz ${BASE_URL}/apache-maven-${MAVEN_VERSION}-bin.tar.gz \ + && echo "${SHA} /tmp/apache-maven.tar.gz" | sha512sum -c - \ + && tar -xzf /tmp/apache-maven.tar.gz -C /opt/maven --strip-components=1 \ + && rm -f /tmp/apache-maven.tar.gz \ + && ln -s /opt/maven/bin/mvn /usr/bin/mvn + +# Install Apache Yetus +ENV YETUS_VERSION 0.12.0 +RUN curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION} diff --git a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile index a71d867613b1..3d206dc83365 100644 --- a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile +++ b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile @@ -83,7 +83,7 @@ FROM ubuntu:18.04 # into the container rather than launching a new docker container. 
RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && DEBIAN_FRONTEND=noninteractive apt-get -qq -y install --no-install-recommends \ - openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.2 && mkdir /run/sshd \ + openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.3 && mkdir /run/sshd \ && echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index db28f6f08b42..cb13110877f1 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -81,7 +81,7 @@ set -e function cleanup { # If REPO was set, then leave things be. Otherwise if we defined a repo clean it out. if [[ -z "${REPO}" ]] && [[ -n "${MAVEN_LOCAL_REPO}" ]]; then - echo "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 + log "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 rm -f "${MAVEN_SETTINGS_FILE}" &> /dev/null || true rm -rf "${MAVEN_LOCAL_REPO}" &> /dev/null || true fi @@ -91,6 +91,10 @@ if [ $# -ne 1 ]; then exit_with_usage fi +if [[ "$1" == "-h" ]]; then + exit_with_usage +fi + if [[ "$*" == *"help"* ]]; then exit_with_usage fi @@ -136,19 +140,21 @@ if [[ "$1" == "tag" ]]; then git config user.name "$GIT_NAME" git config user.email "$GIT_EMAIL" + git config user.signingkey "${GPG_KEY}" # Create release version maven_set_version "$RELEASE_VERSION" + find . -name pom.xml -exec git add {} \; git add RELEASENOTES.md CHANGES.md - git commit -a -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" - echo "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" - git tag "$RELEASE_TAG" + git commit -s -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" + log "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" + git tag -s -m "Via create-release" "$RELEASE_TAG" # Create next version maven_set_version "$NEXT_VERSION" - - git commit -a -m "Preparing development version $NEXT_VERSION" + find . -name pom.xml -exec git add {} \; + git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then # Push changes @@ -159,7 +165,7 @@ if [[ "$1" == "tag" ]]; then else cd .. mv "${PROJECT}" "${PROJECT}.tag" - echo "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." + log "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." fi exit 0 fi @@ -186,7 +192,7 @@ fi cd "${PROJECT}" git checkout "$GIT_REF" git_hash="$(git rev-parse --short HEAD)" -echo "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" +log "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" if [ -z "${RELEASE_VERSION}" ]; then RELEASE_VERSION="$(maven_get_version)" @@ -210,7 +216,7 @@ cd .. 
if [[ "$1" == "publish-dist" ]]; then # Source and binary tarballs - echo "Packaging release source tarballs" + log "Packaging release source tarballs" make_src_release "${PROJECT}" "${RELEASE_VERSION}" # we do not have binary tarballs for hbase-thirdparty @@ -228,7 +234,7 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "${svn_target:?}/${DEST_DIR_NAME}" mkdir -p "$svn_target/${DEST_DIR_NAME}" - echo "Copying release tarballs" + log "Copying release tarballs" cp "${PROJECT}"-*.tar.* "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/CHANGES.md" "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/RELEASENOTES.md" "$svn_target/${DEST_DIR_NAME}/" @@ -241,6 +247,7 @@ if [[ "$1" == "publish-dist" ]]; then fi shopt -u nocasematch + log "svn add" svn add "$svn_target/${DEST_DIR_NAME}" if ! is_dry_run; then @@ -250,9 +257,10 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "$svn_target" else mv "$svn_target/${DEST_DIR_NAME}" "${svn_target}_${DEST_DIR_NAME}.dist" - echo "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" + log "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" rm -rf "$svn_target" fi + log "svn ci done" exit 0 fi @@ -261,13 +269,13 @@ if [[ "$1" == "publish-snapshot" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_snapshot.log" - echo "Publishing snapshot to nexus" + log "Publishing snapshot to nexus" maven_deploy snapshot "$mvn_log" if ! is_dry_run; then - echo "Snapshot artifacts successfully published to repo." + log "Snapshot artifacts successfully published to repo." rm "$mvn_log" else - echo "Dry run: Snapshot artifacts successfully built, but not published due to dry run." + log "Dry run: Snapshot artifacts successfully built, but not published due to dry run." fi ) exit $? @@ -277,16 +285,16 @@ if [[ "$1" == "publish-release" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_release.log" - echo "Staging release in nexus" + log "Staging release in nexus" maven_deploy release "$mvn_log" declare staged_repo_id="dryrun-no-repo" if ! is_dry_run; then staged_repo_id=$(grep -o "Closing staging repository with ID .*" "$mvn_log" \ | sed -e 's/Closing staging repository with ID "\([^"]*\)"./\1/') - echo "Release artifacts successfully published to repo ${staged_repo_id}" + log "Release artifacts successfully published to repo ${staged_repo_id}" rm "$mvn_log" else - echo "Dry run: Release artifacts successfully built, but not published due to dry run." + log "Dry run: Release artifacts successfully built, but not published due to dry run." fi # Dump out email to send. Where we find vote.tmpl depends # on where this script is run from @@ -300,5 +308,5 @@ fi set +x # done with detailed logging cd .. rm -rf "${PROJECT}" -echo "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 +log "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 exit_with_usage diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 64654bba4b86..d907253dffe6 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -16,6 +16,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# Source this file if you want to use any of its utiilty (also useful +# testing the below functions). Do "$ . ./release-util.sh" and then +# you can do stuff like call the CHANGES updating function +# update_releasenotes: +# +# $ update_releasenotes ~/checkouts/hbase.apache.git 2.3.4 +# +# Just make sure any environment variables needed are predefined +# in your context. +# DRY_RUN=${DRY_RUN:-1} #default to dry run DEBUG=${DEBUG:-0} GPG=${GPG:-gpg} @@ -26,10 +37,8 @@ fi # Maven Profiles for publishing snapshots and release to Maven Central and Dist PUBLISH_PROFILES=("-P" "apache-release,release") -set -e - function error { - echo "Error: $*" >&2 + log "Error: $*" >&2 exit 1 } @@ -54,10 +63,14 @@ function parse_version { function banner { local msg="$1" echo "========================" - echo "=== ${msg}" + log "${msg}" echo } +function log { + echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") ${1}" +} + # current number of seconds since epoch function get_ctime { date +"%s" @@ -71,17 +84,17 @@ function run_silent { local -i stop_time banner "${BANNER}" - echo "Command: $*" - echo "Log file: $LOG_FILE" + log "Command: $*" + log "Log file: $LOG_FILE" start_time="$(get_ctime)" if ! "$@" 1>"$LOG_FILE" 2>&1; then - echo "Command FAILED. Check full logs for details." + log "Command FAILED. Check full logs for details." tail "$LOG_FILE" exit 1 fi stop_time="$(get_ctime)" - echo "=== SUCCESS ($((stop_time - start_time)) seconds)" + log "SUCCESS ($((stop_time - start_time)) seconds)" } function fcreate_secure { @@ -147,7 +160,7 @@ function get_release_info { local version version="$(curl -s "$ASF_REPO_WEBUI;a=blob_plain;f=pom.xml;hb=refs/heads/$GIT_BRANCH" | parse_version)" - echo "Current branch VERSION is $version." + log "Current branch VERSION is $version." NEXT_VERSION="$version" RELEASE_VERSION="" @@ -199,7 +212,7 @@ function get_release_info { if git ls-remote --tags "$ASF_REPO" "$RELEASE_TAG" | grep -q "refs/tags/${RELEASE_TAG}$" ; then read -r -p "$RELEASE_TAG already exists. Continue anyway [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi SKIP_TAG=1 @@ -209,7 +222,7 @@ function get_release_info { GIT_REF="$RELEASE_TAG" if is_dry_run; then - echo "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." + log "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." GIT_REF="$(read_config "GIT_REF" "$GIT_REF")" fi export GIT_REF @@ -252,7 +265,7 @@ EOF read -r -p "Is this info correct [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi GPG_ARGS=("${GPG_ARGS[@]}" --local-user "${GPG_KEY}") @@ -279,7 +292,7 @@ function is_debug { function check_get_passwords { for env in "$@"; do if [ -z "${!env}" ]; then - echo "The environment variable $env is not set. Please enter the password or passphrase." + log "The environment variable $env is not set. Please enter the password or passphrase." echo # shellcheck disable=SC2229 stty -echo && printf "%s : " "$env" && read -r "$env" && printf '\n' && stty echo @@ -293,7 +306,7 @@ function check_needed_vars { local missing=0 for env in "$@"; do if [ -z "${!env}" ]; then - echo "$env must be set to run this script" + log "$env must be set to run this script" (( missing++ )) else # shellcheck disable=SC2163 @@ -322,7 +335,7 @@ function init_java { error "JAVA_HOME is not set." 
fi JAVA_VERSION=$("${JAVA_HOME}"/bin/javac -version 2>&1 | cut -d " " -f 2) - echo "java version: $JAVA_VERSION" + log "java version: $JAVA_VERSION" export JAVA_VERSION } @@ -330,7 +343,7 @@ function init_python { if ! [ -x "$(command -v python2)" ]; then error 'python2 needed by yetus. Install or add link? E.g: sudo ln -sf /usr/bin/python2.7 /usr/local/bin/python2' fi - echo "python version: $(python2 --version)" + log "python version: $(python2 --version)" } # Set MVN @@ -357,7 +370,7 @@ function init_yetus { fi # Work around yetus bug by asking test-patch for the version instead of rdm. YETUS_VERSION=$("${YETUS_HOME}/bin/test-patch" --version) - echo "Apache Yetus version ${YETUS_VERSION}" + log "Apache Yetus version ${YETUS_VERSION}" } function configure_maven { @@ -409,7 +422,7 @@ function git_clone_overwrite { if [[ -z "${GIT_REPO}" ]]; then asf_repo="gitbox.apache.org/repos/asf/${PROJECT}.git" - echo "[INFO] clone will be of the gitbox repo for ${PROJECT}." + log "Clone will be of the gitbox repo for ${PROJECT}." if [ -n "${ASF_USERNAME}" ] && [ -n "${ASF_PASSWORD}" ]; then # Ugly! encoded_username=$(python -c "import urllib; print urllib.quote('''$ASF_USERNAME''', '')") @@ -419,7 +432,7 @@ function git_clone_overwrite { GIT_REPO="https://${asf_repo}" fi else - echo "[INFO] clone will be of provided git repo." + log "Clone will be of provided git repo." fi # N.B. we use the shared flag because the clone is short lived and if a local repo repo was # given this will let us refer to objects there directly instead of hardlinks or copying. @@ -440,7 +453,7 @@ function start_step { if [ -z "${name}" ]; then name="${FUNCNAME[1]}" fi - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} start" >&2 + log "${name} start" >&2 get_ctime } @@ -452,7 +465,7 @@ function stop_step { name="${FUNCNAME[1]}" fi stop_time="$(get_ctime)" - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} stop ($((stop_time - start_time)) seconds)" + log "${name} stop ($((stop_time - start_time)) seconds)" } # Writes report into cwd! @@ -465,8 +478,18 @@ function generate_api_report { local timing_token timing_token="$(start_step)" # Generate api report. + # Filter out some jar types. Filters are tricky. Python regex on + # file basename. Exclude the saved-aside original jars... they are + # not included in resulting artifact. Also, do not include the + # hbase-shaded-testing-util.* jars. This jar is unzip'able on mac + # os x as is because has it a META_INF/LICENSE file and then a + # META_INF/license directory for the included jar's licenses; + # it fails to unjar on mac os x which this tool does making its checks + # (Its exclusion should be fine; it is just an aggregate of other jars). "${project}"/dev-support/checkcompatibility.py --annotation \ org.apache.yetus.audience.InterfaceAudience.Public \ + -e "original-hbase.*.jar" \ + -e "hbase-shaded-testing-util.*.jar" \ "$previous_tag" "$release_tag" previous_version="$(echo "${previous_tag}" | sed -e 's/rel\///')" cp "${project}/target/compat-check/report.html" "./api_compare_${previous_version}_to_${release_tag}.html" @@ -474,6 +497,7 @@ function generate_api_report { } # Look up the Jira name associated with project. +# Returns result on stdout. # Currently all the 'hbase-*' projects share the same HBASE jira name. This works because, # by convention, the HBASE jira "Fix Version" field values have the sub-project name pre-pended, # as in "hbase-operator-tools-1.0.0". 
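For reference, the -e excludes added to the checkcompatibility.py invocation above are, per the comment, Python regular expressions applied to each jar's file basename. A minimal, hypothetical sketch of that kind of basename filtering (illustrative only, not the script's actual code):

    import os
    import re

    # Exclude patterns like the -e flags above; matching is on the jar's basename.
    EXCLUDES = [re.compile(p) for p in ("original-hbase.*.jar", "hbase-shaded-testing-util.*.jar")]

    def is_excluded(jar_path):
        base = os.path.basename(jar_path)
        return any(p.match(base) for p in EXCLUDES)

    print(is_excluded("target/original-hbase-client-2.4.0.jar"))      # True
    print(is_excluded("target/hbase-shaded-testing-util-2.4.0.jar"))  # True
    print(is_excluded("target/hbase-client-2.4.0.jar"))               # False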
@@ -501,10 +525,17 @@ function update_releasenotes { local jira_project local timing_token timing_token="$(start_step)" + changelog="CHANGELOG.${jira_fix_version}.md" + releasenotes="RELEASENOTES.${jira_fix_version}.md" + if [ -f ${changelog} ]; then + rm ${changelog} + fi + if [ -f ${releasenotes} ]; then + rm ${releasenotes} + fi jira_project="$(get_jira_name "$(basename "$project_dir")")" "${YETUS_HOME}/bin/releasedocmaker" -p "${jira_project}" --fileversions -v "${jira_fix_version}" \ - -l --sortorder=newer --skip-credits - pwd + -l --sortorder=newer --skip-credits || true # First clear out the changes written by previous RCs. if [ -f "${project_dir}/CHANGES.md" ]; then sed -i -e \ @@ -517,24 +548,35 @@ function update_releasenotes { "${project_dir}/RELEASENOTES.md" || true fi + # Yetus will not generate CHANGES if no JIRAs fixed against the release version + # (Could happen if a release were bungled such that we had to make a new one + # without changes) + if [ ! -f "${changelog}" ]; then + echo -e "## Release ${jira_fix_version} - Unreleased (as of `date`)\nNo changes\n" > "${changelog}" + fi + if [ ! -f "${releasenotes}" ]; then + echo -e "# hbase ${jira_fix_version} Release Notes\nNo changes\n" > "${releasenotes}" + fi + # The releasedocmaker call above generates RELEASENOTES.X.X.X.md and CHANGELOG.X.X.X.md. if [ -f "${project_dir}/CHANGES.md" ]; then # To insert into project's CHANGES.md...need to cut the top off the # CHANGELOG.X.X.X.md file removing license and first line and then # insert it after the license comment closing where we have a # DO NOT REMOVE marker text! - sed -i -e '/## Release/,$!d' "CHANGELOG.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + sed -i -e '/## Release/,$!d' "${changelog}" + sed -i -e '2,${/^# HBASE Changelog/d;}' "${project_dir}/CHANGES.md" + sed -i -e "/DO NOT REMOVE/r ${changelog}" "${project_dir}/CHANGES.md" else - mv "CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + mv "${changelog}" "${project_dir}/CHANGES.md" fi if [ -f "${project_dir}/RELEASENOTES.md" ]; then # Similar for RELEASENOTES but slightly different. - sed -i -e '/Release Notes/,$!d' "RELEASENOTES.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r RELEASENOTES.${jira_fix_version}.md" \ - "${project_dir}/RELEASENOTES.md" + sed -i -e '/Release Notes/,$!d' "${releasenotes}" + sed -i -e '2,${/^# RELEASENOTES/d;}' "${project_dir}/RELEASENOTES.md" + sed -i -e "/DO NOT REMOVE/r ${releasenotes}" "${project_dir}/RELEASENOTES.md" else - mv "RELEASENOTES.${jira_fix_version}.md" "${project_dir}/RELEASENOTES.md" + mv "${releasenotes}" "${project_dir}/RELEASENOTES.md" fi stop_step "${timing_token}" } @@ -607,7 +649,7 @@ make_binary_release() { done else cd .. || exit - echo "No ${f_bin_prefix}*-bin.tar.gz product; expected?" + log "No ${f_bin_prefix}*-bin.tar.gz product; expected?" 
fi stop_step "${timing_token}" @@ -630,7 +672,7 @@ function kick_gpg_agent { # Do maven command to set version into local pom function maven_set_version { #input: local this_version="$1" - echo "${MVN[@]}" versions:set -DnewVersion="$this_version" + log "${MVN[@]}" versions:set -DnewVersion="$this_version" "${MVN[@]}" versions:set -DnewVersion="$this_version" | grep -v "no value" # silence logs } @@ -661,19 +703,18 @@ function maven_deploy { #inputs: fi # Publish ${PROJECT} to Maven repo # shellcheck disable=SC2154 - echo "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" - echo "Publish version is $RELEASE_VERSION" + log "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" + log "Publish version is $RELEASE_VERSION" # Coerce the requested version maven_set_version "$RELEASE_VERSION" # Prepare for signing kick_gpg_agent - declare -a mvn_goals=(clean install) + declare -a mvn_goals=(clean) if ! is_dry_run; then mvn_goals=("${mvn_goals[@]}" deploy) fi - echo "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" \ - "${mvn_goals[@]}" - echo "Logging to ${mvn_log_file}. This will take a while..." + log "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" "${mvn_goals[@]}" + log "Logging to ${mvn_log_file}. This will take a while..." rm -f "$mvn_log_file" # The tortuous redirect in the next command allows mvn's stdout and stderr to go to mvn_log_file, # while also sending stderr back to the caller. @@ -682,7 +723,7 @@ function maven_deploy { #inputs: "${mvn_goals[@]}" 1>> "$mvn_log_file" 2> >( tee -a "$mvn_log_file" >&2 ); then error "Deploy build failed, for details see log at '$mvn_log_file'." fi - echo "BUILD SUCCESS." + log "BUILD SUCCESS." stop_step "${timing_token}" return 0 } diff --git a/dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf b/dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf new file mode 100644 index 000000000000..caeafafad753 Binary files /dev/null and b/dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf differ diff --git a/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf b/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf new file mode 100644 index 000000000000..fe35c04ebbc3 Binary files /dev/null and b/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf differ diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 8a56c0bdb0cc..25e3fdeef841 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { @@ -43,7 +43,8 @@ pipeline { flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase%20Nightly/job/${BRANCH_NAME}" --is-yetus True --max-builds 10) flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase-Flaky-Tests/job/${BRANCH_NAME}" --is-yetus False --max-builds 30) docker build -t hbase-dev-support dev-support - docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase --workdir=/hbase hbase-dev-support python dev-support/flaky-tests/report-flakies.py --mvn -v "${flaky_args[@]}" + docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase -u `id -u`:`id -g` --workdir=/hbase hbase-dev-support \ + python 
dev-support/flaky-tests/report-flakies.py --mvn -v -o output "${flaky_args[@]}" ''' } } @@ -51,13 +52,13 @@ pipeline { post { always { // Has to be relative to WORKSPACE. - archiveArtifacts artifacts: "includes,excludes,dashboard.html" + archiveArtifacts artifacts: "output/*" publishHTML target: [ allowMissing: true, keepAll: true, alwaysLinkToLastBuild: true, // Has to be relative to WORKSPACE - reportDir: ".", + reportDir: "output", reportFiles: 'dashboard.html', reportName: 'Flaky Test Report' ] diff --git a/dev-support/flaky-tests/report-flakies.py b/dev-support/flaky-tests/report-flakies.py index 1b3161af6d83..d29ecfa4da6e 100755 --- a/dev-support/flaky-tests/report-flakies.py +++ b/dev-support/flaky-tests/report-flakies.py @@ -60,6 +60,8 @@ "strings are written to files so they can be saved as artifacts and easily imported in " "other projects. Also writes timeout and failing tests in separate files for " "reference.") +parser.add_argument("-o", "--output", metavar='dir', action='store', required=False, + help="the output directory") parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true") args = parser.parse_args() @@ -68,6 +70,11 @@ if args.verbose: logger.setLevel(logging.INFO) +output_dir = '.' +if args.output is not None: + output_dir = args.output + if not os.path.exists(output_dir): + os.makedirs(output_dir) def get_bad_tests(build_url, is_yetus): """ @@ -257,24 +264,24 @@ def expand_multi_config_projects(cli_args): all_bad_tests = all_hanging_tests.union(all_failed_tests) if args.mvn: includes = ",".join(all_bad_tests) - with open("./includes", "w") as inc_file: + with open(output_dir + "/includes", "w") as inc_file: inc_file.write(includes) excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests] - with open("./excludes", "w") as exc_file: + with open(output_dir + "/excludes", "w") as exc_file: exc_file.write(",".join(excludes)) - with open("./timeout", "w") as timeout_file: + with open(output_dir + "/timeout", "w") as timeout_file: timeout_file.write(",".join(all_timeout_tests)) - with open("./failed", "w") as failed_file: + with open(output_dir + "/failed", "w") as failed_file: failed_file.write(",".join(all_failed_tests)) dev_support_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f: template = Template(f.read()) -with open("dashboard.html", "w") as f: +with open(output_dir + "/dashboard.html", "w") as f: datetime = time.strftime("%m/%d/%Y %H:%M:%S") f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests), results=url_to_bad_test_results, build_ids=url_to_build_ids)) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index e043feeb342d..0ba200ba07f2 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -17,11 +17,11 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { - cron('H */12 * * *') // Every four hours. See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax + cron('H H/4 * * *') // Every four hours. 
See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax } options { // this should roughly match how long we tell the flaky dashboard to look at @@ -34,8 +34,8 @@ pipeline { } tools { // this should match what the yetus nightly job for the branch will use - maven 'Maven (latest)' - jdk "JDK 1.8 (latest)" + maven 'maven_latest' + jdk "jdk_1.8_latest" } stages { stage ('run flaky tests') { @@ -49,7 +49,7 @@ pipeline { mvn_args=("${mvn_args[@]}" -X) set -x fi - curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/includes" + curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/includes" if [ -s includes.txt ]; then rm -rf local-repository/org/apache/hbase mvn clean "${mvn_args[@]}" diff --git a/dev-support/git-jira-release-audit/README.md b/dev-support/git-jira-release-audit/README.md index 396128ad55df..6ea575e16fd3 100644 --- a/dev-support/git-jira-release-audit/README.md +++ b/dev-support/git-jira-release-audit/README.md @@ -62,6 +62,7 @@ usage: git_jira_release_audit.py [-h] [--populate-from-git POPULATE_FROM_GIT] [--release-line-regexp RELEASE_LINE_REGEXP] [--parse-release-tags PARSE_RELEASE_TAGS] [--fallback-actions-path FALLBACK_ACTIONS_PATH] + [--branch-filter-regexp BRANCH_FILTER_REGEXP] [--jira-url JIRA_URL] --branch-1-fix-version BRANCH_1_FIX_VERSION --branch-2-fix-version BRANCH_2_FIX_VERSION @@ -119,6 +120,9 @@ Interactions with the Git repo: --fallback-actions-path FALLBACK_ACTIONS_PATH Path to a file containing _DB.Actions applicable to specific git shas. (default: fallback_actions.csv) + --branch-filter-regexp BRANCH_FILTER_REGEXP + Limit repo parsing to branch names that match this + filter expression. (default: .*) --branch-1-fix-version BRANCH_1_FIX_VERSION The Jira fixVersion used to indicate an issue is committed to the specified release line branch @@ -175,8 +179,9 @@ fetch from Jira 100%|███████████████████ Optionally, the database can be build to include release tags, by specifying `--parse-release-tags=true`. This is more time-consuming, but is necessary for -auditing discrepancies between git and Jira. Running the same command but -including this flag looks like this: +auditing discrepancies between git and Jira. Optionally, limit the branches +under consideration by specifying a regex filter with `--branch-filter-regexp`. 
+Running the same command but including this flag looks like this: ```shell script origin/branch-1 100%|███████████████████████████████████████| 4084/4084 [08:58<00:00, 7.59 commit/s] diff --git a/dev-support/git-jira-release-audit/fallback_actions.csv b/dev-support/git-jira-release-audit/fallback_actions.csv index eb6c97c1c567..72bdf8f419b2 100644 --- a/dev-support/git-jira-release-audit/fallback_actions.csv +++ b/dev-support/git-jira-release-audit/fallback_actions.csv @@ -22,12 +22,16 @@ hexsha,action,jira_id 0057cd8ca7ff09ed6b794af71df301c5c47487f4,SKIP, 022f30ce0dd3dd931f6045c6778e194ef5c41f7a,SKIP, +048cee6e47022194a1c2bf84cdb9e2873c7f74dd,SKIP, 0505072c5182841ad1a28d798527c69bcc3348f0,SKIP, +057d83cfafd8d659576869f1e71e3e75029fbad3,SKIP, 05cb051423953b913156e4950b67f3d9b28ada5f,REVERT,HBASE-14391 05f8e94191ef6a63baadf56d6114d7d0317796f2,SKIP, 0791b878422eadf00b55076338f09bf059f39f0c,SKIP, 07f9f3d38cf4d0d01044ab28d90a50a1a009f6b8,SKIP, +081d65de93587f77c22675497c5f3314bf21ded3,SKIP, 0bff1305134b9c3a0bcad21900f5af68a8aedb4a,SKIP, +0ebc96e0491dde1aed25f7a5f7ba1df5ed2042c5,SKIP, 10f00547627076d79d77cf58dd2deaece2287084,ADD,HBASE-22330 10f3b77748a02a2c11635c33964929c0474e890d,SKIP, 1196e42362312080d3c523c107b5e8fefef9e57e,SKIP, @@ -63,19 +67,23 @@ hexsha,action,jira_id 2e4544a8b00766248c998850f8907511b8bae240,SKIP, 2e63f882c85fb5804aafff5d92503eca60c0820d,SKIP, 2ebd80499473bbac3eac083806211ec03e084db7,SKIP, +30ab9665068ba85ddfabf0d4e21f4da28d24404e,SKIP, 31b9096034e19171989fd5b76313e7e0f1a9a12a,SKIP, 31d37fb904c4fcd77e79f9df8db155c5a3d1d8ed,SKIP, 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,REVERT,HBASE-19685 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,SKIP, +34b2b48a6f6c5546f98a6716cfc6f5f001ed2f1d,SKIP, 34e97f9c08d97b38be9a8f7dda6214d7ae9c6ea8,SKIP, 34ecc75d1669e330c78c3e9b832eca0abf57902d,SKIP, 34fe1f5fd762e4ead3b0e2e820c360796939b315,SKIP, +361e81e1f893ae1bc923ef49d38b1832dbc6a253,SKIP, 37d46fcf85da772a06da29d9add8a0652330f6c5,SKIP, 38e2dbc503a7f9ef929ff11b615157f0ee79916c,SKIP, 3966d0fee6c9803cf567ef76d91855a1eaad621d,SKIP, 399b3e9d1bc68c2709565f0a1a719a9a66999564,SKIP, 39a4c56690eeeb2bb5ffaa0f3c8f6759b4fb3fb2,SKIP, 3a11028cdfc6e44576069bed452a0ed10c860db1,SKIP, +3a8b4d67386967b50a42941814801a2874d994eb,SKIP, 3b73ebb7b8975e18c67c24c258fbc061614bb7f2,SKIP, 3c7a349c2eab74a76c06b66df2e2d14ea7681f95,SKIP, 3dcb03947ce9cb1825167784992e689a23847351,ADD,HBASE-18290 @@ -118,6 +126,7 @@ hexsha,action,jira_id 6b54917d520d32d00f5b4e9420e0d4894aaa34e8,SKIP, 6cf647a0dfd696580c1d841e245d563beca451dd,SKIP, 6e376b900e125f71a71fd2a25c3ad08057b97f73,SKIP, +6f36c79c2fd0aadb204aed5a8f2459edfe153907,SKIP, 719993e0fe2b132b75a3689267ae4adff364b6aa,SKIP, 71ed7033675149956de855b6782e1e22fc908dc8,SKIP, 7242650afd466df511ba2d4cfa34f6d082cb1004,SKIP, @@ -137,10 +146,12 @@ hexsha,action,jira_id 7ea18e20680e86c200cbebc885ff91cfc1f72fac,SKIP, 80971f83d307ab661d830f1a2196729411873906,SKIP, 80d1f62cf7eaaeea569fe5a2e4a91fc270e7bc1f,SKIP, +825bdfb30413f205306debc14b120f1d33b52cc1,REVERT,HBASE-24713 829e6383d52e7a98947a4b2bdaa0b7e756bc6bfc,SKIP, 834488d435fb59d5cb2b0ed7f09b8b1e70d7e327,SKIP, 86242e1f55da7df6a2119389897d11356e6bbc2a,SKIP, 8670fb3339accf149d098552f523e9c14b90c941,SKIP, +87ce2cff979df88eed3ac2e530068fe2506a6fb6,SKIP, 880c7c35fc50f28ec3e072a4c62a348fc964e9e0,SKIP, 88ff206c57fac513b3c5442fd4369ced416279da,SKIP, 8aa1214a1722ba491d52cbbfab1b39cbd0eddeea,SKIP, @@ -149,6 +160,7 @@ hexsha,action,jira_id 8ef87ce4343e80321fcfd99594372759557c90f2,SKIP, 9213d7194ede5b723bc817a9bb634679ee3ce5c1,SKIP, 
930f68c0b976a600066b838283a0f3dce050256f,SKIP, +94a03d7ae2ba2986fd359720704b88808d50f623,ADD,HBASE-24713 962d7e9bf06f4e2e569ba34acae6203b4deef778,ADD,HBASE-19074 97d7b3572cc661a8d31f82b9c567d7a75b9eef95,SKIP, 99e18fed23a2a476514fa4bd500b07a8d913e330,SKIP, @@ -163,7 +175,9 @@ a05cef75c4b33171ab29d89d0fbb0fbbc11d6d39,SKIP, a312705dbc8e6d604adcc874526294c72b8ff580,SKIP, a67481209f5d315f06e3a6910fa44493e398210f,REVERT,HBASE-16840 a72d40694116d84454f480c961c1cc1f5d7e1deb,SKIP, +a77829d5b7d627e904d13b9ffce41044b56d0feb,SKIP, a80799a3bc73513393f764df330704ad688140e8,SKIP, +aa5b28a7797564e021dd57626bebe911ad5da727,SKIP, aa8a9997792b686a606e8ada2cd34fb9ad895bc0,SKIP, aaeb488f43a9e79655275ddb481ba970b49d1173,SKIP, ac9035db199902533c07d80f384ae29c115d3ad5,SKIP, @@ -171,11 +185,17 @@ ad2064d8a5ff57d021852c3210a30c5f58eaa43c,SKIP, ad885a0baae21b943ffebef168c65650f8317023,SKIP, adec117e47a2ca503458954d6877667d877890fd,SKIP, ae95b1f215a120890de5454739651911749057ca,SKIP, +af1fa22e4dc824f8cb73ed682ee7c94fbae7a1c8,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, b182030d48dcc89d8c26b98f2a58d7909957ea49,SKIP, +b33c200a28d6f26e68e3e2e651b7da463f030dc2,SKIP, b3d55441b8174c704ada4585603f6bcfca298843,SKIP, +b44cf90220ad58ab21852e451e505d4342ca022d,SKIP, b65231d04dbc565a578ce928e809aa51f5439857,SKIP, b6549007b313e8f3aa993d5c1ebd29c84ccb7b7b,SKIP, b6d4fc955fe0fc41f5225f1cc2e3e4b92029251c,SKIP, +b78f4367f710a8cb2b3df37ba158604e530301dc,SKIP, b9c676cdc048c52f927cfa906fd18ff412e4ca20,SKIP, b9f5c6b065ebd572193c1fdc9d38557320b42fe6,SKIP, bcadcef21048e4764f7ae8dec3ce52884f20c02c,SKIP, @@ -184,7 +204,9 @@ bd2c03dc7df600fe481ba7f2fed958deb18f5291,SKIP, bd4e14db07ea32a45c3ef734e06d195a405da67c,SKIP, bd4eba2b53b7af738fd9584511d737c4393d0855,SKIP, bef0616ef33306afca3060b96c2cba5f9762035d,SKIP, +c03ec837e70ebf014aabd8610d5fe4d53b239efa,SKIP, c100fb835a54be6002fe9704349e726f27b15b7a,SKIP, +c40b4781e4ae49308d5ac037364772de75f4f4e2,SKIP, c5e0a1397b3c6a14612e4c5b66f995c02de4310b,SKIP, c71da858ada94e1b93065f0b7caf3558942bc4da,SKIP, c89cfd3406823cf05fa83464c5ddee16bf0d473f,ADD,HBASE-17248 @@ -201,6 +223,7 @@ ce6a6014daded424d9460f7de4eadae169f52683,SKIP, cf1ccc30909bfb04326415e5a648605759d57360,SKIP, cf45c8d30a4d9810cd676b2a1a348141c4e27eeb,SKIP, d14e335edc9c22c30827bc75e73b5303ca64ee0d,SKIP, +d2c1886bf4df5746c05af7bc9b82715ead0b9d8e,ADD,HBASE-25450 d32230d0b5a4706b625cc7ac7ee7d28f44bd7b85,SKIP, d524768528cd15151ba1ebb82e32609da5308128,SKIP, d5a1b276270a1d41f21badd5b85d9502f8f9f415,SKIP, @@ -208,6 +231,7 @@ d6e85b0511396b3221cc7f495eaee5bbacc42afd,SKIP, d91908b0d46156fa364ba11d476b9cdbc01d0411,SKIP, da619282469c65dcf6bee06783c4246a24a1517c,SKIP, da8bcabb99ee5a9a35efd114aa45292616ca3c70,SKIP, +db7ad07081343df040b7d41b8881155257a02db5,SKIP, dfb1af48927a66aa5baa5b182e84327770b3c6c9,SKIP, e075492b4dac5c347b7f6b2e5318e2967b95b18b,SKIP, e08277ac8fe466bf63f6fc342256ab7b8d41243a,SKIP, @@ -218,11 +242,14 @@ e40fcee6b54712b76d702af6937c3320c60df2b9,SKIP, e501fe1a296be8fec0890e7e15414683aa3d933b,SKIP, e5349d589c000e395e12340e003aa9e2153afea6,SKIP, e5fb8214b2bfd6396539a4e8b6cf5f3cc5e9c06f,REVERT,HBASE-21874 +e67d7516ec4b4be0f0d9258af9f8c714b0bb7c58,SKIP, e869a20123afe326e198d35d110f5c0360ea244f,SKIP, e8e45ef8f2fb91a870399636b492d5cee58a4c39,SKIP, e92a147e1961366e36a39577816994566e1e21c5,SKIP, eacf3cb29641af1a68978d9bd7654f643a3aa3a1,SKIP, ec251bdd3649de7f30ece914c7930498e642527e,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, 
ec39dc8c149b9f89a91596d57d27de812973f0a9,SKIP, ed520133d6dbb47a40f1883a56460582732f863a,SKIP, ed62e08786273587378b86278fae452dfc817dfb,SKIP, @@ -232,11 +259,15 @@ f0b1c4279eaf09d255336d1de9c2bc2b5d726e70,SKIP, f4acc47e2debb3d3d87c05436d940ef2fdfe0be3,SKIP, f6095adea64912deaebfaf2a6a5881b820d315b2,SKIP, f61f02b2b24af39545cc2754cfbc25122da60651,SKIP, +f66c80b6a655a6a39cdaba1af50918abcefff303,SKIP, f6d6bf59faa2a4a0767480af7658e4a844fd186f,SKIP, +f7bc7be1eb1ae7cd8ab09754845480e32a509384,SKIP, fab0b2e60385fca20021f74335a9c3d36368f621,SKIP, fb9be046aefb2e0b6e832dd00bc44a38ee62ab1f,SKIP, fc2ef413fab50d4375318fbd667051fd02f085f2,SKIP, fd5c5fb3887914183a1510f5972e50d9365e02f5,SKIP, +fd7beffcf92e1f435f4fd4aafb98057f067e9de4,SKIP, fe84833ea22c30b68022203132706ebb1e526852,SKIP, fe9e7483a316df9f5a62e9c215bcedcfd65c5f12,SKIP, ffcd4d424f69b4ecac1bd9f5980c14bb4b61a3fa,ADD,HBASE-13796 +ffeed7c6648391f02fd97d1da1fe4d210398437e,SKIP, diff --git a/dev-support/git-jira-release-audit/git_jira_release_audit.py b/dev-support/git-jira-release-audit/git_jira_release_audit.py index db2788d081d0..f8066c44e8f5 100644 --- a/dev-support/git-jira-release-audit/git_jira_release_audit.py +++ b/dev-support/git-jira-release-audit/git_jira_release_audit.py @@ -122,16 +122,21 @@ def flush_commits(self): """Commit any pending changes to the database.""" self.conn.commit() - def apply_git_tag(self, branch, git_sha, git_tag): + def apply_git_tag(self, branch, git_tag, git_shas): """Annotate a commit in the commits database as being a part of the specified release. Args: branch (str): The name of the git branch from which the commit originates. - git_sha (str): The commit's SHA. git_tag (str): The first release tag following the commit. + git_shas: The commits' SHAs. """ - self.conn.execute("UPDATE git_commits SET git_tag = ? WHERE branch = ? AND git_sha = ?", - (git_tag, branch, git_sha)) + self.conn.execute( + ( + f"UPDATE git_commits SET git_tag = ?" + f" WHERE branch = ?" + f" AND git_sha in ({','.join('?' for _ in git_shas)})" + ), + [git_tag, branch] + git_shas) def apply_fix_version(self, jira_id, fix_version): """Annotate a Jira issue in the jira database as being part of the specified release @@ -199,13 +204,14 @@ class _RepoReader: _identify_amend_jira_id_pattern = re.compile(r'^amend (.+)', re.IGNORECASE) def __init__(self, db, fallback_actions_path, remote_name, development_branch, - release_line_regexp, parse_release_tags, **_kwargs): + release_line_regexp, branch_filter_regexp, parse_release_tags, **_kwargs): self._db = db self._repo = _RepoReader._open_repo() self._fallback_actions = _RepoReader._load_fallback_actions(fallback_actions_path) self._remote_name = remote_name self._development_branch = development_branch self._release_line_regexp = release_line_regexp + self._branch_filter_regexp = branch_filter_regexp self._parse_release_tags = parse_release_tags @property @@ -326,12 +332,7 @@ def _extract_release_tag(self, commit): return None def _set_release_tag(self, branch, tag, shas): - cnt = 0 - for sha in shas: - self._db.apply_git_tag(branch, sha, tag) - cnt += 1 - if cnt % 50 == 0: - self._db.flush_commits() + self._db.apply_git_tag(branch, tag, shas) self._db.flush_commits() def _resolve_ambiguity(self, commit): @@ -364,6 +365,10 @@ def populate_db_release_branch(self, origin_commit, release_branch): release_branch (str): The name of the ref whose history is to be parsed. 
""" global MANAGER + branch_filter_pattern = re.compile('%s/%s' % (self._remote_name, self._branch_filter_regexp)) + if not branch_filter_pattern.match(release_branch): + return + commits = list(self._repo.iter_commits( "%s...%s" % (origin_commit.hexsha, release_branch), reverse=True)) LOG.info("%s has %d commits since its origin at %s.", release_branch, len(commits), @@ -638,6 +643,10 @@ def _build_first_pass_parser(): '--fallback-actions-path', help='Path to a file containing _DB.Actions applicable to specific git shas.', default='fallback_actions.csv') + git_repo_group.add_argument( + '--branch-filter-regexp', + help='Limit repo parsing to branch names that match this filter expression.', + default=r'.*') jira_group = parser.add_argument_group('Interactions with Jira') jira_group.add_argument( '--jira-url', diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index d7ca64cbb742..b502f77b5c23 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -80,9 +80,9 @@ function personality_globals # TODO use PATCH_BRANCH to select jdk versions to use. # Yetus 0.7.0 enforces limits. Default proclimit is 1000. - # Up it. See HBASE-19902 for how we arrived at this number. + # Up it. See HBASE-25081 for how we arrived at this number. #shellcheck disable=SC2034 - PROC_LIMIT=12500 + PROC_LIMIT=30000 # Set docker container to run with 20g. Default is 4g in yetus. # See HBASE-19902 for how we arrived at 20g. @@ -318,7 +318,7 @@ function get_include_exclude_tests_arg fi else # Use branch specific exclude list when EXCLUDE_TESTS_URL and INCLUDE_TESTS_URL are empty - FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/excludes/" + FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/output/excludes" if wget "${FLAKY_URL}" -O "excludes"; then excludes=$(cat excludes) yetus_debug "excludes=${excludes}" @@ -553,14 +553,7 @@ function hadoopcheck_rebuild # All supported Hadoop versions that we want to test the compilation with # See the Hadoop section on prereqs in the HBase Reference Guide - if [[ "${PATCH_BRANCH}" = branch-1.3 ]]; then - yetus_info "Setting Hadoop 2 versions to test based on branch-1.3 rules." - if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7" - else - hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7" - fi - elif [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then + if [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-1.4 rules." if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop2_versions="2.7.7" @@ -570,9 +563,9 @@ function hadoopcheck_rebuild elif [[ "${PATCH_BRANCH}" = branch-1 ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-1 rules." if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.8.5 2.9.2" + hbase_hadoop2_versions="2.10.0" else - hbase_hadoop2_versions="2.8.5 2.9.2" + hbase_hadoop2_versions="2.10.0" fi elif [[ "${PATCH_BRANCH}" = branch-2.0 ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-2.0 rules." 
@@ -616,12 +609,19 @@ function hadoopcheck_rebuild else hbase_hadoop3_versions="3.0.3 3.1.1 3.1.2" fi + elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then + yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules" + if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then + hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0" + else + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.3.0" + fi else - yetus_info "Setting Hadoop 3 versions to test based on branch-2.2+/master/feature branch rules" + yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.2 3.2.1" + hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0" else - hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1" + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.3.0" fi fi diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh index ec9340a0b0e1..11267757b253 100755 --- a/dev-support/hbase-vote.sh +++ b/dev-support/hbase-vote.sh @@ -29,7 +29,7 @@ hbase-vote. A script for standard vote which verifies the following items 4. Built from source 5. Unit tests -Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] +Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] [-D property[=value]] ${SCRIPT} -h | --help -h | --help Show this screen. @@ -37,27 +37,33 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file- e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/ -k | --key '' A signature of the public key, e.g. 9AD2AE49 -f | --keys-file-url '' the URL of the key file, default is - http://www.apache.org/dist/hbase/KEYS + https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0 -D skipTests> Defaults to unset __EOF } +MVN_PROFILES=() +MVN_PROPERTIES=() + while ((${#})); do case "${1}" in -h | --help ) - usage; exit 0 ;; - -s | --source ) - SOURCE_URL="${2}"; shift 2 ;; - -k | --key ) - SIGNING_KEY="${2}"; shift 2 ;; + usage; exit 0 ;; + -s | --source ) + SOURCE_URL="${2}"; shift 2 ;; + -k | --key ) + SIGNING_KEY="${2}"; shift 2 ;; -f | --keys-file-url ) - KEY_FILE_URL="${2}"; shift 2 ;; + KEY_FILE_URL="${2}"; shift 2 ;; -o | --output-dir ) - OUTPUT_DIR="${2}"; shift 2 ;; + OUTPUT_DIR="${2}"; shift 2 ;; -P ) - MVN_ARGS="-P ${2}"; shift 2 ;; - * ) + MVN_PROFILES+=("-P ${2}"); shift 2 ;; + -D ) + MVN_PROPERTIES+=("-D ${2}"); shift 2 ;; + * ) usage >&2; exit 1 ;; esac done @@ -89,8 +95,8 @@ if [ ! 
-d "${OUTPUT_DIR}" ]; then fi # Maven profile must be provided -if [ -z "${MVN_ARGS}" ]; then - MVN_ARGS="-P runAllTests" +if [ ${#MVN_PROFILES[@]} -eq 0 ]; then + MVN_PROFILES=("-P runAllTests") fi OUTPUT_PATH_PREFIX="${OUTPUT_DIR}"/"${HBASE_RC_VERSION}" @@ -103,7 +109,7 @@ BUILD_FROM_SOURCE_PASSED=0 UNIT_TEST_PASSED=0 function download_and_import_keys() { - KEY_FILE_URL="${KEY_FILE_URL:-https://www.apache.org/dist/hbase/KEYS}" + KEY_FILE_URL="${KEY_FILE_URL:-https://downloads.apache.org/hbase/KEYS}" echo "Obtain and import the publisher key(s) from ${KEY_FILE_URL}" # download the keys file into file KEYS wget -O KEYS "${KEY_FILE_URL}" @@ -142,17 +148,18 @@ function unzip_from_source() { function rat_test() { rm -f "${OUTPUT_PATH_PREFIX}"_rat_test - mvn clean apache-rat:check 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 + mvn clean apache-rat:check "${MVN_PROPERTIES[@]}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 } function build_from_source() { rm -f "${OUTPUT_PATH_PREFIX}"_build_from_source - mvn clean install -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 + # Hardcode skipTests for faster build. Testing is covered later. + mvn clean install "${MVN_PROPERTIES[@]}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 } function run_tests() { rm -f "${OUTPUT_PATH_PREFIX}"_run_tests - mvn package "${MVN_ARGS}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 + mvn package "${MVN_PROFILES[@]}" "${MVN_PROPERTIES[@]}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 } function execute() { @@ -164,11 +171,11 @@ function print_when_exit() { * Signature: $( ((SIGNATURE_PASSED)) && echo "ok" || echo "failed" ) * Checksum : $( ((CHECKSUM_PASSED)) && echo "ok" || echo "failed" ) * Rat check (${JAVA_VERSION}): $( ((RAT_CHECK_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean apache-rat:check + - mvn clean apache-rat:check ${MVN_PROPERTIES[@]} * Built from source (${JAVA_VERSION}): $( ((BUILD_FROM_SOURCE_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean install -DskipTests + - mvn clean install ${MVN_PROPERTIES[@]} -DskipTests * Unit tests pass (${JAVA_VERSION}): $( ((UNIT_TEST_PASSED)) && echo "ok" || echo "failed" ) - - mvn package ${MVN_ARGS} + - mvn package ${MVN_PROFILES[@]} ${MVN_PROPERTIES[@]} -Dsurefire.rerunFailingTestsCount=3 __EOF if ((CHECKSUM_PASSED)) && ((SIGNATURE_PASSED)) && ((RAT_CHECK_PASSED)) && ((BUILD_FROM_SOURCE_PASSED)) && ((UNIT_TEST_PASSED)) ; then exit 0 diff --git a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile index 76b7d3d4140f..7e8ec44a4e6a 100644 --- a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile +++ b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile @@ -37,9 +37,9 @@ pipeline { stages { stage ('generate hbase website') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. 
- jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } steps { dir('hbase') { diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 1c489d6f28bb..5bb2b1b755a4 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -122,7 +122,10 @@ YETUS_ARGS+=("--whitespace-tabs-ignore-list=.*/generated/.*") YETUS_ARGS+=("--tests-filter=${TESTS_FILTER}") YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/hbase-personality.sh") YETUS_ARGS+=("--quick-hadoopcheck") -YETUS_ARGS+=("--skip-errorprone") +if [[ "${SKIP_ERRORPRONE}" = "true" ]]; then + # skip error prone + YETUS_ARGS+=("--skip-errorprone") +fi # effectively treat dev-support as a custom maven module YETUS_ARGS+=("--skip-dirs=dev-support") # For testing with specific hadoop version. Activates corresponding profile in maven runs. diff --git a/dev-support/jenkins_precommit_jira_yetus.sh b/dev-support/jenkins_precommit_jira_yetus.sh deleted file mode 100755 index 9961c3c98cfc..000000000000 --- a/dev-support/jenkins_precommit_jira_yetus.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -if [[ "true" = "${DEBUG}" ]]; then - set -x - printenv -fi - -##To set jenkins Environment Variables: -export TOOLS_HOME=/home/jenkins/tools -#export JAVA_HOME=${JAVA_HOME_HADOOP_MACHINES_HOME} -export FINDBUGS_HOME=${TOOLS_HOME}/findbugs/latest -export CLOVER_HOME=${TOOLS_HOME}/clover/latest -#export MAVEN_HOME=${MAVEN_3_0_4_HOME} -export MAVEN_HOME=/home/jenkins/tools/maven/apache-maven-3.0.5 - -#export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin: -export PATH=$PATH:${MAVEN_HOME}/bin: - -YETUS_RELEASE=0.11.1 -COMPONENT=${WORKSPACE}/component -TEST_FRAMEWORK=${WORKSPACE}/test_framework - -PATCHPROCESS=${WORKSPACE}/patchprocess -if [[ -d ${PATCHPROCESS} ]]; then - echo "[WARN] patch process already existed '${PATCHPROCESS}'" - rm -rf "${PATCHPROCESS}" -fi -mkdir -p "${PATCHPROCESS}" - - -## Checking on H* machine nonsense -echo "JAVA_HOME: ${JAVA_HOME}" -ls -l "${JAVA_HOME}" || true -echo "MAVEN_HOME: ${MAVEN_HOME}" -echo "maven version:" -mvn --offline --version || true -echo "getting machine specs, find in ${BUILD_URL}/artifact/patchprocess/machine/" -mkdir "${PATCHPROCESS}/machine" -cat /proc/cpuinfo >"${PATCHPROCESS}/machine/cpuinfo" 2>&1 || true -cat /proc/meminfo >"${PATCHPROCESS}/machine/meminfo" 2>&1 || true -cat /proc/diskstats >"${PATCHPROCESS}/machine/diskstats" 2>&1 || true -cat /sys/block/sda/stat >"${PATCHPROCESS}/machine/sys-block-sda-stat" 2>&1 || true -df -h >"${PATCHPROCESS}/machine/df-h" 2>&1 || true -ps -Awwf >"${PATCHPROCESS}/machine/ps-Awwf" 2>&1 || true -ifconfig -a >"${PATCHPROCESS}/machine/ifconfig-a" 2>&1 || true -lsblk -ta >"${PATCHPROCESS}/machine/lsblk-ta" 2>&1 || true -lsblk -fa >"${PATCHPROCESS}/machine/lsblk-fa" 2>&1 || true -cat /proc/loadavg >"${PATCHPROCESS}/loadavg" 2>&1 || true -ulimit -a >"${PATCHPROCESS}/machine/ulimit-a" 2>&1 || true - -## /H* - -### Download Yetus -if [[ "true" != "${USE_YETUS_PRERELEASE}" ]]; then - if [ ! -d "${TEST_FRAMEWORK}/yetus-${YETUS_RELEASE}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit 1 - # clear out any cached 'use a prerelease' versions - rm -rf apache-yetus-* - - mkdir -p "${TEST_FRAMEWORK}/.gpg" - chmod -R 700 "${TEST_FRAMEWORK}/.gpg" - - curl -L --fail -o "${TEST_FRAMEWORK}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --import "${TEST_FRAMEWORK}/KEYS_YETUS" - - ## Release - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --verify "apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - tar xzpf "apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - fi - TESTPATCHBIN=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/bin/test-patch - TESTPATCHLIB=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/lib/precommit -else - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - if [ ! 
-d "${prerelease_dirs[0]}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit - ## from github - curl -L --fail "https://api.github.com/repos/${YETUS_PRERELEASE_GITHUB}/tarball/HEAD" > yetus.tar.gz - tar xvpf yetus.tar.gz - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - fi - TESTPATCHBIN=${prerelease_dirs[0]}/precommit/test-patch.sh - TESTPATCHLIB=${prerelease_dirs[0]}/precommit -fi - -if [[ "true" = "${DEBUG}" ]]; then - # DEBUG print the test framework - ls -l "${TESTPATCHBIN}" - ls -la "${TESTPATCHLIB}/test-patch.d/" - # DEBUG print the local customization - if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - ls -la "${COMPONENT}/dev-support/test-patch.d/" - fi - YETUS_ARGS=(--debug "${YETUS_ARGS[@]}") -fi - - -if [ ! -x "${TESTPATCHBIN}" ] && [ -n "${TEST_FRAMEWORK}" ] && [ -d "${TEST_FRAMEWORK}" ]; then - echo "Something is amiss with the test framework; removing it. please re-run." - rm -rf "${TEST_FRAMEWORK}" - exit 1 -fi - -cd "${WORKSPACE}" || exit - - -# -# Yetus *always* builds with JAVA_HOME, so no need to list it. -# -# non-docker-mode JDK: -# --findbugs-home=/home/jenkins/tools/findbugs/latest \ - -# docker-mode: (openjdk 7 added for free) -# --findbugs-home=/usr \ -# --docker \ -# --multijdkdirs="/usr/lib/jvm/java-8-openjdk-amd64" \ - -if [[ "true" = "${RUN_IN_DOCKER}" ]]; then - YETUS_ARGS=( - --docker \ - "--multijdkdirs=/usr/lib/jvm/java-8-openjdk-amd64" \ - "--findbugs-home=/usr" \ - "${YETUS_ARGS[@]}" \ - ) - if [ -r "${COMPONENT}/dev-support/docker/Dockerfile" ]; then - YETUS_ARGS=("--dockerfile=${COMPONENT}/dev-support/docker/Dockerfile" "${YETUS_ARGS[@]}") - fi -else - YETUS_ARGS=("--findbugs-home=/home/jenkins/tools/findbugs/latest" "${YETUS_ARGS[@]}") -fi - -if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - YETUS_ARGS=("--user-plugins=${COMPONENT}/dev-support/test-patch.d" "${YETUS_ARGS[@]}") -fi - -# I don't trust Yetus compat enough yet, so in prerelease mode, skip our personality. -# this should give us an incentive to update the Yetus exemplar for HBase periodically. 
-if [ -r "${COMPONENT}/dev-support/hbase-personality.sh" ] && [[ "true" != "${USE_YETUS_PRERELEASE}" ]] ; then - YETUS_ARGS=("--personality=${COMPONENT}/dev-support/hbase-personality.sh" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${QUICK_HADOOPCHECK}" ]]; then - YETUS_ARGS=("--quick-hadoopcheck" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${SKIP_ERRORPRONE}" ]]; then - YETUS_ARGS=("--skip-errorprone" "${YETUS_ARGS[@]}") -fi - -YETUS_ARGS=("--skip-dirs=dev-support" "${YETUS_ARGS[@]}") - -/bin/bash "${TESTPATCHBIN}" \ - "${YETUS_ARGS[@]}" \ - --patch-dir="${PATCHPROCESS}" \ - --basedir="${COMPONENT}" \ - --mvn-custom-repos \ - --whitespace-eol-ignore-list=".*/generated/.*" \ - --whitespace-tabs-ignore-list=".*/generated/.*" \ - --jira-user=HBaseQA \ - --jira-password="${JIRA_PASSWORD}" \ - "HBASE-${ISSUE_NUM}" - -find "${COMPONENT}" -name target -exec chmod -R u+w {} \; diff --git a/dev-support/release-vm/.gitignore b/dev-support/release-vm/.gitignore new file mode 100644 index 000000000000..49b3eb5bd9e3 --- /dev/null +++ b/dev-support/release-vm/.gitignore @@ -0,0 +1,3 @@ +.vagrant/ +*.log +*.patch diff --git a/dev-support/release-vm/README.md b/dev-support/release-vm/README.md new file mode 100644 index 000000000000..74bb4392d2eb --- /dev/null +++ b/dev-support/release-vm/README.md @@ -0,0 +1,141 @@ + + +# HBase Release Env + +This is a vagrant project that provides a virtual machine environment suitable +for running an Apache HBase release. + +Requires: +* [VirtualBox](http://virtualbox.org) +* [Vagrant](http://virtualbox.org) +* The private portion of your signing key avilable in the local GPG agent +* The private portion of your Github authentication key available in either the local GPG agent or + local SSH agent + +## Usage + +Unlock the local keyring before proceeding (this should prompt you for your GPG passphrase). For +example, assuming you have an authentication key configured in your keyring, this will do the +trick. + +All terminal commands used below are assumed to be run with the current working directory as the +location containing the `Vagrantfile`. + +The term "Host" is used to mean the environment that runs the Vagrant process. The term "Guest" is +used to mean the virtual machine managed by the Host. + +### Ensure credentials work from the Host OS + +The ssh- and gpg-agent forwarding configuration used here assumes that your credentials work +on the Host. Verify both are working before you proceed with the Guest. Additionally, using the +credentials requires you to unlock the respective keyring, the state of which is persisted by the +agent process or processes running on the Host. + +See instructions in [`create-release`](../create-release/README.txt) regarding proper +configuration of ssh- and gpg-agents. + +Assuming the git repo origin is on GitHub, the following command will ensure that your ssh +credentials are working. On the Host, run: + +```sh +host:~$ ssh -T git@github.com +Hi ! You've successfully authenticated, but GitHub does not provide shell access. +``` + +Likewise, ensure you have an encryption key that can be used to sign a file. Again, on the Host, +run: + +```sh +host:~$ gpg --detach --armor --sign Vagrantfile +host:~$ gpg --verify Vagrantfile.asc +gpg: assuming signed data in 'Vagrantfile' +... +host:~$ rm Vagrantfile.asc +``` + +### Make public keyring available to the VM + +Export the public portion of your signing credentials where the Guest can access it. 
Vagrant +(+VirtualBox) shares the directory of the `Vagrantfile` with the Linux Guest via the `/vagrant` +mount point. Any files present in this working directory on the Host are available to the Guest. + +From the Host, run: + +```sh +host:~$ gpg --export @apache.org > gpg..apache.pub +``` + +### Launch the Guest VM + +Launch the Guest VM by running: + +```sh +host:~$ vagrant up +``` + +If anything about the Vagrant or VirtualBox environment has changed since you last used this VM, +it's best to `vagrant destroy -f` all local state and `vagrant up` a fresh instance. + +### Verify the Guest VM + +Connect to the Guest. This should forward your ssh- and gpg-agent session, as configured in the +`Vagrantfile`. + +```sh +host:~$ vagrant ssh +``` + +Now that you're in the Guest VM, be sure that all `gpg` commands you issue include the +`--no-autostart` flag. This ensures that the `gpg` process in the Guest communicates with the +agent running on the Host OS rather than launching its own process on the Guest OS. + +From the Guest, verify that ssh-agent forwarding is working, using the same test performed on the +Host: + +```sh +guest:~$ ssh -T git@github.com +Hi ! You've successfully authenticated, but GitHub does not provide shell access. +``` + +From the Guest, import your exported public identity and verify the gpg-agent passthrough is +working correctly. + +```sh +guest:~$ gpg --no-autostart --import /vagrant/gpg..apache.pub +... +gpg: Total number processed: 1 +gpg: imported: 1 +guest:~$ gpg --no-autostart --detach --armor --sign repos/hbase/pom.xml +guest:~$ gpg --no-autostart --verify repos/hbase/pom.xml.asc +gpg: assuming signed data in 'repos/hbase/pom.xml' +... +guest:~$ rm repos/hbase/pom.xml.asc +``` + +### Build a Release Candidate + +Finally, you can initiate the release build. Follow the instructions in +[`create-release`](../create-release/README.txt), i.e., + +```sh +guest:~$ mkdir ~/build-2.3.1-rc0 +guest:~$ cd repos/hbase +guest:~/repos/hbase$ ./dev-support/create-release/do-release-docker.sh -d ~/build-2.3.1-rc0/ ... +``` diff --git a/dev-support/release-vm/Vagrantfile b/dev-support/release-vm/Vagrantfile new file mode 100644 index 000000000000..e6a9a74b10ff --- /dev/null +++ b/dev-support/release-vm/Vagrantfile @@ -0,0 +1,50 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# + +Vagrant.configure("2") do |config| + + config.vm.define "rmvm" do |rmvm| + rmvm.vm.box = "ubuntu/focal64" + rmvm.vm.hostname = "rmvm" + + rmvm.vm.provision "shell", path: "provision/focal.sh", run: "once" + + rmvm.vm.provision "puppet", run: "always" do |puppet| + puppet.environment = "production" + puppet.environment_path = "puppet" + puppet.working_directory = "/tmp/vagrant-puppet" + puppet.options = "--test" + end + + rmvm.vm.provider "virtualbox" do |vb| + vb.name = "rmvm" + vb.cpus = 2 + vb.memory = (4 * 1024).to_s + end + end + + # pass through ssh-agent for github interaction + config.ssh.forward_agent = true + # pass through gpg-agent for artifact signing + config.ssh.extra_args = [ + "-R", "/run/user/1000/gnupg/S.gpg-agent:#{%x(gpgconf --list-dirs agent-extra-socket).strip}", + "-R", "/run/user/1000/gnupg/S.gpg-agent.extra:#{%x(gpgconf --list-dirs agent-extra-socket).strip}", + ] +end diff --git a/dev-support/release-vm/provision/focal.sh b/dev-support/release-vm/provision/focal.sh new file mode 100755 index 000000000000..8dc30bc0a2ba --- /dev/null +++ b/dev-support/release-vm/provision/focal.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Bootstrap provisioner for an Ubuntu Focal host. +# + +sudo apt-get update -qq +# puppet lets us manage the host, librarian-puppet lets us download puppet libraries +sudo apt-get install -y --no-install-recommends puppet librarian-puppet +cd /tmp/vagrant-puppet/environments/production && sudo librarian-puppet install --verbose diff --git a/dev-support/release-vm/puppet/production/.gitignore b/dev-support/release-vm/puppet/production/.gitignore new file mode 100644 index 000000000000..2df0470bce95 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.gitignore @@ -0,0 +1,3 @@ +.tmp/ +modules/ +**/*.lock diff --git a/dev-support/release-vm/puppet/production/.librarian/puppet/config b/dev-support/release-vm/puppet/production/.librarian/puppet/config new file mode 100644 index 000000000000..738f292ed132 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.librarian/puppet/config @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +LIBRARIAN_PUPPET_DESTRUCTIVE: 'false' +LIBRARIAN_PUPPET_USE_V1_API: '1' +LIBRARIAN_PUPPET_TMP: "/tmp/librarian_puppet/tmp" diff --git a/dev-support/release-vm/puppet/production/Puppetfile b/dev-support/release-vm/puppet/production/Puppetfile new file mode 100644 index 000000000000..3d5d5e44640e --- /dev/null +++ b/dev-support/release-vm/puppet/production/Puppetfile @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +forge "https://forgeapi.puppetlabs.com" + +mod 'puppet-packages', + :git => "https://github.com/greenaar/puppet-packages.git", + :ref => '8d6b8a85eea931e4cd045884d5786c1c1ff0df4c' +mod 'puppetlabs-docker', '3.10.1' +mod 'puppetlabs-stdlib', '5.2.0' +mod 'puppetlabs-vcsrepo', '3.1.0' +mod 'saz-ssh', '6.2.0' diff --git a/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml new file mode 100644 index 000000000000..44a66262e31a --- /dev/null +++ b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +classes: + - docker + - packages::manage + - ssh + +packages::latest: + - curl + - git + - gnupg + - gnupg-agent + +ssh::server_options: + StreamLocalBindUnlink: 'yes' diff --git a/dev-support/release-vm/puppet/production/environment.conf b/dev-support/release-vm/puppet/production/environment.conf new file mode 100644 index 000000000000..c6deb8dd9886 --- /dev/null +++ b/dev-support/release-vm/puppet/production/environment.conf @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +modulepath = modules:site:$basemodulepath +manifest = manifests diff --git a/dev-support/release-vm/puppet/production/hiera.yaml b/dev-support/release-vm/puppet/production/hiera.yaml new file mode 100644 index 000000000000..a8bb7c1c965f --- /dev/null +++ b/dev-support/release-vm/puppet/production/hiera.yaml @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +version: 5 + +hierarchy: + - name: "Per-node data" + path: "nodes/%{facts.hostname}.yaml" diff --git a/dev-support/release-vm/puppet/production/manifests/default.pp b/dev-support/release-vm/puppet/production/manifests/default.pp new file mode 100644 index 000000000000..e429d5af4bed --- /dev/null +++ b/dev-support/release-vm/puppet/production/manifests/default.pp @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +lookup('classes', Array[String], 'unique').include + +node rmvm { + $user = 'vagrant' + + # include the default `vagrant` user in the `docker` group + user { $user: + groups => ['docker'], + require => Package['docker'], + } + + # allow managing git repos in puppet + vcsrepo { "/home/${user}/repos/hbase": + ensure => latest, + branch => 'master', + group => $user, + owner => $user, + keep_local_changes => true, + provider => git, + remote => 'origin', + source => { + 'origin' => 'https://github.com/apache/hbase.git', + }, + depth => 1, + } +} diff --git a/hbase-assembly/src/main/assembly/client-components.xml b/hbase-assembly/src/main/assembly/client-components.xml index 4bad0d465e27..7cb97dd6822d 100644 --- a/hbase-assembly/src/main/assembly/client-components.xml +++ b/hbase-assembly/src/main/assembly/client-components.xml @@ -76,13 +76,6 @@ hbase-config.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/native diff --git a/hbase-assembly/src/main/assembly/components.xml b/hbase-assembly/src/main/assembly/components.xml index aaa6a831ad59..3e1394e7d5b1 100644 --- a/hbase-assembly/src/main/assembly/components.xml +++ b/hbase-assembly/src/main/assembly/components.xml @@ -69,13 +69,6 @@ **/*.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/hbase-webapps diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java index 68e6b4358716..0550f9bc1473 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; /** @@ -88,7 +87,7 @@ private Set loadHFileRefs(List tableList) throws IOException } } - @VisibleForTesting + @InterfaceAudience.Private void setCheckForFullyBackedUpTables(boolean b) { checkForFullyBackedUpTables = b; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index d49aef2c0c4f..e1fb73abe74d 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -52,8 +52,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Handles backup requests, creates backup info records in backup system table to keep track of * backup sessions, dispatches backup request. @@ -101,7 +99,6 @@ protected BackupInfo getBackupInfo() { * (TESTs only) * @param conf configuration */ - @VisibleForTesting public static void decorateMasterConfiguration(Configuration conf) { if (!isBackupEnabled(conf)) { return; @@ -137,7 +134,6 @@ public static void decorateMasterConfiguration(Configuration conf) { * TESTs only. 
* @param conf configuration */ - @VisibleForTesting public static void decorateRegionServerConfiguration(Configuration conf) { if (!isBackupEnabled(conf)) { return; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index c0103f5db31f..5bf1373a6e53 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -87,11 +88,21 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { // calculate the real files' size for the percentage in the future. // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); int res; - String[] args = new String[4]; - args[0] = "-snapshot"; - args[1] = backupInfo.getSnapshotName(table); - args[2] = "-copy-to"; - args[3] = backupInfo.getTableBackupDir(table); + ArrayList argsList = new ArrayList<>(); + argsList.add("-snapshot"); + argsList.add(backupInfo.getSnapshotName(table)); + argsList.add("-copy-to"); + argsList.add(backupInfo.getTableBackupDir(table)); + if (backupInfo.getBandwidth() > -1) { + argsList.add("-bandwidth"); + argsList.add(String.valueOf(backupInfo.getBandwidth())); + } + if (backupInfo.getWorkers() > -1) { + argsList.add("-mappers"); + argsList.add(String.valueOf(backupInfo.getWorkers())); + } + + String[] args = argsList.toArray(new String[0]); String jobname = "Full-Backup_" + backupInfo.getBackupId() + "_" + table.getNameAsString(); if (LOG.isDebugEnabled()) { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index 83e545cb3bc7..021341427a85 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -42,8 +42,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Base class for backup operation. 
Concrete implementation for * full and incremental backup are delegated to corresponding sub-classes: @@ -55,7 +53,6 @@ public abstract class TableBackupClient { public static final String BACKUP_CLIENT_IMPL_CLASS = "backup.client.impl.class"; - @VisibleForTesting public static final String BACKUP_TEST_MODE_STAGE = "backup.test.mode.stage"; private static final Logger LOG = LoggerFactory.getLogger(TableBackupClient.class); @@ -411,12 +408,10 @@ protected void completeBackup(final Connection conn, BackupInfo backupInfo, */ public abstract void execute() throws IOException; - @VisibleForTesting protected Stage getTestStage() { return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); } - @VisibleForTesting protected void failStageIf(Stage stage) throws IOException { Stage current = getTestStage(); if (current == stage) { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index 5d087a65f91f..f09e71005598 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -116,7 +116,7 @@ public void stop(boolean force) throws IOException { /** * If in a running state, creates the specified subprocedure for handling a backup procedure. - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c7f7ec197a9e..c987b49862d3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType; import org.apache.hadoop.hbase.client.Connection; @@ -56,8 +56,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Read/write operations on hbase:meta region as well as assignment information stored * to hbase:meta. @@ -247,7 +245,7 @@ public static Result scanByRegionEncodedName(Connection connection, String regio throws IOException { RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); ResultScanner resultScanner = table.getScanner(scan)) { @@ -262,7 +260,6 @@ public static Result scanByRegionEncodedName(Connection connection, String regio * true and we'll leave out offlined regions from returned list * @return List of all user-space regions. 
*/ - @VisibleForTesting public static List getAllRegions(Connection connection, boolean excludeOfflinedSplitParents) throws IOException { List> result; @@ -319,27 +316,24 @@ public static List getTableRegions(Connection connection, TableName * and scan until it hits a new table since that requires parsing the HRI to get the table name. * @param tableName bytes of table's name * @return configured Scan object - * @deprecated This is internal so please remove it when we get a chance. */ - @Deprecated - public static Scan getScanForTableName(Connection connection, TableName tableName) { + public static Scan getScanForTableName(Configuration conf, TableName tableName) { // Start key is just the table name with delimiters byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); // Stop key appends the smallest possible char to the table name byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION); - Scan scan = getMetaScan(connection, -1); + Scan scan = getMetaScan(conf, -1); scan.withStartRow(startKey); scan.withStopRow(stopKey); return scan; } - private static Scan getMetaScan(Connection connection, int rowUpperLimit) { + private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { Scan scan = new Scan(); - int scannerCaching = connection.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING, + int scannerCaching = conf.getInt(HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { + if (conf.getBoolean(HConstants.USE_META_REPLICAS, HConstants.DEFAULT_USE_META_REPLICAS)) { scan.setConsistency(Consistency.TIMELINE); } if (rowUpperLimit > 0) { @@ -471,7 +465,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException { int rowUpperLimit = maxRows > 0 ? 
maxRows : Integer.MAX_VALUE; - Scan scan = getMetaScan(connection, rowUpperLimit); + Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit); for (byte[] family : type.getFamilies()) { scan.addFamily(family); @@ -527,7 +521,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR private static RegionInfo getClosestRegionInfo(Connection connection, @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setReversed(true); scan.withStartRow(searchRow); try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { @@ -735,37 +729,6 @@ private static void deleteFromMetaTable(final Connection connection, final List< } } - /** - * Deletes some replica columns corresponding to replicas for the passed rows - * @param metaRows rows in hbase:meta - * @param replicaIndexToDeleteFrom the replica ID we would start deleting from - * @param numReplicasToRemove how many replicas to remove - * @param connection connection we're using to access meta table - */ - public static void removeRegionReplicasFromMeta(Set metaRows, - int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection) - throws IOException { - int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove; - for (byte[] row : metaRows) { - long now = EnvironmentEdgeManager.currentTime(); - Delete deleteReplicaLocations = new Delete(row); - for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) { - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getSeqNumColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getStartCodeColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerNameColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getRegionStateColumn(i), now); - } - - deleteFromMetaTable(connection, deleteReplicaLocations); - } - } - public static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER) @@ -806,22 +769,6 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf } } - /** - * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this - * does not add its daughter's as different rows, but adds information about the daughters in the - * same row as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if - * you want to do that. - * @param connection connection we're using - * @param regionInfo region information - * @throws IOException if problem connecting or updating meta - */ - @VisibleForTesting - public static void addRegionToMeta(Connection connection, RegionInfo regionInfo) - throws IOException { - addRegionsToMeta(connection, Collections.singletonList(regionInfo), 1); - } - /** * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is * CLOSED. 
@@ -847,17 +794,18 @@ public static void addRegionsToMeta(Connection connection, List regi int regionReplication, long ts) throws IOException { List puts = new ArrayList<>(); for (RegionInfo regionInfo : regionInfos) { - if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { - Put put = makePutFromRegionInfo(regionInfo, ts); - // New regions are added with initial state of CLOSED. - addRegionStateToPut(put, RegionState.State.CLOSED); - // Add empty locations for region replicas so that number of replicas can be cached - // whenever the primary region is looked up from meta - for (int i = 1; i < regionReplication; i++) { - addEmptyLocation(put, i); - } - puts.add(put); + if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) { + continue; + } + Put put = makePutFromRegionInfo(regionInfo, ts); + // New regions are added with initial state of CLOSED. + addRegionStateToPut(put, RegionState.State.CLOSED); + // Add empty locations for region replicas so that number of replicas can be cached + // whenever the primary region is looked up from meta + for (int i = 1; i < regionReplication; i++) { + addEmptyLocation(put, i); } + puts.add(put); } putsToMetaTable(connection, puts); LOG.info("Added {} regions to meta.", puts.size()); @@ -910,7 +858,6 @@ public static void deleteTableState(Connection connection, TableName table) thro * @param sn Server name * @param masterSystemTime wall clock time from master if passed in the open region RPC */ - @VisibleForTesting public static void updateRegionLocation(Connection connection, RegionInfo regionInfo, ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 3ed20065a672..54ccac0cb629 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -22,8 +22,6 @@ import java.util.List; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -36,7 +34,6 @@ */ @InterfaceAudience.Private public class RackManager { - private static final Logger LOG = LoggerFactory.getLogger(RackManager.class); public static final String UNKNOWN_RACK = "Unknown Rack"; private DNSToSwitchMapping switchMapping; diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index 4bab5e9c579d..49a1dea8a199 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -108,7 +108,7 @@ -XDcompilePolicy=simple - -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR + -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR -J-Xbootclasspath/p:${settings.localRepository}/com/google/errorprone/javac/${javac.version}/javac-${javac.version}.jar diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index a2297b66ab32..3cf6cc035238 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -39,8 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Helper class for generating/parsing * {@value org.apache.hadoop.hbase.HConstants#CATALOG_FAMILY_STR} family cells in meta table. @@ -101,7 +99,7 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) - .setEndKey(fields[2]).setSplit(false).setRegionId(regionId).setReplicaId(replicaId).build(); + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** @@ -276,7 +274,6 @@ public static byte[] getSeqNumColumn(int replicaId) { } /** The delimiter for meta columns for replicaIds > 0 */ - @VisibleForTesting static final char META_REPLICA_ID_DELIMITER = '_'; /** @@ -285,7 +282,6 @@ public static byte[] getSeqNumColumn(int replicaId) { * @param serverColumn the column qualifier * @return an int for the replicaId */ - @VisibleForTesting static int parseReplicaIdFromServerColumn(byte[] serverColumn) { String serverStr = Bytes.toString(serverColumn); @@ -311,8 +307,7 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { * @param replicaId the replicaId of the region * @return a byte[] for state qualifier */ - @VisibleForTesting - static byte[] getRegionStateColumn(int replicaId) { + public static byte[] getRegionStateColumn(int replicaId) { return replicaId == 0 ? 
HConstants.STATE_QUALIFIER : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 7d732607ae36..8cd3ea156c4d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase; import java.util.Map; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -182,4 +183,9 @@ default String getNameAsString() { * @return the block total weight of this region */ long getBlocksTotalWeight(); + + /** + * @return the compaction state of this region + */ + CompactionState getCompactionState(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index c3839662ac27..8349c35d7d33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -24,12 +24,14 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -58,6 +60,8 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? 
regionLoadPB.getBlocksLocalWithSsdWeight() : 0) .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( + regionLoadPB.getCompactionState())) .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) @@ -159,6 +163,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWeight; private long blocksLocalWithSsdWeight; private long blocksTotalWeight; + private CompactionState compactionState; private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -263,6 +268,11 @@ public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { + this.compactionState = compactionState; + return this; + } + public RegionMetrics build() { return new RegionMetricsImpl(name, storeCount, @@ -289,7 +299,8 @@ public RegionMetrics build() { dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, - blocksTotalWeight); + blocksTotalWeight, + compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -319,6 +330,7 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWeight; private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; + private final CompactionState compactionState; RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, @@ -344,7 +356,8 @@ private static class RegionMetricsImpl implements RegionMetrics { float dataLocalityForSsd, long blocksLocalWeight, long blocksLocalWithSsdWeight, - long blocksTotalWeight) { + long blocksTotalWeight, + CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -371,6 +384,7 @@ private static class RegionMetricsImpl implements RegionMetrics { this.blocksLocalWeight = blocksLocalWeight; this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight; this.blocksTotalWeight = blocksTotalWeight; + this.compactionState = compactionState; } @Override @@ -503,6 +517,11 @@ public long getBlocksTotalWeight() { return blocksTotalWeight; } + @Override + public CompactionState getCompactionState() { + return compactionState; + } + @Override public String toString() { StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", @@ -562,6 +581,8 @@ public String toString() { blocksLocalWithSsdWeight); Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + Strings.appendKeyValue(sb, "compactionState", + compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75d55cf17839..370ab6408254 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2512,8 +2512,8 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. 
+ * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 922f46703eb5..41b3845fc784 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -68,7 +68,7 @@ public class Append extends Mutation { * @return this */ public Append setTimeRange(long minStamp, long maxStamp) { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -162,10 +162,9 @@ public Append addColumn(byte[] family, byte[] qualifier, byte[] value) { /** * Add column and value to this Append operation. - * @param cell * @return This instance */ - @SuppressWarnings("unchecked") + @Override public Append add(final Cell cell) { try { super.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index c2d7e8a07829..2ed624ca01f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1714,8 +1714,8 @@ default CompletableFuture> getSlowLogResponses( * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. 
* @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java index 7aa9597c594f..fcd1724d10e5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; @@ -60,7 +59,6 @@ class AsyncBufferedMutatorImpl implements AsyncBufferedMutator { private boolean closed; - @VisibleForTesting Timeout periodicFlushTask; AsyncBufferedMutatorImpl(HashedWheelTimer periodicalFlushTimer, AsyncTable table, @@ -83,7 +81,6 @@ public Configuration getConfiguration() { } // will be overridden in test - @VisibleForTesting protected void internalFlush() { if (periodicFlushTask != null) { periodicFlushTask.cancel(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 97b70e1a7ad8..8a1ac5aac76d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -54,12 +54,11 @@ import org.apache.hadoop.hbase.util.ConcurrentMapUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; @@ -75,14 +74,11 @@ class AsyncConnectionImpl implements AsyncConnection { private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class); - @VisibleForTesting static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer( new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), 10, TimeUnit.MILLISECONDS); - private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY = "hbase.resolve.hostnames.on.failure"; - private final Configuration conf; final AsyncConnectionConfiguration connConf; @@ -97,8 +93,6 @@ class AsyncConnectionImpl implements AsyncConnection { final RpcControllerFactory rpcControllerFactory; - private final boolean hostnameCanChange; - private final AsyncRegionLocator locator; final AsyncRpcRetryingCallerFactory callerFactory; @@ -116,7 +110,7 @@ class AsyncConnectionImpl implements AsyncConnection { private final Optional stats; private final ClientBackoffPolicy backoffPolicy; - private ChoreService 
authService; + private ChoreService choreService; private final AtomicBoolean closed = new AtomicBoolean(false); @@ -130,6 +124,7 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri SocketAddress localAddress, User user) { this.conf = conf; this.user = user; + if (user.isLoginFromKeytab()) { spawnRenewalChore(user.getUGI()); } @@ -143,7 +138,6 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri this.rpcClient = RpcClientFactory.createClient( conf, clusterId, localAddress, metrics.orElse(null)); this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); - this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true); this.rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); this.locator = new AsyncRegionLocator(this, RETRY_TIMER); @@ -182,8 +176,22 @@ public void newDead(ServerName sn) { } private void spawnRenewalChore(final UserGroupInformation user) { - authService = new ChoreService("Relogin service"); - authService.scheduleChore(AuthUtil.getAuthRenewalChore(user)); + ChoreService service = getChoreService(); + service.scheduleChore(AuthUtil.getAuthRenewalChore(user)); + } + + /** + * If choreService has not been created yet, create the ChoreService. + * @return ChoreService + */ + synchronized ChoreService getChoreService() { + if (isClosed()) { + throw new IllegalStateException("connection is already closed"); + } + if (choreService == null) { + choreService = new ChoreService("AsyncConn Chore Service"); + } + return choreService; } @Override @@ -205,11 +213,15 @@ public void close() { if(LOG.isDebugEnabled()){ logCallStack(Thread.currentThread().getStackTrace()); } - IOUtils.closeQuietly(clusterStatusListener); - IOUtils.closeQuietly(rpcClient); - IOUtils.closeQuietly(registry); - if (authService != null) { - authService.shutdown(); + IOUtils.closeQuietly(clusterStatusListener, + e -> LOG.warn("failed to close clusterStatusListener", e)); + IOUtils.closeQuietly(rpcClient, e -> LOG.warn("failed to close rpcClient", e)); + IOUtils.closeQuietly(registry, e -> LOG.warn("failed to close registry", e)); + synchronized (this) { + if (choreService != null) { + choreService.shutdown(); + choreService = null; + } } metrics.ifPresent(MetricsConnection::shutdown); ConnectionOverAsyncConnection c = this.conn; @@ -254,7 +266,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(rsStubs, - getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(ClientService.getDescriptor().getName(), serverName), () -> createRegionServerStub(serverName)); } @@ -268,7 +280,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro AdminService.Interface getAdminStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(adminSubs, - getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(AdminService.getDescriptor().getName(), serverName), () -> createAdminServerStub(serverName)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java index 3571f96027f5..5ae9de6c476d 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java @@ -30,8 +30,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * The asynchronous locator for meta region. */ @@ -136,13 +134,11 @@ void clearCache(ServerName serverName) { } // only used for testing whether we have cached the location for a region. - @VisibleForTesting RegionLocations getRegionLocationInCache() { return metaRegionLocations.get(); } // only used for testing whether we have cached the location for a table. - @VisibleForTesting int getNumberOfCachedRegionLocations() { RegionLocations locs = metaRegionLocations.get(); return locs != null ? locs.numNonNullElements() : 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index b20216888167..1c686aca8b76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -30,6 +30,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter; import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.client.RegionInfo.createRegionName; +import static org.apache.hadoop.hbase.client.RegionLocator.LOCATOR_META_REPLICAS_MODE; import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; @@ -46,6 +47,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.ObjectUtils; import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.HBaseIOException; @@ -61,7 +63,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Objects; /** @@ -72,13 +73,11 @@ class AsyncNonMetaRegionLocator { private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class); - @VisibleForTesting static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = "hbase.client.meta.max.concurrent.locate.per.table"; private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8; - @VisibleForTesting static String LOCATE_PREFETCH_LIMIT = "hbase.client.locate.prefetch.limit"; private static final int DEFAULT_LOCATE_PREFETCH_LIMIT = 10; @@ -89,7 +88,10 @@ class AsyncNonMetaRegionLocator { private final int locatePrefetchLimit; - private final boolean useMetaReplicas; + // The mode tells if HedgedRead, LoadBalance mode is supported. + // The default mode is CatalogReplicaMode.None. 
+ private CatalogReplicaMode metaReplicaMode; + private CatalogReplicaLoadBalanceSelector metaReplicaSelector; private final ConcurrentMap cache = new ConcurrentHashMap<>(); @@ -196,8 +198,41 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( + conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + break; + case NONE: + // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. + boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS, + DEFAULT_USE_META_REPLICAS); + if (useMetaReplicas) { + this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ; + } + break; + default: + // Doing nothing + } } private TableCache getTableCache(TableName tableName) { @@ -433,9 +468,24 @@ private void locateInMeta(TableName tableName, LocateRequest req) { Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true) .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit) .setReadType(ReadType.PREAD); - if (useMetaReplicas) { - scan.setConsistency(Consistency.TIMELINE); + + switch (this.metaReplicaMode) { + case LOAD_BALANCE: + int metaReplicaId = this.metaReplicaSelector.select(tableName, req.row, req.locateType); + if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) { + // If the selector gives a non-primary meta replica region, then go with it. + // Otherwise, just go to primary in non-hedgedRead mode. + scan.setConsistency(Consistency.TIMELINE); + scan.setReplicaId(metaReplicaId); + } + break; + case HEDGED_READ: + scan.setConsistency(Consistency.TIMELINE); + break; + default: + // do nothing } + conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() { private boolean completeNormally = false; @@ -577,6 +627,13 @@ private void removeLocationFromCache(HRegionLocation loc) { if (!canUpdateOnError(loc, oldLoc)) { return; } + // Tell metaReplicaSelector that the location is stale. It will create a stale entry + // with timestamp internally. Next time the client looks up the same location, + // it will pick a different meta replica region. + if (this.metaReplicaMode == CatalogReplicaMode.LOAD_BALANCE) { + metaReplicaSelector.onError(loc); + } + RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId()); if (newLocs == null) { if (tableCache.cache.remove(startKey, oldLocs)) { @@ -649,7 +706,6 @@ void clearCache(ServerName serverName) { } // only used for testing whether we have cached the location for a region. - @VisibleForTesting RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) { TableCache tableCache = cache.get(tableName); if (tableCache == null) { @@ -659,7 +715,6 @@ RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) { } // only used for testing whether we have cached the location for a table. 
- @VisibleForTesting int getNumberOfCachedRegionLocations(TableName tableName) { TableCache tableCache = cache.get(tableName); if (tableCache == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 09eabfc1d53f..215a1c58faa1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -34,7 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; @@ -175,13 +174,11 @@ void clearCache() { nonMetaRegionLocator.clearCache(); } - @VisibleForTesting AsyncNonMetaRegionLocator getNonMetaRegionLocator() { return nonMetaRegionLocator; } // only used for testing whether we have cached the location for a region. - @VisibleForTesting RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) { if (TableName.isMetaTableName(tableName)) { return metaRegionLocator.getRegionLocationInCache(); @@ -191,7 +188,6 @@ RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) { } // only used for testing whether we have cached the location for a table. - @VisibleForTesting int getNumberOfCachedRegionLocations(TableName tableName) { if (TableName.isMetaTableName(tableName)) { return metaRegionLocator.getNumberOfCachedRegionLocations(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index aae4fc7e2ece..b390909d3696 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -396,9 +396,9 @@ default CompletableFuture> checkAndMutateAll( * Performs multiple mutations atomically on a single row. Currently {@link Put} and * {@link Delete} are supported. * @param mutation object that specifies the set of mutations to perform atomically - * @return A {@link CompletableFuture} that always returns null when complete normally. + * @return A {@link CompletableFuture} that returns results of Increment/Append operations */ - CompletableFuture mutateRow(RowMutations mutation); + CompletableFuture mutateRow(RowMutations mutation); /** * The scan API uses the observer pattern. 
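Note: the generic type parameters were stripped in the rendering above; the javadoc change indicates that AsyncTable.mutateRow(RowMutations) now completes with the results of the Increment/Append operations (presumably CompletableFuture<Result> instead of CompletableFuture<Void>). A rough usage sketch follows; it is not part of this patch, and the table, row, and column names are made up.

// Hypothetical caller of the new AsyncTable.mutateRow(RowMutations) return type.
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

class MutateRowExample {
  static long incrementAndAppend(AsyncTable<?> table, byte[] row, byte[] cf) throws Exception {
    RowMutations rm = new RowMutations(row);
    // Atomically bump a counter and append to an audit column in the same row.
    rm.add(new Increment(row).addColumn(cf, Bytes.toBytes("counter"), 1L));
    rm.add(new Append(row).addColumn(cf, Bytes.toBytes("log"), Bytes.toBytes("+1")));
    // Before this change the future completed with null; with the new signature the
    // Result carries the post-mutation cells of the Increment/Append operations.
    CompletableFuture<Result> future = table.mutateRow(rm);
    Result result = future.join();
    return Bytes.toLong(result.getValue(cf, Bytes.toBytes("counter")));
  }
}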
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index f931d67a3107..a124467cd96f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -218,7 +218,7 @@ public List> checkAndMutate( } @Override - public CompletableFuture mutateRow(RowMutations mutation) { + public CompletableFuture mutateRow(RowMutations mutation) { return wrap(rawTable.mutateRow(mutation)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 321f44e87b51..96e3ec4173a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; @@ -89,7 +88,6 @@ default CompletableFuture getRegionLocation(byte[] row, int rep * Find all the replicas for the region on which the given row is being served. * @param row Row to find. * @return Locations for all the replicas of the row. - * @throws IOException if a remote or network exception occurs */ default CompletableFuture> getRegionLocations(byte[] row) { return getRegionLocations(row, false); @@ -100,7 +98,6 @@ default CompletableFuture> getRegionLocations(byte[] row) * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. - * @throws IOException if a remote or network exception occurs */ CompletableFuture> getRegionLocations(byte[] row, boolean reload); @@ -120,9 +117,8 @@ default CompletableFuture> getRegionLocations(byte[] row) *
<p/>
* This is mainly useful for the MapReduce integration. * @return Array of region starting row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getStartKeys() throws IOException { + default CompletableFuture> getStartKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getFirst).collect(Collectors.toList())); } @@ -132,9 +128,8 @@ default CompletableFuture> getStartKeys() throws IOException { *
<p/>
* This is mainly useful for the MapReduce integration. * @return Array of region ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getEndKeys() throws IOException { + default CompletableFuture> getEndKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getSecond).collect(Collectors.toList())); } @@ -144,9 +139,8 @@ default CompletableFuture> getEndKeys() throws IOException { *
<p/>
* This is mainly useful for the MapReduce integration. * @return Pair of arrays of region starting and ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture>> getStartEndKeys() throws IOException { + default CompletableFuture>> getStartEndKeys() { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index cd5d5adb290a..2858d2f915f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -23,13 +23,11 @@ import java.io.InterruptedIOException; import java.util.ArrayDeque; import java.util.Queue; - +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; /** * The {@link ResultScanner} implementation for {@link AsyncTable}. It will fetch data automatically @@ -140,8 +138,7 @@ public synchronized Result next() throws IOException { return null; } if (error != null) { - Throwables.propagateIfPossible(error, IOException.class); - throw new IOException(error); + FutureUtils.rethrow(error); } try { wait(); @@ -178,7 +175,6 @@ public boolean renewLease() { } // used in tests to test whether the scanner has been suspended - @VisibleForTesting synchronized boolean isSuspended() { return resumer != null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java new file mode 100644 index 000000000000..27be88a9def2 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.TableName; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A Catalog replica selector decides which catalog replica to go for read requests when it is + * configured as CatalogReplicaMode.LoadBalance. + */ +@InterfaceAudience.Private +interface CatalogReplicaLoadBalanceSelector { + + int UNINITIALIZED_NUM_OF_REPLICAS = -1; + + /** + * This method is called when input location is stale, i.e, when clients run into + * org.apache.hadoop.hbase.NotServingRegionException. + * @param loc stale location + */ + void onError(HRegionLocation loc); + + /** + * Select a catalog replica region where client go to loop up the input row key. + * + * @param tablename table name + * @param row key to look up + * @param locateType locate type + * @return replica id + */ + int select(TableName tablename, byte[] row, RegionLocateType locateType); +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java new file mode 100644 index 000000000000..fe686f79ab8a --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.function.IntSupplier; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Factory to create a {@link CatalogReplicaLoadBalanceSelector} + */ +@InterfaceAudience.Private +final class CatalogReplicaLoadBalanceSelectorFactory { + /** + * Private Constructor + */ + private CatalogReplicaLoadBalanceSelectorFactory() { + } + + /** + * Create a CatalogReplicaLoadBalanceReplicaSelector based on input config. + * @param replicaSelectorClass Selector classname. + * @param tableName System table name. 
+ * @param conn {@link AsyncConnectionImpl} + * @return {@link CatalogReplicaLoadBalanceSelector} + */ + public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, + TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) { + return ReflectionUtils.instantiateWithCustomCtor(replicaSelectorClass, + new Class[] { TableName.class, AsyncConnectionImpl.class, IntSupplier.class }, + new Object[] { tableName, conn, getReplicaCount }); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java new file mode 100644 index 000000000000..01996b34e2ef --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; +import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; +import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.IntSupplier; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +/** + *
CatalogReplicaLoadBalanceSimpleSelector implements a simple catalog replica load
+ * balancing algorithm. It maintains a stale location cache for each table. Whenever a client looks
+ * up a location, it first checks whether the row is in the stale location cache. If it is, the
+ * location from the catalog replica is stale and the client goes to the primary region to look up
+ * the up-to-date location; otherwise, it randomly picks a replica region for the lookup. When
+ * clients receive NotServingRegionException from region servers, they add these region locations
+ * to the stale location cache. The stale cache is cleaned up periodically by a chore.
+ *
+ * It follows a simple algorithm to choose a replica to go to:
+ * <ol>
+ *  <li>If there is no stale location entry for the row it looks up, it randomly picks a replica
+ *      region to do the lookup.</li>
+ *  <li>If the location from the replica region is stale, the client gets NotServingRegionException
+ *      from the region server; in this case, it creates a StaleLocationCacheEntry in
+ *      CatalogReplicaLoadBalanceSimpleSelector.</li>
+ *  <li>When the client does a location lookup, it first checks the StaleLocationCache for the row;
+ *      if an entry exists, it goes to the primary meta region for the lookup, otherwise it follows
+ *      step 1.</li>
+ *  <li>A chore periodically cleans up entries in the StaleLocationCache.</li>
+ * </ol>
+ */ +class CatalogReplicaLoadBalanceSimpleSelector implements + CatalogReplicaLoadBalanceSelector, Stoppable { + private static final Logger LOG = + LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); + private final long STALE_CACHE_TIMEOUT_IN_MILLISECONDS = 3000; // 3 seconds + private final int STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS = 1500; // 1.5 seconds + private final int REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS = 60000; // 1 minute + + /** + * StaleLocationCacheEntry is the entry when a stale location is reported by an client. + */ + private static final class StaleLocationCacheEntry { + // timestamp in milliseconds + private final long timestamp; + + private final byte[] endKey; + + StaleLocationCacheEntry(final byte[] endKey) { + this.endKey = endKey; + timestamp = EnvironmentEdgeManager.currentTime(); + } + + public byte[] getEndKey() { + return this.endKey; + } + + public long getTimestamp() { + return this.timestamp; + } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("endKey", endKey) + .append("timestamp", timestamp) + .toString(); + } + } + + private final ConcurrentMap> + staleCache = new ConcurrentHashMap<>(); + private volatile int numOfReplicas; + private final AsyncConnectionImpl conn; + private final TableName tableName; + private final IntSupplier getNumOfReplicas; + private volatile boolean isStopped = false; + + CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, + IntSupplier getNumOfReplicas) { + this.conn = conn; + this.tableName = tableName; + this.getNumOfReplicas = getNumOfReplicas; + + // This numOfReplicas is going to be lazy initialized. + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + // Start chores + this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); + this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); + } + + /** + * When a client runs into RegionNotServingException, it will call this method to + * update Selector's internal state. + * @param loc the location which causes exception. + */ + public void onError(HRegionLocation loc) { + ConcurrentNavigableMap tableCache = + computeIfAbsent(staleCache, loc.getRegion().getTable(), + () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); + byte[] startKey = loc.getRegion().getStartKey(); + tableCache.putIfAbsent(startKey, + new StaleLocationCacheEntry(loc.getRegion().getEndKey())); + LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", + loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey()); + } + + /** + * Select an random replica id. In case there is no replica region configured, return + * the primary replica id. + * @return Replica id + */ + private int getRandomReplicaId() { + int cachedNumOfReplicas = this.numOfReplicas; + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + cachedNumOfReplicas = refreshCatalogReplicaCount(); + this.numOfReplicas = cachedNumOfReplicas; + } + // In case of no replica configured, return the primary region id. + if (cachedNumOfReplicas <= 1) { + return RegionInfo.DEFAULT_REPLICA_ID; + } + return 1 + ThreadLocalRandom.current().nextInt(cachedNumOfReplicas - 1); + } + + /** + * When it looks up a location, it will call this method to find a replica region to go. + * For a normal case, > 99% of region locations from catalog/meta replica will be up to date. 
+ * In extreme cases such as region server crashes, it will depends on how fast replication + * catches up. + * + * @param tablename table name it looks up + * @param row key it looks up. + * @param locateType locateType, Only BEFORE and CURRENT will be passed in. + * @return catalog replica id + */ + public int select(final TableName tablename, final byte[] row, + final RegionLocateType locateType) { + Preconditions.checkArgument(locateType == RegionLocateType.BEFORE || + locateType == RegionLocateType.CURRENT, + "Expected type BEFORE or CURRENT but got: %s", locateType); + + ConcurrentNavigableMap tableCache = staleCache.get(tablename); + + // If there is no entry in StaleCache, select a random replica id. + if (tableCache == null) { + return getRandomReplicaId(); + } + + Map.Entry entry; + boolean isEmptyStopRow = isEmptyStopRow(row); + // Only BEFORE and CURRENT are passed in. + if (locateType == RegionLocateType.BEFORE) { + entry = isEmptyStopRow ? tableCache.lastEntry() : tableCache.lowerEntry(row); + } else { + entry = tableCache.floorEntry(row); + } + + // It is not in the stale cache, return a random replica id. + if (entry == null) { + return getRandomReplicaId(); + } + + // The entry here is a possible match for the location. Check if the entry times out first as + // long comparing is faster than comparing byte arrays(in most cases). It could remove + // stale entries faster. If the possible match entry does not time out, it will check if + // the entry is a match for the row passed in and select the replica id accordingly. + if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= + STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), + entry); + tableCache.remove(entry.getKey()); + return getRandomReplicaId(); + } + + byte[] endKey = entry.getValue().getEndKey(); + + // The following logic is borrowed from AsyncNonMetaRegionLocator. + if (isEmptyStopRow(endKey)) { + LOG.debug("Lookup {} goes to primary region", row); + return RegionInfo.DEFAULT_REPLICA_ID; + } + + if (locateType == RegionLocateType.BEFORE) { + if (!isEmptyStopRow && Bytes.compareTo(endKey, row) >= 0) { + LOG.debug("Lookup {} goes to primary meta", row); + return RegionInfo.DEFAULT_REPLICA_ID; + } + } else { + if (Bytes.compareTo(row, endKey) < 0) { + LOG.debug("Lookup {} goes to primary meta", row); + return RegionInfo.DEFAULT_REPLICA_ID; + } + } + + // Not in stale cache, return a random replica id. + return getRandomReplicaId(); + } + + // This class implements the Stoppable interface as chores needs a Stopable object, there is + // no-op on this Stoppable object currently. 
+ @Override + public void stop(String why) { + isStopped = true; + } + + @Override + public boolean isStopped() { + return isStopped; + } + + private void cleanupReplicaReplicaStaleCache() { + long curTimeInMills = EnvironmentEdgeManager.currentTime(); + for (ConcurrentNavigableMap tableCache : staleCache.values()) { + Iterator> it = + tableCache.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry entry = it.next(); + if (curTimeInMills - entry.getValue().getTimestamp() >= + STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + LOG.debug("clean entry {}, {} from stale cache", entry.getKey(), entry.getValue()); + it.remove(); + } + } + } + } + + private int refreshCatalogReplicaCount() { + int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); + LOG.debug("Refreshed replica count {}", newNumOfReplicas); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; + } + + int cachedNumOfReplicas = this.numOfReplicas; + if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || + (cachedNumOfReplicas != newNumOfReplicas)) { + this.numOfReplicas = newNumOfReplicas; + } + return newNumOfReplicas; + } + + private ScheduledChore getCacheCleanupChore( + final CatalogReplicaLoadBalanceSimpleSelector selector) { + return new ScheduledChore("CleanupCatalogReplicaStaleCache", this, + STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { + @Override + protected void chore() { + selector.cleanupReplicaReplicaStaleCache(); + } + }; + } + + private ScheduledChore getRefreshReplicaCountChore( + final CatalogReplicaLoadBalanceSimpleSelector selector) { + return new ScheduledChore("RefreshReplicaCountChore", this, + REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { + @Override + protected void chore() { + selector.refreshCatalogReplicaCount(); + } + }; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java new file mode 100644 index 000000000000..40062e32e83c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + *
There are two modes of catalog replica support:
+ * <ol>
+ *  <li>HEDGED_READ - The client sends a request to the primary region first; if no response comes
+ *      back within a configured amount of time, it sends requests to all replica regions and takes
+ *      the first response.</li>
+ *  <li>LOAD_BALANCE - The client sends requests to replica regions in a round-robin mode; if
+ *      results from a replica region are stale, subsequent requests for those stale locations go
+ *      to the primary region. In this mode, scan requests are load balanced across all replica
+ *      regions.</li>
+ * </ol>
+ */ +@InterfaceAudience.Private +enum CatalogReplicaMode { + NONE { + @Override + public String toString() { + return "None"; + } + }, + HEDGED_READ { + @Override + public String toString() { + return "HedgedRead"; + } + }, + LOAD_BALANCE { + @Override + public String toString() { + return "LoadBalance"; + } + }; + + public static CatalogReplicaMode fromString(final String value) { + for(CatalogReplicaMode mode : values()) { + if (mode.toString().equalsIgnoreCase(value)) { + return mode; + } + } + throw new IllegalArgumentException(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java index f7d846b44c7a..b7f17f310fd8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java @@ -17,14 +17,7 @@ */ package org.apache.hadoop.hbase.client; -import java.util.Collections; -import java.util.List; -import java.util.NavigableMap; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; @@ -60,7 +53,7 @@ */ @InterfaceAudience.Public @InterfaceStability.Evolving -public final class CheckAndMutate extends Mutation { +public final class CheckAndMutate implements Row { /** * A builder class for building a CheckAndMutate object. @@ -202,15 +195,15 @@ public CheckAndMutate build(Append append) { } /** - * @param mutation mutations to perform if check succeeds + * @param mutations mutations to perform if check succeeds * @return a CheckAndMutate object */ - public CheckAndMutate build(RowMutations mutation) { - preCheck(mutation); + public CheckAndMutate build(RowMutations mutations) { + preCheck(mutations); if (filter != null) { - return new CheckAndMutate(row, filter, timeRange, mutation); + return new CheckAndMutate(row, filter, timeRange, mutations); } else { - return new CheckAndMutate(row, family, qualifier, op, value, timeRange, mutation); + return new CheckAndMutate(row, family, qualifier, op, value, timeRange, mutations); } } } @@ -225,6 +218,7 @@ public static Builder newBuilder(byte[] row) { return new Builder(row); } + private final byte[] row; private final byte[] family; private final byte[] qualifier; private final CompareOperator op; @@ -235,7 +229,7 @@ public static Builder newBuilder(byte[] row) { private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final CompareOperator op, byte[] value, TimeRange timeRange, Row action) { - super(row, HConstants.LATEST_TIMESTAMP, Collections.emptyNavigableMap()); + this.row = row; this.family = family; this.qualifier = qualifier; this.op = op; @@ -246,7 +240,7 @@ private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final Compare } private CheckAndMutate(byte[] row, Filter filter, TimeRange timeRange, Row action) { - super(row, HConstants.LATEST_TIMESTAMP, Collections.emptyNavigableMap()); + this.row = row; this.family = null; this.qualifier = null; this.op = null; @@ -256,6 +250,14 @@ private CheckAndMutate(byte[] row, Filter filter, TimeRange timeRange, Row actio this.action = action; } + /** + * @return the row + */ + @Override + public byte[] getRow() { + return row; + } + /** * @return 
the family to check */ @@ -311,84 +313,4 @@ public TimeRange getTimeRange() { public Row getAction() { return action; } - - @Override - public NavigableMap> getFamilyCellMap() { - if (action instanceof Mutation) { - return ((Mutation) action).getFamilyCellMap(); - } - throw new UnsupportedOperationException(); - } - - @Override - public CellBuilder getCellBuilder(CellBuilderType cellBuilderType) { - if (action instanceof Mutation) { - return ((Mutation) action).getCellBuilder(); - } - throw new UnsupportedOperationException(); - } - - @Override - public long getTimestamp() { - if (action instanceof Mutation) { - return ((Mutation) action).getTimestamp(); - } - throw new UnsupportedOperationException(); - } - - @Override - public Mutation setTimestamp(long timestamp) { - if (action instanceof Mutation) { - return ((Mutation) action).setTimestamp(timestamp); - } - throw new UnsupportedOperationException(); - } - - @Override - public Durability getDurability() { - if (action instanceof Mutation) { - return ((Mutation) action).getDurability(); - } - throw new UnsupportedOperationException(); - } - - @Override - public Mutation setDurability(Durability d) { - if (action instanceof Mutation) { - return ((Mutation) action).setDurability(d); - } - throw new UnsupportedOperationException(); - } - - @Override - public byte[] getAttribute(String name) { - if (action instanceof Mutation) { - return ((Mutation) action).getAttribute(name); - } - throw new UnsupportedOperationException(); - } - - @Override - public OperationWithAttributes setAttribute(String name, byte[] value) { - if (action instanceof Mutation) { - return ((Mutation) action).setAttribute(name, value); - } - throw new UnsupportedOperationException(); - } - - @Override - public int getPriority() { - if (action instanceof Mutation) { - return ((Mutation) action).getPriority(); - } - return ((RowMutations) action).getMaxPriority(); - } - - @Override - public OperationWithAttributes setPriority(int priority) { - if (action instanceof Mutation) { - return ((Mutation) action).setPriority(priority); - } - throw new UnsupportedOperationException(); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 3889d32dda54..7afc3872b465 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -294,11 +294,6 @@ public static Map getDefaultValues() { DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); - DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); - DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); - DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN)); // Do NOT add this key/value by default. NEW_VERSION_BEHAVIOR is NOT defined in hbase1 so // it is not possible to make an hbase1 HCD the same as an hbase2 HCD and so the replication // compare of schemas will fail. 
It is OK not adding the below to the initial map because of @@ -682,7 +677,7 @@ private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { * @return this (for chained invocation) */ private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); @@ -1233,7 +1228,7 @@ public Map getConfiguration() { * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) { - if (value == null) { + if (value == null || value.length() == 0) { configuration.remove(key); } else { configuration.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java index 9edf8c251ee5..19ca9adbf3f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java @@ -15,8 +15,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Configuration parameters for the connection. * Configuration is a heavy weight registry that does a lot of string operations and regex matching. @@ -125,7 +123,6 @@ public class ConnectionConfiguration { * This is for internal testing purpose (using the default value). * In real usage, we should read the configuration from the Configuration object. */ - @VisibleForTesting protected ConnectionConfiguration() { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; this.writeBufferPeriodicFlushTimeoutMs = WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 1228c7e592ea..5b8cb8463225 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; @@ -128,32 +129,17 @@ public static void setServerSideHConnectionRetriesConfig(final Configuration c, } /** - * Return retires + 1. The returned value will be in range [1, Integer.MAX_VALUE]. + * Get a unique key for the rpc stub to the given server. */ - static int retries2Attempts(int retries) { - return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1); + static String getStubKey(String serviceName, ServerName serverName) { + return String.format("%s@%s", serviceName, serverName); } /** - * Get a unique key for the rpc stub to the given server. + * Return retires + 1. The returned value will be in range [1, Integer.MAX_VALUE]. */ - static String getStubKey(String serviceName, ServerName serverName, boolean hostnameCanChange) { - // Sometimes, servers go down and they come back up with the same hostname but a different - // IP address. Force a resolution of the rsHostname by trying to instantiate an - // InetSocketAddress, and this way we will rightfully get a new stubKey. 
- // Also, include the hostname in the key so as to take care of those cases where the - // DNS name is different but IP address remains the same. - String hostname = serverName.getHostname(); - int port = serverName.getPort(); - if (hostnameCanChange) { - try { - InetAddress ip = InetAddress.getByName(hostname); - return serviceName + "@" + hostname + "-" + ip.getHostAddress() + ":" + port; - } catch (UnknownHostException e) { - LOG.warn("Can not resolve " + hostname + ", please check your network", e); - } - } - return serviceName + "@" + hostname + ":" + port; + static int retries2Attempts(int retries) { + return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1); } static void checkHasFamilies(Mutation mutation) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index d3b57fb461cf..0f04407ac3e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -74,7 +74,6 @@ public class Get extends Query implements Row { private int storeOffset = 0; private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; - private boolean closestRowBefore = false; private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** @@ -199,11 +198,10 @@ public Get addColumn(byte [] family, byte [] qualifier) { * [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive - * @throws IOException * @return this for invocation chaining */ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -214,17 +212,17 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { */ public Get setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. 
", e); throw e; } - return this; } - @Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index df448eb91b6a..bd824d4a855f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -148,9 +148,8 @@ public TimeRange getTimeRange() { * @throws IOException if invalid time range * @return this */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); + public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { + tr = TimeRange.between(minStamp, maxStamp); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 2a7ae16df47a..0975289d5164 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.util.DNS.ServerType; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; @@ -151,7 +150,6 @@ void populateMasterStubs(Set masters) throws IOException { *

* Will be called in {@code HBaseTestingUtility}. */ - @VisibleForTesting public static String getMasterAddr(Configuration conf) throws UnknownHostException { String masterAddrFromConf = conf.get(MASTER_ADDRS_KEY); if (!Strings.isNullOrEmpty(masterAddrFromConf)) { @@ -325,14 +323,12 @@ private static List transformServerNames(GetMastersResponse resp) { } CompletableFuture> getMasters() { - System.out.println("getMasters()"); return this . call((c, s, d) -> s.getMasters( c, GetMastersRequest.getDefaultInstance(), d), r -> r.getMasterServersCount() != 0, "getMasters()").thenApply(MasterRegistry::transformServerNames); } - @VisibleForTesting Set getParsedMasterServers() { return masterAddr2Stub.keySet(); } @@ -346,4 +342,4 @@ public void close() { rpcClient.close(); } } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index e9f4c61f5a20..9db8b6090e10 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -26,7 +26,6 @@ import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.Timer; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -66,6 +65,8 @@ public class MetricsConnection implements StatisticTrackable { private static final String HEAP_BASE = "heapOccupancy_"; private static final String CACHE_BASE = "cacheDroppingExceptions_"; private static final String UNKNOWN_EXCEPTION = "UnknownException"; + private static final String NS_LOOKUPS = "nsLookups"; + private static final String NS_LOOKUPS_FAILED = "nsLookupsFailed"; private static final String CLIENT_SVC = ClientService.getDescriptor().getName(); /** A container class for collecting details about the RPC call as it percolates. 
*/ @@ -126,12 +127,11 @@ public void setNumActionsPerServer(int numActionsPerServer) { } } - @VisibleForTesting protected static final class CallTracker { private final String name; - @VisibleForTesting final Timer callTimer; - @VisibleForTesting final Histogram reqHist; - @VisibleForTesting final Histogram respHist; + final Timer callTimer; + final Histogram reqHist; + final Histogram respHist; private CallTracker(MetricRegistry registry, String name, String subName, String scope) { StringBuilder sb = new StringBuilder(CLIENT_SVC).append("_").append(name); @@ -182,7 +182,6 @@ public void update(RegionLoadStats regionStatistics) { } } - @VisibleForTesting protected static class RunnerStats { final Counter normalRunners; final Counter delayRunners; @@ -210,7 +209,6 @@ public void updateDelayInterval(long interval) { } } - @VisibleForTesting protected ConcurrentHashMap> serverStats = new ConcurrentHashMap<>(); @@ -275,36 +273,38 @@ private static interface NewMetric { // static metrics - @VisibleForTesting protected final Counter metaCacheHits; - @VisibleForTesting protected final Counter metaCacheMisses; - @VisibleForTesting protected final CallTracker getTracker; - @VisibleForTesting protected final CallTracker scanTracker; - @VisibleForTesting protected final CallTracker appendTracker; - @VisibleForTesting protected final CallTracker deleteTracker; - @VisibleForTesting protected final CallTracker incrementTracker; - @VisibleForTesting protected final CallTracker putTracker; - @VisibleForTesting protected final CallTracker multiTracker; - @VisibleForTesting protected final RunnerStats runnerStats; - @VisibleForTesting protected final Counter metaCacheNumClearServer; - @VisibleForTesting protected final Counter metaCacheNumClearRegion; - @VisibleForTesting protected final Counter hedgedReadOps; - @VisibleForTesting protected final Counter hedgedReadWin; - @VisibleForTesting protected final Histogram concurrentCallsPerServerHist; - @VisibleForTesting protected final Histogram numActionsPerServerHist; + protected final Counter metaCacheHits; + protected final Counter metaCacheMisses; + protected final CallTracker getTracker; + protected final CallTracker scanTracker; + protected final CallTracker appendTracker; + protected final CallTracker deleteTracker; + protected final CallTracker incrementTracker; + protected final CallTracker putTracker; + protected final CallTracker multiTracker; + protected final RunnerStats runnerStats; + protected final Counter metaCacheNumClearServer; + protected final Counter metaCacheNumClearRegion; + protected final Counter hedgedReadOps; + protected final Counter hedgedReadWin; + protected final Histogram concurrentCallsPerServerHist; + protected final Histogram numActionsPerServerHist; + protected final Counter nsLookups; + protected final Counter nsLookupsFailed; // dynamic metrics // These maps are used to cache references to the metric instances that are managed by the // registry. I don't think their use perfectly removes redundant allocations, but it's // a big improvement over calling registry.newMetric each time. 
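For context on the two name-service lookup counters introduced above (nsLookups and nsLookupsFailed, with incrNsLookups()/incrNsLookupsFailed() added further down in this file), here is a rough sketch of how a call site inside the client could record lookups against them. This is illustrative only and not part of the patch; the helper and its caller are assumed.

  import java.net.InetAddress;
  import java.net.UnknownHostException;

  // Hypothetical helper: resolve a hostname and record the outcome on the new counters.
  // 'metrics' may be null when client-side metrics are disabled.
  static InetAddress resolveAndCount(String hostname, MetricsConnection metrics)
      throws UnknownHostException {
    try {
      InetAddress addr = InetAddress.getByName(hostname);
      if (metrics != null) {
        metrics.incrNsLookups();         // successful name service lookup
      }
      return addr;
    } catch (UnknownHostException e) {
      if (metrics != null) {
        metrics.incrNsLookupsFailed();   // failed name service lookup
      }
      throw e;
    }
  }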
- @VisibleForTesting protected final ConcurrentMap rpcTimers = + protected final ConcurrentMap rpcTimers = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - @VisibleForTesting protected final ConcurrentMap rpcHistograms = + protected final ConcurrentMap rpcHistograms = new ConcurrentHashMap<>(CAPACITY * 2 /* tracking both request and response sizes */, LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap cacheDroppingExceptions = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - @VisibleForTesting protected final ConcurrentMap rpcCounters = + protected final ConcurrentMap rpcCounters = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); MetricsConnection(String scope, Supplier batchPool, @@ -353,22 +353,21 @@ protected Ratio getRatio() { "concurrentCallsPerServer", scope)); this.numActionsPerServerHist = registry.histogram(name(MetricsConnection.class, "numActionsPerServer", scope)); + this.nsLookups = registry.counter(name(this.getClass(), NS_LOOKUPS, scope)); + this.nsLookupsFailed = registry.counter(name(this.getClass(), NS_LOOKUPS_FAILED, scope)); this.reporter = JmxReporter.forRegistry(this.registry).build(); this.reporter.start(); } - @VisibleForTesting final String getExecutorPoolName() { return name(getClass(), "executorPoolActiveThreads", scope); } - @VisibleForTesting final String getMetaPoolName() { return name(getClass(), "metaPoolActiveThreads", scope); } - @VisibleForTesting MetricRegistry getMetricRegistry() { return registry; } @@ -524,4 +523,12 @@ public void incrCacheDroppingExceptions(Object exception) { (exception == null? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), cacheDroppingExceptions, counterFactory).inc(); } + + public void incrNsLookups() { + this.nsLookups.inc(); + } + + public void incrNsLookupsFailed() { + this.nsLookupsFailed.inc(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 5d48991cf205..028608db614d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -254,8 +254,11 @@ public MutableRegionInfo setSplit(boolean split) { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ @Override + @Deprecated public boolean isOffline() { return this.offLine; } @@ -273,8 +276,11 @@ public MutableRegionInfo setOffline(boolean offLine) { /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. 
+ * @see HBASE-25210 */ @Override + @Deprecated public boolean isSplitParent() { if (!isSplit()) { return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index 6ade9eb8f8e5..ab6fc9475142 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.IndividualBytesFieldCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.RawCell; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.HeapSize; @@ -1000,25 +999,16 @@ public byte getTypeByte() { @Override public Optional getTag(byte type) { - if (cell instanceof RawCell) { - return ((RawCell) cell).getTag(type); - } return PrivateCellUtil.getTag(cell, type); } @Override public Iterator getTags() { - if (cell instanceof RawCell) { - return ((RawCell) cell).getTags(); - } return PrivateCellUtil.tagsIterator(cell); } @Override public byte[] cloneTags() { - if (cell instanceof RawCell) { - return ((RawCell) cell).cloneTags(); - } return PrivateCellUtil.cloneTags(cell); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 1d990d1bc942..919513ceb622 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -230,7 +230,7 @@ public boolean doLoadColumnFamiliesOnDemand() { */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { - colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp)); + colFamTimeRangeMap.put(cf, TimeRange.between(minStamp, maxStamp)); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 62879521afd1..38bdddef1e5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -100,7 +100,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; @@ -2385,56 +2384,60 @@ this. 
c * @param regionNameOrEncodedRegionName region name or encoded region name * @return region location, wrapped by a {@link CompletableFuture} */ - @VisibleForTesting CompletableFuture getRegionLocation(byte[] regionNameOrEncodedRegionName) { if (regionNameOrEncodedRegionName == null) { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - try { - CompletableFuture> future; - if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { - String encodedName = Bytes.toString(regionNameOrEncodedRegionName); - if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { - // old format encodedName, should be meta region - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); - } else { - future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, - regionNameOrEncodedRegionName); - } + + CompletableFuture> future; + if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { + String encodedName = Bytes.toString(regionNameOrEncodedRegionName); + if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { + // old format encodedName, should be meta region + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { - RegionInfo regionInfo = - CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); - if (regionInfo.isMetaRegion()) { - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); - } else { - future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); - } + future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, + regionNameOrEncodedRegionName); + } + } else { + // Not all regionNameOrEncodedRegionName here is going to be a valid region name, + // it needs to throw out IllegalArgumentException in case tableName is passed in. 
+ RegionInfo regionInfo; + try { + regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( + regionNameOrEncodedRegionName); + } catch (IOException ioe) { + return failedFuture(new IllegalArgumentException(ioe.getMessage())); } - CompletableFuture returnedFuture = new CompletableFuture<>(); - addListener(future, (location, err) -> { - if (err != null) { - returnedFuture.completeExceptionally(err); - return; - } - if (!location.isPresent() || location.get().getRegion() == null) { - returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); - } else { - returnedFuture.complete(location.get()); - } - }); - return returnedFuture; - } catch (IOException e) { - return failedFuture(e); + if (regionInfo.isMetaRegion()) { + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); + } else { + future = + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + } } + + CompletableFuture returnedFuture = new CompletableFuture<>(); + addListener(future, (location, err) -> { + if (err != null) { + returnedFuture.completeExceptionally(err); + return; + } + if (!location.isPresent() || location.get().getRegion() == null) { + returnedFuture.completeExceptionally( + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); + } else { + returnedFuture.complete(location.get()); + } + }); + return returnedFuture; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index 95a569f8585c..3cffad8b44d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -513,17 +513,17 @@ public void run(MultiResponse resp) { } @Override - public CompletableFuture mutateRow(RowMutations mutation) { - return this. newCaller(mutation.getRow(), mutation.getMaxPriority(), writeRpcTimeoutNs) - .action((controller, loc, stub) -> - this. mutateRow(controller, loc, stub, mutation, + public CompletableFuture mutateRow(RowMutations mutations) { + return this. newCaller(mutations.getRow(), mutations.getMaxPriority(), + writeRpcTimeoutNs).action((controller, loc, stub) -> + this. 
mutateRow(controller, loc, stub, mutations, (rn, rm) -> { RegionAction.Builder regionMutationBuilder = RequestConverter .buildRegionAction(rn, rm); regionMutationBuilder.setAtomic(true); return MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()) .build(); - }, resp -> null)) + }, resp -> resp)) .call(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 7a3a9af227f7..b6bdd0103de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -71,7 +71,9 @@ public interface RegionInfo extends Comparable { */ @Deprecated @InterfaceAudience.Private - RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); + // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 + RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), + RegionInfo.DEFAULT_REPLICA_ID); /** * Separator used to demarcate the encodedName in a region name @@ -216,12 +218,18 @@ public interface RegionInfo extends Comparable { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ + @Deprecated boolean isOffline(); /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. + * @see HBASE-25210 */ + @Deprecated boolean isSplitParent(); /** @@ -355,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; + if (parseRegionNameOrReturnNull(regionName) == null) { + if (regionName.length > MD5_HEX_LENGTH) { + return false; + } else if (regionName.length == MD5_HEX_LENGTH) { + return true; + } else { + String encodedName = Bytes.toString(regionName); + try { + Integer.parseInt(encodedName); + // If this is a valid integer, it could be hbase:meta's encoded region name. + return true; + } catch(NumberFormatException er) { + return false; + } + } + } + return false; } /** @@ -588,8 +612,9 @@ static String prettyPrint(final String encodedRegionName) { * @return the MOB {@link RegionInfo}. */ static RegionInfo createMobRegionInfo(TableName tableName) { - return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")). - setRegionId(0).build(); + // Skipping reference to RegionInfoBuilder in this class. 
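Several files in this patch change mutateRow to propagate results instead of returning void/null (RawAsyncTableImpl above maps resp -> resp; Table and TableOverAsyncTable later in the patch return a Result). A minimal usage sketch, not part of the patch; the table, row, family and qualifier values are assumed, and hbase-client imports are elided.

  // Illustrative only. Assumes an open Table and byte[] row/cf/q1/q2 values.
  static long putAndIncrementAtomically(Table table, byte[] row, byte[] cf, byte[] q1, byte[] q2)
      throws IOException {
    RowMutations rm = new RowMutations(row);
    rm.add(new Put(row).addColumn(cf, q1, Bytes.toBytes("v1")));
    rm.add(new Increment(row).addColumn(cf, q2, 1L));
    Result result = table.mutateRow(rm);            // returned void before this patch
    // Per the new javadoc, the Result carries the outcome of Increment/Append operations.
    return Bytes.toLong(result.getValue(cf, q2));
  }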
+ return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), + HConstants.EMPTY_END_ROW, false, 0, DEFAULT_REPLICA_ID, false); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index a9e7806ad9d3..cbf9e4a3c219 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -100,6 +100,7 @@ public RegionInfoBuilder setSplit(boolean split) { return this; } + @Deprecated public RegionInfoBuilder setOffline(boolean offLine) { this.offLine = offLine; return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index c7440c670403..7ea6e4ada36c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -37,6 +37,18 @@ */ @InterfaceAudience.Public public interface RegionLocator extends Closeable { + + /** Configuration for Region Locator's mode when meta replica is configured. + * Valid values are: HedgedRead, LoadBalance, None + */ + String LOCATOR_META_REPLICAS_MODE = "hbase.locator.meta.replicas.mode"; + + /** Configuration for meta replica selector when Region Locator's LoadBalance mode is configured. + * The default value is org.apache.hadoop.hbase.client.CatalogReplicaLoadBalanceSimpleSelector. + */ + String LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR = + "hbase.locator.meta.replicas.mode.loadbalance.selector"; + /** * Finds the region on which the given row is being served. Does not reload the cache. * @param row Row to find. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index 3b0f94b9dbce..0f8b429959de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -29,7 +29,6 @@ /** * Performs multiple mutations atomically on a single row. - * Currently {@link Put} and {@link Delete} are supported. * * The mutations are performed in the order in which they * were added. @@ -75,8 +74,6 @@ public RowMutations(byte [] row, int initialCapacity) { } /** - * Currently only supports {@link Put} and {@link Delete} mutations. - * * @param mutation The data to send. * @throws IOException if the row of added mutation doesn't match the original row */ @@ -85,15 +82,13 @@ public RowMutations add(Mutation mutation) throws IOException { } /** - * Currently only supports {@link Put} and {@link Delete} mutations. - * * @param mutations The data to send. 
* @throws IOException if the row of added mutation doesn't match the original row */ public RowMutations add(List mutations) throws IOException { for (Mutation mutation : mutations) { if (!Bytes.equals(row, mutation.getRow())) { - throw new WrongRowIOException("The row in the recently added Put/Delete <" + + throw new WrongRowIOException("The row in the recently added Mutation <" + Bytes.toStringBinary(mutation.getRow()) + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index d515c550f0e9..708d6c735694 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -316,7 +316,7 @@ public Scan addColumn(byte [] family, byte [] qualifier) { * @return this */ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -350,7 +350,7 @@ public Scan setTimeStamp(long timestamp) */ public Scan setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); @@ -383,6 +383,9 @@ public Scan withStartRow(byte[] startRow) { *

* If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner * will start from the next closest row after the specified row. + *

+ * Note: When using {@link #setRowPrefixFilter(byte[])}, the result might be unexpected. + *


* @param startRow row to start scanner at or after * @param inclusive whether we should include the start row when scan * @return this @@ -447,7 +450,13 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) { * after this method will yield undefined results.

* @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) * @return this + * @deprecated since 3.0.0. The scan result might be unexpected in some cases. + * e.g. startRow : "112" and rowPrefixFilter : "11" + * The Result of this scan might contains : "111" + * This method implements the filter by setting startRow and stopRow, + * but does not take care of the scenario where startRow has been set. */ + @Deprecated public Scan setRowPrefixFilter(byte[] rowPrefix) { if (rowPrefix == null) { withStartRow(HConstants.EMPTY_START_ROW); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java index d87014428c3b..1a184da86e45 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Collection; @@ -63,7 +62,6 @@ class SimpleRequestController implements RequestController { /** * Default value of {@link #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE}. */ - @VisibleForTesting static final long DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = 4194304; /** @@ -73,7 +71,6 @@ class SimpleRequestController implements RequestController { /** * Default value of {@link #HBASE_CLIENT_MAX_PERREQUEST_ROWS}. */ - @VisibleForTesting static final long DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_ROWS = 2048; /** @@ -83,14 +80,10 @@ class SimpleRequestController implements RequestController { /** * Default value of {@link #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE}. */ - @VisibleForTesting static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE; - @VisibleForTesting final AtomicLong tasksInProgress = new AtomicLong(0); - @VisibleForTesting final ConcurrentMap taskCounterPerRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); - @VisibleForTesting final ConcurrentMap taskCounterPerServer = new ConcurrentHashMap<>(); /** * The number of tasks simultaneously executed on the cluster. @@ -112,13 +105,11 @@ class SimpleRequestController implements RequestController { * don't start a set of operations on a region before the previous one is * done. As well, this limits the pressure we put on the region server. */ - @VisibleForTesting final int maxConcurrentTasksPerRegion; /** * The number of task simultaneously executed on a single region server. */ - @VisibleForTesting final int maxConcurrentTasksPerServer; private final int thresholdToLogUndoneTaskDetails; public static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = @@ -171,7 +162,6 @@ private static long checkAndGet(Configuration conf, String key, long defaultValu return value; } - @VisibleForTesting static Checker newChecker(List checkers) { return new Checker() { private boolean isEnd = false; @@ -331,7 +321,6 @@ public void waitForFreeSlot(long id, int periodToTrigger, Consumer trigger * limit the heapsize of total submitted data. Reduce the limit of heapsize * for submitting quickly if there is no running task. 
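The deprecation note on Scan#setRowPrefixFilter above boils down to call order: the method rewrites the start and stop rows, so a start row set earlier is silently discarded. A hedged sketch of the pitfall, reusing the values from that note (not part of the patch; Bytes and Scan come from the hbase-client API shown in this diff).

  // Sketch only.
  Scan surprising = new Scan()
      .withStartRow(Bytes.toBytes("112"))        // intended lower bound
      .setRowPrefixFilter(Bytes.toBytes("11"));  // resets start/stop to the "11" prefix range
  // This scan can return row "111", which lies below the start row that was set first.

  // One way to get the intended range with the existing API: set the prefix first,
  // then narrow the start row.
  Scan intended = new Scan();
  intended.setRowPrefixFilter(Bytes.toBytes("11"));
  intended.withStartRow(Bytes.toBytes("112"));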
*/ - @VisibleForTesting static class SubmittedSizeChecker implements RowChecker { private final long maxHeapSizeSubmit; @@ -365,7 +354,6 @@ public void reset() { /** * limit the max number of tasks in an AsyncProcess. */ - @VisibleForTesting static class TaskCountChecker implements RowChecker { private static final long MAX_WAITING_TIME = 1000; //ms @@ -475,7 +463,6 @@ public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow /** * limit the number of rows for each request. */ - @VisibleForTesting static class RequestRowsChecker implements RowChecker { private final long maxRowsPerRequest; @@ -514,7 +501,6 @@ public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow /** * limit the heap size for each request. */ - @VisibleForTesting static class RequestHeapSizeChecker implements RowChecker { private final long maxHeapSizePerRequest; @@ -554,7 +540,6 @@ public void notifyFinal(ReturnCode code, HRegionLocation loc, long heapSizeOfRow /** * Provide a way to control the flow of rows iteration. */ - @VisibleForTesting interface RowChecker { ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index eb98bc9c1f3c..53da0cfb9120 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -459,9 +459,10 @@ default List checkAndMutate(List checkAndM * {@link Put} and {@link Delete} are supported. * * @param rm object that specifies the set of mutations to perform atomically - * @throws IOException + * @return results of Increment/Append operations + * @throws IOException if a remote or network exception occurs. */ - default void mutateRow(final RowMutations rm) throws IOException { + default Result mutateRow(final RowMutations rm) throws IOException { throw new NotImplementedException("Add an implementation!"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index a4523872c9c5..1440c28787d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -177,13 +177,6 @@ public interface TableDescriptor { */ TableName getTableName(); - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - String getOwnerString(); - /** * Get the region server group this table belongs to. The regions of this table will be placed * only on the region servers within this group. 
If not present, will be placed on diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 1328f7d017e2..2581ccea758b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -41,9 +41,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PrettyPrinter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,12 +72,6 @@ public class TableDescriptorBuilder { private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); - @InterfaceAudience.Private - public static final String OWNER = "OWNER"; - @InterfaceAudience.Private - public static final Bytes OWNER_KEY - = new Bytes(Bytes.toBytes(OWNER)); - /** * Used by rest interface to access this metadata attribute * which denotes if the table is Read Only. @@ -253,6 +248,16 @@ public class TableDescriptorBuilder { RESERVED_KEYWORDS.add(IS_META_KEY); } + public static PrettyPrinter.Unit getUnit(String key) { + switch (key) { + case MAX_FILESIZE: + case MEMSTORE_FLUSHSIZE: + return PrettyPrinter.Unit.BYTE; + default: + return PrettyPrinter.Unit.NONE; + } + } + /** * @deprecated namespace table has been folded into the ns family in meta table, do not use this * any more. @@ -465,11 +470,22 @@ public TableDescriptorBuilder setMaxFileSize(long maxFileSize) { return this; } + public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException { + desc.setMaxFileSize(maxFileSize); + return this; + } + public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) { desc.setMemStoreFlushSize(memstoreFlushSize); return this; } + public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + desc.setMemStoreFlushSize(memStoreFlushSize); + return this; + } + public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) { desc.setNormalizerTargetRegionCount(regionCount); return this; @@ -485,26 +501,6 @@ public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) { return this; } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwner(User owner) { - desc.setOwner(owner); - return this; - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. 
- * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwnerString(String ownerString) { - desc.setOwnerString(ownerString); - return this; - } - public TableDescriptorBuilder setPriority(int priority) { desc.setPriority(priority); return this; @@ -568,7 +564,7 @@ public TableDescriptorBuilder setReplicationScope(int scope) { } public TableDescriptorBuilder setRegionServerGroup(String group) { - desc.setValue(RSGROUP_KEY, new Bytes(Bytes.toBytes(group))); + desc.setValue(RSGROUP_KEY, group); return this; } @@ -705,7 +701,7 @@ public ModifyableTableDescriptor setValue(String key, String value) { toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * @param key The key. * @param value The value. If null, removes the setting. */ @@ -714,14 +710,14 @@ private ModifyableTableDescriptor setValue(final Bytes key, return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * Setter for storing metadata as a (key, value) pair in {@link #values} map * * @param key The key. * @param value The value. If null, removes the setting. */ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); @@ -1009,6 +1005,11 @@ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize)); } + public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { + return setMaxFileSize(Long.parseLong(PrettyPrinter. + valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); + } + /** * Returns the size of the memstore after which a flush to filesystem is * triggered. @@ -1034,6 +1035,12 @@ public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize)); } + public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, + PrettyPrinter.Unit.BYTE))); + } + /** * This sets the class associated with the flush policy which determines * determines the stores need to be flushed when flushing a region. The @@ -1196,7 +1203,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForAttr = true; s.append(key); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } if (!userKeys.isEmpty()) { @@ -1216,7 +1223,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForCfg = true; s.append('\'').append(key).append('\''); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } s.append("}"); } @@ -1550,38 +1557,6 @@ public void removeCoprocessor(String className) { } } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public ModifyableTableDescriptor setOwner(User owner) { - return setOwnerString(owner != null ? owner.getShortName() : null); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - // used by admin.rb:alter(table_name,*args) to update owner. - @Deprecated - public ModifyableTableDescriptor setOwnerString(String ownerString) { - return setValue(OWNER_KEY, ownerString); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. 
- * @see HBASE-15583 - */ - @Override - @Deprecated - public String getOwnerString() { - // Note that every table should have an owner (i.e. should have OWNER_KEY set). - // hbase:meta should return system user as owner, not null (see - // MasterFileSystem.java:bootstrap()). - return getOrDefault(OWNER_KEY, Function.identity(), null); - } - /** * @return the bytes in pb format */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java index 8639282d0a7d..1260f313cf52 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java @@ -311,8 +311,8 @@ public List checkAndMutate(List checkAndMu } @Override - public void mutateRow(RowMutations rm) throws IOException { - FutureUtils.get(table.mutateRow(rm)); + public Result mutateRow(RowMutations rm) throws IOException { + return FutureUtils.get(table.mutateRow(rm)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 42a418859f18..4b31c7a6c8a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData; + import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -42,7 +43,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; @@ -96,7 +97,6 @@ public CompletableFuture getClusterId() { return getAndConvert(znodePaths.clusterIdZNode, ZKConnectionRegistry::getClusterId); } - @VisibleForTesting ReadOnlyZKClient getZKClient() { return zk; } @@ -141,7 +141,7 @@ private void getMetaRegionLocation(CompletableFuture future, HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; MutableInt remaining = new MutableInt(locs.length); for (String metaReplicaZNode : metaReplicaZNodes) { - int replicaId = znodePaths.getMetaReplicaIdFromZnode(metaReplicaZNode); + int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); if (replicaId == DEFAULT_REPLICA_ID) { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index c5dcd762e96f..05343eae4ccd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Set; import 
java.util.stream.Collectors; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -246,7 +245,7 @@ public static Map> convert2Map(ReplicationProtos.TableCF /** * @param bytes Content of a peer znode. * @return ClusterKey parsed from the passed bytes. - * @throws DeserializationException + * @throws DeserializationException deserialization exception */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException { @@ -390,7 +389,7 @@ public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig pe } /** - * @param peerConfig + * @param peerConfig peer config of replication peer * @return Serialized protobuf of peerConfig with pb magic prefix prepended suitable * for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under * /hbase/replication/peers/PEER_ID @@ -454,37 +453,42 @@ public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( } /** - * Helper method to add base peer configs from Configuration to ReplicationPeerConfig - * if not present in latter. + * Helper method to add/removev base peer configs from Configuration to ReplicationPeerConfig * * This merges the user supplied peer configuration * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs * provided as property hbase.replication.peer.base.configs in hbase configuration. - * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1". Original value - * of conf is retained if already present in ReplicationPeerConfig. + * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". + * If value is empty, it will remove the existing key-value from peer config. * * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ - public static ReplicationPeerConfig addBasePeerConfigsIfNotPresent(Configuration conf, + public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. 
newBuilder(receivedPeerConfig); - Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); if (basePeerConfigs.length() != 0) { Map basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings() .withKeyValueSeparator("=").split(basePeerConfigs); - for (Map.Entry entry : basePeerConfigMap.entrySet()) { + for (Map.Entry entry : basePeerConfigMap.entrySet()) { String configName = entry.getKey(); String configValue = entry.getValue(); - // Only override if base config does not exist in existing peer configs - if (!receivedPeerConfigMap.containsKey(configName)) { + // If the config is provided with empty value, for eg. k1="", + // we remove it from peer config. Providing config with empty value + // is required so that it doesn't remove any other config unknowingly. + if (Strings.isNullOrEmpty(configValue)) { + copiedPeerConfigBuilder.removeConfiguration(configName); + } else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) { + // update the configuration if exact config and value doesn't exists copiedPeerConfigBuilder.putConfiguration(configName, configValue); } } } + return copiedPeerConfigBuilder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java index 6b1e251953b9..2482a632ca8d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java @@ -28,7 +28,6 @@ import java.nio.channels.ClosedChannelException; import java.util.Set; import java.util.concurrent.TimeoutException; - import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -36,16 +35,14 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.RetryImmediatelyException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; - import org.apache.hadoop.hbase.ipc.CallTimeoutException; import org.apache.hadoop.hbase.ipc.FailedServerException; import org.apache.hadoop.hbase.quotas.RpcThrottlingException; import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -150,7 +147,6 @@ public static boolean isCallDroppedException(Throwable t) { * For test only. Usually you should use the {@link #isConnectionException(Throwable)} method * below. 
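To make the new remove-on-empty behaviour of updateReplicationBasePeerConfigs above concrete, a small sketch follows; the keys, values, and cluster key are made up, and imports are elided.

  // Sketch only, not part of the patch. An empty value (written as k3="" in the javadoc
  // above) removes that key from the peer configuration instead of overriding it.
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.replication.peer.base.configs", "k1=v1;k2=v2,v2_1;k3=");

  ReplicationPeerConfig received = ReplicationPeerConfig.newBuilder()
      .setClusterKey("zk1:2181:/hbase")    // hypothetical peer cluster key
      .putConfiguration("k3", "stale")     // k3 will be dropped by the empty base value
      .build();

  ReplicationPeerConfig merged =
      ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, received);
  // merged.getConfiguration() now contains k1=v1 and k2=v2,v2_1, and no longer contains k3.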
*/ - @VisibleForTesting public static Set> getConnectionExceptionTypes() { return CONNECTION_EXCEPTION_TYPES; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java similarity index 68% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java index e4d773683133..1ed5b55410ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.master; +package org.apache.hadoop.hbase.exceptions; -import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; -@SuppressWarnings("serial") +/** + * Thrown when the master is stopped + */ @InterfaceAudience.Private -public class ClusterSchemaException extends HBaseIOException { - public ClusterSchemaException(String message) { - super(message); - } +public class MasterStoppedException extends DoNotRetryIOException { - public ClusterSchemaException(String message, Throwable cause) { - super(message, cause); - } + private static final long serialVersionUID = -4284604435898100365L; - public ClusterSchemaException(Throwable cause) { - super(cause); + public MasterStoppedException() { + super(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index f6811f607bde..9f44fe85bcc2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -37,8 +37,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.UnsafeAvailChecker; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This is optimized version of a standard FuzzyRowFilter Filters data based on fuzzy row key. * Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys. 
@@ -317,12 +315,10 @@ static enum SatisfiesCode { NO_NEXT } - @VisibleForTesting static SatisfiesCode satisfies(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { return satisfies(false, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } - @VisibleForTesting static SatisfiesCode satisfies(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { return satisfies(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); @@ -438,12 +434,10 @@ static SatisfiesCode satisfiesNoUnsafe(boolean reverse, byte[] row, int offset, return SatisfiesCode.YES; } - @VisibleForTesting static byte[] getNextForFuzzyRule(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { return getNextForFuzzyRule(false, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); } - @VisibleForTesting static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { return getNextForFuzzyRule(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); @@ -530,7 +524,6 @@ public static Order orderFor(boolean reverse) { * @return greater byte array than given (row) which satisfies the fuzzy rule if it exists, null * otherwise */ - @VisibleForTesting static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int length, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index 9f52783dbb0c..b9132a3ba295 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,6 +244,13 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; + /** + * RegexStringNoCaseType byte array + */ + public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', + 's','t','r','i','n','g', + 'n','o','c','a','s','e'}; + /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 1aeaa13f5a93..e06c6b5c4139 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,6 +28,7 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -812,6 +813,9 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); + else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) + return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index d94f2d3f54df..e9ec6a92ee93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -22,9 +22,7 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.wrapException; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.SocketAddress; -import java.net.UnknownHostException; import java.util.Collection; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -37,6 +35,7 @@ import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -44,16 +43,15 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -101,7 +99,7 @@ public abstract class AbstractRpcClient implements RpcC new ThreadFactoryBuilder().setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); - protected boolean running = true; // if client runs + private boolean running = true; // if client runs protected final Configuration conf; protected final String clusterId; @@ -127,7 +125,7 @@ public abstract class AbstractRpcClient implements RpcC protected final int readTO; protected final int writeTO; - protected final PoolMap connections; + private final PoolMap connections; private final AtomicInteger callIdCnt = new AtomicInteger(0); @@ -135,10 +133,10 @@ public abstract class AbstractRpcClient implements RpcC private int maxConcurrentCallsPerServer; - private static final LoadingCache concurrentCounterCache = + private static final LoadingCache concurrentCounterCache = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS). - build(new CacheLoader() { - @Override public AtomicInteger load(InetSocketAddress key) throws Exception { + build(new CacheLoader() { + @Override public AtomicInteger load(Address key) throws Exception { return new AtomicInteger(0); } }); @@ -207,16 +205,15 @@ private void cleanupIdleConnections() { // The connection itself will disconnect if there is no pending call for maxIdleTime. 
if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) { if (LOG.isTraceEnabled()) { - LOG.trace("Cleanup idle connection to {}", conn.remoteId().address); + LOG.trace("Cleanup idle connection to {}", conn.remoteId().getAddress()); } - connections.removeValue(conn.remoteId(), conn); + connections.remove(conn.remoteId(), conn); conn.cleanupConnection(); } } } } - @VisibleForTesting public static String getDefaultCodec(final Configuration c) { // If "hbase.client.default.rpc.codec" is empty string -- you can't set it to null because // Configuration will complain -- then no default codec (and we'll pb everything). Else @@ -248,7 +245,6 @@ public boolean hasCellBlockSupport() { } // for writing tests that want to throw exception when connecting. - @VisibleForTesting boolean isTcpNoDelay() { return tcpNoDelay; } @@ -294,7 +290,14 @@ private static PoolMap.PoolType getPoolType(Configuration config) { * @return the maximum pool size */ private static int getPoolSize(Configuration config) { - return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); + int poolSize = config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); + + if (poolSize <= 0) { + LOG.warn("{} must be positive. Using default value: 1", HConstants.HBASE_CLIENT_IPC_POOL_SIZE); + return 1; + } else { + return poolSize; + } } private int nextCallId() { @@ -315,7 +318,7 @@ private int nextCallId() { * @return A pair with the Message response and the Cell data (if any). */ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, - Message param, Message returnType, final User ticket, final InetSocketAddress isa) + Message param, Message returnType, final User ticket, final Address isa) throws ServiceException { BlockingRpcCallback done = new BlockingRpcCallback<>(); callMethod(md, hrc, param, returnType, ticket, isa, done); @@ -339,22 +342,18 @@ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcCont private T getConnection(ConnectionId remoteId) throws IOException { if (failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { - LOG.debug("Not trying to connect to " + remoteId.address + LOG.debug("Not trying to connect to " + remoteId.getAddress() + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.address); + "This server is in the failed servers list: " + remoteId.getAddress()); } T conn; synchronized (connections) { if (!running) { throw new StoppedRpcClientException(); } - conn = connections.get(remoteId); - if (conn == null) { - conn = createConnection(remoteId); - connections.put(remoteId, conn); - } + conn = connections.getOrCreate(remoteId, () -> createConnection(remoteId)); conn.setLastTouched(EnvironmentEdgeManager.currentTime()); } return conn; @@ -365,7 +364,7 @@ private T getConnection(ConnectionId remoteId) throws IOException { */ protected abstract T createConnection(ConnectionId remoteId) throws IOException; - private void onCallFinished(Call call, HBaseRpcController hrc, InetSocketAddress addr, + private void onCallFinished(Call call, HBaseRpcController hrc, Address addr, RpcCallback callback) { call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.getStartTime()); if (metrics != null) { @@ -390,8 +389,8 @@ private void onCallFinished(Call call, HBaseRpcController hrc, InetSocketAddress } Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc, - final Message param, Message 
returnType, final User ticket, final InetSocketAddress addr, - final RpcCallback callback) { + final Message param, Message returnType, final User ticket, + final Address addr, final RpcCallback callback) { final MetricsConnection.CallStats cs = MetricsConnection.newCallStats(); cs.setStartTime(EnvironmentEdgeManager.currentTime()); @@ -429,12 +428,8 @@ public void run(Call call) { return call; } - InetSocketAddress createAddr(ServerName sn) throws UnknownHostException { - InetSocketAddress addr = new InetSocketAddress(sn.getHostname(), sn.getPort()); - if (addr.isUnresolved()) { - throw new UnknownHostException("can not resolve " + sn.getServerName()); - } - return addr; + private static Address createAddr(ServerName sn) { + return Address.fromParts(sn.getHostname(), sn.getPort()); } /** @@ -449,11 +444,11 @@ public void cancelConnections(ServerName sn) { synchronized (connections) { for (T connection : connections.values()) { ConnectionId remoteId = connection.remoteId(); - if (remoteId.address.getPort() == sn.getPort() - && remoteId.address.getHostName().equals(sn.getHostname())) { + if (remoteId.getAddress().getPort() == sn.getPort() + && remoteId.getAddress().getHostName().equals(sn.getHostname())) { LOG.info("The server on " + sn.toString() + " is dead - stopping the connection " + connection.remoteId); - connections.removeValue(remoteId, connection); + connections.remove(remoteId, connection); connection.shutdown(); connection.cleanupConnection(); } @@ -509,19 +504,18 @@ public void close() { @Override public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket, - int rpcTimeout) throws UnknownHostException { + int rpcTimeout) { return new BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout); } @Override - public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) - throws UnknownHostException { + public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) { return new RpcChannelImplementation(this, createAddr(sn), user, rpcTimeout); } private static class AbstractRpcChannel { - protected final InetSocketAddress addr; + protected final Address addr; protected final AbstractRpcClient rpcClient; @@ -529,7 +523,7 @@ private static class AbstractRpcChannel { protected final int rpcTimeout; - protected AbstractRpcChannel(AbstractRpcClient rpcClient, InetSocketAddress addr, + protected AbstractRpcChannel(AbstractRpcClient rpcClient, Address addr, User ticket, int rpcTimeout) { this.addr = addr; this.rpcClient = rpcClient; @@ -562,20 +556,19 @@ protected HBaseRpcController configureRpcController(RpcController controller) { /** * Blocking rpc channel that goes via hbase rpc. 
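// A hedged sketch of what the InetSocketAddress -> Address switch above means for callers: the new
// createAddr() only packages host and port, so no DNS lookup (and no UnknownHostException) happens while
// an RPC channel is created; resolution is deferred until the connection is actually set up. As a side
// note, getPoolSize() above now warns on a non-positive HConstants.HBASE_CLIENT_IPC_POOL_SIZE value and
// falls back to 1. Class and method names below are illustrative.
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.net.Address;

public class AddressSketch {
  // Mirrors the new private createAddr(ServerName): a plain host:port value object, no resolution.
  static Address toAddress(ServerName sn) {
    return Address.fromParts(sn.getHostname(), sn.getPort());
  }
}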
*/ - @VisibleForTesting public static class BlockingRpcChannelImplementation extends AbstractRpcChannel implements BlockingRpcChannel { protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, - InetSocketAddress addr, User ticket, int rpcTimeout) { + Address addr, User ticket, int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } @Override public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType) throws ServiceException { - return rpcClient.callBlockingMethod(md, configureRpcController(controller), - param, returnType, ticket, addr); + Message param, Message returnType) throws ServiceException { + return rpcClient.callBlockingMethod(md, configureRpcController(controller), param, returnType, + ticket, addr); } } @@ -585,20 +578,19 @@ public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController public static class RpcChannelImplementation extends AbstractRpcChannel implements RpcChannel { - protected RpcChannelImplementation(AbstractRpcClient rpcClient, InetSocketAddress addr, - User ticket, int rpcTimeout) throws UnknownHostException { + protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, + User ticket, int rpcTimeout) { super(rpcClient, addr, ticket, rpcTimeout); } @Override - public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType, RpcCallback done) { + public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, Message param, + Message returnType, RpcCallback done) { + HBaseRpcController configuredController = configureRpcController( + Preconditions.checkNotNull(controller, "RpcController can not be null for async rpc call")); // This method does not throw any exceptions, so the caller must provide a // HBaseRpcController which is used to pass the exceptions. - this.rpcClient.callMethod(md, - configureRpcController(Preconditions.checkNotNull(controller, - "RpcController can not be null for async rpc call")), - param, returnType, ticket, addr, done); + this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, addr, done); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java index 22eca535e958..dd8f96bb2b9b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.net.NetUtils; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Does RPC against a cluster. Manages connections per regionserver in the cluster. @@ -41,7 +40,6 @@ public class BlockingRpcClient extends AbstractRpcClient * Used in test only. 
Construct an IPC client for the cluster {@code clusterId} with the default * SocketFactory */ - @VisibleForTesting BlockingRpcClient(Configuration conf) { this(conf, HConstants.CLUSTER_ID_DEFAULT, null, null); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index ba3d4cd55644..cd8035fd58ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -32,9 +32,9 @@ import java.io.InputStream; import java.io.InterruptedIOException; import java.io.OutputStream; +import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; -import java.net.UnknownHostException; import java.security.PrivilegedExceptionAction; import java.util.ArrayDeque; import java.util.Locale; @@ -43,7 +43,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ThreadLocalRandom; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -67,11 +66,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -207,7 +208,7 @@ public void run() { */ public void cleanup(IOException e) { IOException ie = new ConnectionClosingException( - "Connection to " + remoteId.address + " is closing."); + "Connection to " + remoteId.getAddress() + " is closing."); for (Call call : callsToWrite) { call.setException(ie); } @@ -217,12 +218,9 @@ public void cleanup(IOException e) { BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, - rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor); + rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, + rpcClient.metrics); this.rpcClient = rpcClient; - if (remoteId.getAddress().isUnresolved()) { - throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); - } - this.connectionHeaderPreamble = getConnectionHeaderPreamble(); ConnectionHeader header = getConnectionHeader(); ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + header.getSerializedSize()); @@ -257,7 +255,8 @@ protected void setupConnection() throws IOException { if (this.rpcClient.localAddr != null) { this.socket.bind(this.rpcClient.localAddr); } - NetUtils.connect(this.socket, remoteId.getAddress(), this.rpcClient.connectTO); + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); + NetUtils.connect(this.socket, remoteAddr, this.rpcClient.connectTO); this.socket.setSoTimeout(this.rpcClient.readTO); return; } catch (SocketTimeoutException toe) { @@ -343,13 
+342,13 @@ private synchronized boolean waitForWork() { @Override public void run() { if (LOG.isTraceEnabled()) { - LOG.trace(threadName + ": starting, connections " + this.rpcClient.connections.size()); + LOG.trace(threadName + ": starting"); } while (waitForWork()) { readResponse(); } if (LOG.isTraceEnabled()) { - LOG.trace(threadName + ": stopped, connections " + this.rpcClient.connections.size()); + LOG.trace(threadName + ": stopped"); } } @@ -362,8 +361,11 @@ private void disposeSasl() { private boolean setupSaslConnection(final InputStream in2, final OutputStream out2) throws IOException { + if (this.metrics != null) { + this.metrics.incrNsLookups(); + } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - serverAddress, securityInfo, this.rpcClient.fallbackAllowed, + socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, this.rpcClient.conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); @@ -440,16 +442,16 @@ private void setupIOstreams() throws IOException { if (this.rpcClient.failedServers.isFailedServer(remoteId.getAddress())) { if (LOG.isDebugEnabled()) { - LOG.debug("Not trying to connect to " + remoteId.address + LOG.debug("Not trying to connect to " + remoteId.getAddress() + " this server is in the failed servers list"); } throw new FailedServerException( - "This server is in the failed servers list: " + remoteId.address); + "This server is in the failed servers list: " + remoteId.getAddress()); } try { if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to " + remoteId.address); + LOG.debug("Connecting to " + remoteId.getAddress()); } short numRetries = 0; @@ -504,14 +506,14 @@ public Boolean run() throws IOException { closeSocket(); IOException e = ExceptionUtil.asInterrupt(t); if (e == null) { - this.rpcClient.failedServers.addToFailedServers(remoteId.address, t); + this.rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), t); if (t instanceof LinkageError) { // probably the hbase hadoop version does not match the running hadoop version e = new DoNotRetryIOException(t); } else if (t instanceof IOException) { e = (IOException) t; } else { - e = new IOException("Could not set up IO Streams to " + remoteId.address, t); + e = new IOException("Could not set up IO Streams to " + remoteId.getAddress(), t); } } throw e; @@ -768,7 +770,7 @@ public synchronized void shutdown() { if (callSender != null) { callSender.interrupt(); } - closeConn(new IOException("connection to " + remoteId.address + " closed")); + closeConn(new IOException("connection to " + remoteId.getAddress() + " closed")); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index 1396f1e7abc5..cac9ff27382e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.ipc; -import java.net.InetSocketAddress; import java.util.Objects; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -32,9 +32,9 @@ class ConnectionId { private static final int PRIME = 16777619; final User ticket; final String serviceName; - final InetSocketAddress address; + final Address address; - 
public ConnectionId(User ticket, String serviceName, InetSocketAddress address) { + public ConnectionId(User ticket, String serviceName, Address address) { this.address = address; this.ticket = ticket; this.serviceName = serviceName; @@ -44,7 +44,7 @@ public String getServiceName() { return this.serviceName; } - public InetSocketAddress getAddress() { + public Address getAddress() { return address; } @@ -73,7 +73,7 @@ public int hashCode() { return hashCode(ticket,serviceName,address); } - public static int hashCode(User ticket, String serviceName, InetSocketAddress address) { + public static int hashCode(User ticket, String serviceName, Address address) { return (address.hashCode() + PRIME * (PRIME * serviceName.hashCode() ^ (ticket == null ? 0 : ticket.hashCode()))); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java index 86b763b91b0c..1a8bc0129ea6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/FailedServers.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.ipc; -import java.net.InetSocketAddress; import java.util.HashMap; import java.util.Map; @@ -25,6 +24,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** @@ -32,7 +32,7 @@ */ @InterfaceAudience.Private public class FailedServers { - private final Map failedServers = new HashMap(); + private final Map failedServers = new HashMap(); private long latestExpiry = 0; private final int recheckServersTimeout; private static final Logger LOG = LoggerFactory.getLogger(FailedServers.class); @@ -45,13 +45,13 @@ public FailedServers(Configuration conf) { /** * Add an address to the list of the failed servers list. 
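// A small usage sketch, assuming the Address-keyed API shown above: FailedServers now stores expiry
// timestamps keyed directly by Address (host:port) instead of by address.toString(). The host name
// below is hypothetical.
import java.net.ConnectException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.FailedServers;
import org.apache.hadoop.hbase.net.Address;

public class FailedServersSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    FailedServers failedServers = new FailedServers(conf);
    Address rs = Address.fromParts("regionserver-1.example.org", 16020);
    failedServers.addToFailedServers(rs, new ConnectException("Connection refused"));
    // Stays true until the configured failed-server expiry (recheckServersTimeout) elapses.
    System.out.println(failedServers.isFailedServer(rs));
  }
}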
*/ - public synchronized void addToFailedServers(InetSocketAddress address, Throwable throwable) { + public synchronized void addToFailedServers(Address address, Throwable throwable) { final long expiry = EnvironmentEdgeManager.currentTime() + recheckServersTimeout; - this.failedServers.put(address.toString(), expiry); + this.failedServers.put(address, expiry); this.latestExpiry = expiry; if (LOG.isDebugEnabled()) { LOG.debug( - "Added failed server with address " + address.toString() + " to list caused by " + "Added failed server with address " + address + " to list caused by " + throwable.toString()); } } @@ -61,7 +61,7 @@ public synchronized void addToFailedServers(InetSocketAddress address, Throwable * * @return true if the server is in the failed servers list */ - public synchronized boolean isFailedServer(final InetSocketAddress address) { + public synchronized boolean isFailedServer(final Address address) { if (failedServers.isEmpty()) { return false; } @@ -70,15 +70,14 @@ public synchronized boolean isFailedServer(final InetSocketAddress address) { failedServers.clear(); return false; } - String key = address.toString(); - Long expiry = this.failedServers.get(key); + Long expiry = this.failedServers.get(address); if (expiry == null) { return false; } if (expiry >= now) { return true; } else { - this.failedServers.remove(key); + this.failedServers.remove(address); } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 811bb2c21fdf..c952f7384460 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -21,7 +21,6 @@ import java.io.OutputStream; import java.lang.reflect.InvocationTargetException; import java.net.ConnectException; -import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import java.nio.channels.ClosedChannelException; import java.util.concurrent.TimeoutException; @@ -33,12 +32,12 @@ import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -176,7 +175,7 @@ static IOException toIOE(Throwable t) { * @return an exception to throw * @see ClientExceptionsUtil#isConnectionException(Throwable) */ - static IOException wrapException(InetSocketAddress addr, Throwable error) { + static IOException wrapException(Address addr, Throwable error) { if (error instanceof ConnectException) { // connection refused; include the host:port in the error return (IOException) new ConnectException( @@ -248,7 +247,6 @@ protected MutableInt initialValue() throws Exception { } }; - @VisibleForTesting static final int MAX_DEPTH = 4; static void execute(EventLoop eventLoop, Runnable action) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index fc9f97930212..d0a13ca33d6c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -24,6 +24,8 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE; import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadLocalRandom; @@ -35,11 +37,11 @@ import org.apache.hadoop.hbase.security.SaslChallengeDecoder; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; @@ -97,7 +99,8 @@ class NettyRpcConnection extends RpcConnection { NettyRpcConnection(NettyRpcClient rpcClient, ConnectionId remoteId) throws IOException { super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId, - rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor); + rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor, + rpcClient.metrics); this.rpcClient = rpcClient; this.eventLoop = rpcClient.group.next(); byte[] connectionHeaderPreamble = getConnectionHeaderPreamble(); @@ -207,7 +210,8 @@ private void saslNegotiate(final Channel ch) { final NettyHBaseSaslRpcClientHandler saslHandler; try { saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, provider, token, - serverAddress, securityInfo, rpcClient.fallbackAllowed, this.rpcClient.conf); + ((InetSocketAddress) ch.remoteAddress()).getAddress(), securityInfo, + rpcClient.fallbackAllowed, this.rpcClient.conf); } catch (IOException e) { failInit(ch, e); return; @@ -265,23 +269,23 @@ public void operationComplete(Future future) throws Exception { }); } - private void connect() { + private void connect() throws UnknownHostException { assert eventLoop.inEventLoop(); - LOG.trace("Connecting to {}", remoteId.address); - + LOG.trace("Connecting to {}", remoteId.getAddress()); + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); this.channel = new Bootstrap().group(eventLoop).channel(rpcClient.channelClass) .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay()) .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive) .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO) .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr) - .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() { + .remoteAddress(remoteAddr).connect().addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { Channel ch = future.channel(); if (!future.isSuccess()) { failInit(ch, toIOE(future.cause())); - rpcClient.failedServers.addToFailedServers(remoteId.address, future.cause()); + rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), 
future.cause()); return; } ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 649375a89c1c..2a2df8a7ad4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -17,35 +17,35 @@ */ package org.apache.hadoop.hbase.ipc; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; - import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hbase.CellScanner; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.ipc.RemoteException; /** * The netty rpc handler. 
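// A hedged restatement of the resolution flow that connect() above now goes through: the Address held by
// ConnectionId is only turned into an InetSocketAddress at connect time, so an unresolved host name fails
// the current attempt (and bumps the nsLookupsFailed metric) instead of failing ConnectionId construction,
// and a later connect re-resolves the name. Class and method names below are illustrative.
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.hbase.net.Address;

public class ResolveSketch {
  static InetSocketAddress resolveOrThrow(Address addr) throws UnknownHostException {
    InetSocketAddress resolved = Address.toSocketAddress(addr); // DNS lookup happens here
    if (resolved.isUnresolved()) {
      throw new UnknownHostException(addr + " could not be resolved"); // retried on a later connect
    }
    return resolved;
  }
}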
@@ -103,8 +103,8 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p ctx.write(buf, withoutCellBlockPromise); ChannelPromise cellBlockPromise = ctx.newPromise(); ctx.write(cellBlock, cellBlockPromise); - PromiseCombiner combiner = new PromiseCombiner(); - combiner.addAll(withoutCellBlockPromise, cellBlockPromise); + PromiseCombiner combiner = new PromiseCombiner(ctx.executor()); + combiner.addAll((ChannelFuture) withoutCellBlockPromise, cellBlockPromise); combiner.finish(promise); } else { ctx.write(buf, promise); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 877d9b0d5b90..5bb08152d30e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.io.Closeable; -import java.io.IOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -64,10 +63,8 @@ public interface RpcClient extends Closeable { * @param rpcTimeout default rpc operation timeout * * @return A blocking rpc channel that goes via this rpc client instance. - * @throws IOException when channel could not be created */ - BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout) - throws IOException; + BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout); /** * Creates a "channel" that can be used by a protobuf service. Useful setting up @@ -79,8 +76,7 @@ BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTim * * @return A rpc channel that goes via this rpc client instance. */ - RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout) - throws IOException; + RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout); /** * Interrupt the connections to the given server. This should be called if the server diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java index b6f9e3859304..434795248c6f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientFactory.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - import java.net.SocketAddress; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** * Factory to create a {@link org.apache.hadoop.hbase.ipc.RpcClient} @@ -46,7 +44,6 @@ private RpcClientFactory() { } /** Helper method for tests only. Creates an {@code RpcClient} without metrics. 
*/ - @VisibleForTesting public static RpcClient createClient(Configuration conf, String clusterId) { return createClient(conf, clusterId, null); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 195a16d16d36..b2c7eeae4a5a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -18,13 +18,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; -import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; @@ -59,8 +61,6 @@ abstract class RpcConnection { protected final Token token; - protected final InetAddress serverAddress; - protected final SecurityInfo securityInfo; protected final int reloginMaxBackoff; // max pause before relogin on sasl failure @@ -69,6 +69,8 @@ abstract class RpcConnection { protected final CompressionCodec compressor; + protected final MetricsConnection metrics; + protected final HashedWheelTimer timeoutTimer; protected final Configuration conf; @@ -83,17 +85,13 @@ abstract class RpcConnection { protected SaslClientAuthenticationProvider provider; protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, ConnectionId remoteId, - String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor) - throws IOException { - if (remoteId.getAddress().isUnresolved()) { - throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); - } - this.serverAddress = remoteId.getAddress().getAddress(); + String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor, + MetricsConnection metrics) throws IOException { this.timeoutTimer = timeoutTimer; this.codec = codec; this.compressor = compressor; this.conf = conf; - + this.metrics = metrics; User ticket = remoteId.getTicket(); this.securityInfo = SecurityInfo.getInfo(remoteId.getServiceName()); this.useSasl = isSecurityEnabled; @@ -127,7 +125,7 @@ protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, Conne this.remoteId = remoteId; } - protected void scheduleTimeoutTask(final Call call) { + protected final void scheduleTimeoutTask(final Call call) { if (call.timeout > 0) { call.timeoutTask = timeoutTimer.newTimeout(new TimerTask() { @@ -142,7 +140,7 @@ public void run(Timeout timeout) throws Exception { } } - protected byte[] getConnectionHeaderPreamble() { + protected final byte[] getConnectionHeaderPreamble() { // Assemble the preamble up in a buffer first and then send it. Writing individual elements, // they are getting sent across piecemeal according to wireshark and then server is messing // up the reading on occasion (the passed in stream is not buffered yet). 
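// A hedged illustration of the Netty API change adopted in NettyRpcDuplexHandler.writeRequest() above:
// newer Netty 4.1 releases expect the PromiseCombiner to be bound to an EventExecutor, and the
// ChannelFuture cast selects the non-deprecated addAll overload. Promise names are illustrative.
import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner;

public class PromiseCombinerSketch {
  static void combine(ChannelHandlerContext ctx, ChannelPromise headerPromise,
      ChannelPromise cellBlockPromise, ChannelPromise aggregate) {
    PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
    combiner.addAll((ChannelFuture) headerPromise, cellBlockPromise);
    combiner.finish(aggregate); // aggregate completes once both writes have completed
  }
}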
@@ -158,7 +156,7 @@ protected byte[] getConnectionHeaderPreamble() { return preamble; } - protected ConnectionHeader getConnectionHeader() { + protected final ConnectionHeader getConnectionHeader() { final ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); builder.setServiceName(remoteId.getServiceName()); final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); @@ -181,6 +179,21 @@ protected ConnectionHeader getConnectionHeader() { return builder.build(); } + protected final InetSocketAddress getRemoteInetAddress(MetricsConnection metrics) + throws UnknownHostException { + if (metrics != null) { + metrics.incrNsLookups(); + } + InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); + if (remoteAddr.isUnresolved()) { + if (metrics != null) { + metrics.incrNsLookupsFailed(); + } + throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); + } + return remoteAddr; + } + protected abstract void callTimeout(Call call); public ConnectionId remoteId() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java index 135c78d6674e..eae9886ca55c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java @@ -21,6 +21,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.net.Address; import org.apache.yetus.audience.InterfaceAudience; /** @@ -29,7 +30,14 @@ @SuppressWarnings("serial") @InterfaceAudience.Public public class ServerTooBusyException extends DoNotRetryIOException { + + public ServerTooBusyException(Address address, long count) { + super("Busy Server! " + count + " concurrent RPCs against " + address); + } + + @Deprecated public ServerTooBusyException(InetSocketAddress address, long count) { super("Busy Server! 
" + count + " concurrent RPCs against " + address); } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 50594c81e5c7..2d03473b9f6b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -23,7 +23,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; @@ -188,7 +187,6 @@ public static State convert(ClusterStatusProtos.RegionState.State protoState) { // The duration of region in transition private long ritDuration; - @VisibleForTesting public static RegionState createForTesting(RegionInfo region, State state) { return new RegionState(region, state, System.currentTimeMillis(), null); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java index 3c25d6e4f947..d77d8d168c80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java @@ -19,12 +19,11 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; @@ -52,7 +51,6 @@ public long getSoftLimit() { /** * Returns a copy of the internal state of this */ - @VisibleForTesting QuotaProtos.ThrottleRequest getProto() { return proto.toBuilder().build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index aba703ccdee8..5ca5cef9c4ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -79,41 +79,6 @@ private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) { return Collections.unmodifiableMap(newTableCFsMap); } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder} to create new ReplicationPeerConfig. - */ - @Deprecated - public ReplicationPeerConfig() { - this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR); - this.configuration = new HashMap<>(0); - this.serial = false; - } - - /** - * Set the clusterKey which is the concatenation of the slave cluster's: - * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setClusterKey(String)} instead. 
- */ - @Deprecated - public ReplicationPeerConfig setClusterKey(String clusterKey) { - this.clusterKey = clusterKey; - return this; - } - - /** - * Sets the ReplicationEndpoint plugin class for this peer. - * @param replicationEndpointImpl a class implementing ReplicationEndpoint - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicationEndpointImpl(String)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicationEndpointImpl(String replicationEndpointImpl) { - this.replicationEndpointImpl = replicationEndpointImpl; - return this; - } - public String getClusterKey() { return clusterKey; } @@ -134,88 +99,26 @@ public Map> getTableCFsMap() { return (Map>) tableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setTableCFsMap(Map> tableCFsMap) { - this.tableCFsMap = tableCFsMap; - return this; - } - public Set getNamespaces() { return this.namespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setNamespaces(Set)} instead. - */ - @Deprecated - public ReplicationPeerConfig setNamespaces(Set namespaces) { - this.namespaces = namespaces; - return this; - } - public long getBandwidth() { return this.bandwidth; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setBandwidth(long)} instead. - */ - @Deprecated - public ReplicationPeerConfig setBandwidth(long bandwidth) { - this.bandwidth = bandwidth; - return this; - } - public boolean replicateAllUserTables() { return this.replicateAllUserTables; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicateAllUserTables(boolean)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicateAllUserTables(boolean replicateAllUserTables) { - this.replicateAllUserTables = replicateAllUserTables; - return this; - } - public Map> getExcludeTableCFsMap() { return (Map>) excludeTableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setExcludeTableCFsMap(Map> tableCFsMap) { - this.excludeTableCFsMap = tableCFsMap; - return this; - } - public Set getExcludeNamespaces() { return this.excludeNamespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeNamespaces(Set)} instead. 
- */ - @Deprecated - public ReplicationPeerConfig setExcludeNamespaces(Set namespaces) { - this.excludeNamespaces = namespaces; - return this; - } - public String getRemoteWALDir() { return this.remoteWALDir; } @@ -294,6 +197,12 @@ public ReplicationPeerConfigBuilder putConfiguration(String key, String value) { return this; } + @Override + public ReplicationPeerConfigBuilder removeConfiguration(String key) { + this.configuration.remove(key); + return this; + } + @Override public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { this.peerData.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 58ff220e5631..c6a97fad9e81 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -52,6 +52,15 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder putConfiguration(String key, String value); + /** + * Removes a "raw" configuration property for this replication peer. For experts only. + * @param key Configuration property key to be removed + * @return {@code this} + */ + @InterfaceAudience.Private + ReplicationPeerConfigBuilder removeConfiguration(String key); + + /** + * Adds all of the provided "raw" configuration entries to {@code this}. + * @param configuration A collection of raw configuration entries diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 72fd0c87165f..8d380dc7fe6c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -24,24 +24,23 @@ import java.security.KeyException; import java.security.SecureRandom; import java.util.Properties; - import javax.crypto.spec.SecretKeySpec; - import org.apache.commons.crypto.cipher.CryptoCipherFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.crypto.Cipher; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.io.crypto.Cipher; -import org.apache.hadoop.hbase.io.crypto.Encryption; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; -import org.apache.hadoop.hbase.util.Bytes; /** * Some static utility methods for encryption uses in hbase-client.
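// A brief usage sketch for the new removeConfiguration() builder method introduced above. It assumes the
// ReplicationPeerConfig.newBuilder(existing) copy factory and uses a hypothetical property name.
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class RemovePeerConfigPropertySketch {
  static ReplicationPeerConfig dropStaleProperty(ReplicationPeerConfig existing) {
    return ReplicationPeerConfig.newBuilder(existing)      // copy the current peer config
        .removeConfiguration("replication.peer.stale.key") // hypothetical key to drop
        .build();
  }
}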
@@ -102,7 +101,9 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) } byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); - builder.setHash(UnsafeByteOperations.unsafeWrap(Encryption.hash128(keyBytes))); + builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); + builder.setHash( + UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, iv); @@ -138,13 +139,24 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value) private static Key getUnwrapKey(Configuration conf, String subject, EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { + String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); + String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); + if(!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { + String msg = String.format("Unexpected encryption key hash algorithm: %s (expecting: %s)", + wrappedHashAlgorithm, configuredHashAlgorithm); + if(Encryption.failOnHashAlgorithmMismatch(conf)) { + throw new KeyException(msg); + } + LOG.debug(msg); + } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null; Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), subject, conf, cipher, iv); byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { - if (!Bytes.equals(wrappedKey.getHash().toByteArray(), Encryption.hash128(keyBytes))) { + if (!Bytes.equals(wrappedKey.getHash().toByteArray(), + Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { throw new KeyException("Key was not successfully unwrapped"); } } @@ -180,12 +192,17 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) * @param family The current column descriptor. * @return The created encryption context. 
* @throws IOException if an encryption key for the column cannot be unwrapped + * @throws IllegalStateException in case of encryption related configuration errors */ public static Encryption.Context createEncryptionContext(Configuration conf, ColumnFamilyDescriptor family) throws IOException { Encryption.Context cryptoContext = Encryption.Context.NONE; String cipherName = family.getEncryptionType(); if (cipherName != null) { + if(!Encryption.isEncryptionEnabled(conf)) { + throw new IllegalStateException("Encryption for family '" + family.getNameAsString() + + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); + } Cipher cipher; Key key; byte[] keyBytes = family.getEncryptionKey(); @@ -195,13 +212,13 @@ public static Encryption.Context createEncryptionContext(Configuration conf, // Use the algorithm the key wants cipher = Encryption.getCipher(conf, key.getAlgorithm()); if (cipher == null) { - throw new RuntimeException("Cipher '" + key.getAlgorithm() + "' is not available"); + throw new IllegalStateException("Cipher '" + key.getAlgorithm() + "' is not available"); } // Fail if misconfigured // We use the encryption type specified in the column schema as a sanity check on // what the wrapped key is telling us if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new RuntimeException("Encryption for family '" + family.getNameAsString() + throw new IllegalStateException("Encryption for family '" + family.getNameAsString() + "' configured with type '" + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); } @@ -209,7 +226,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, // Family does not provide key material, create a random key cipher = Encryption.getCipher(conf, cipherName); if (cipher == null) { - throw new RuntimeException("Cipher '" + cipherName + "' is not available"); + throw new IllegalStateException("Cipher '" + cipherName + "' is not available"); } key = cipher.getRandomKey(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d2217c65dd03..462ffb012d93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -124,7 +124,6 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; import org.apache.hbase.thirdparty.com.google.gson.JsonArray; import org.apache.hbase.thirdparty.com.google.gson.JsonElement; @@ -290,7 +289,6 @@ private final static class ClassLoaderHolder { } } - @VisibleForTesting public static boolean isClassLoaderLoaded() { return classLoaderLoaded; } @@ -1438,6 +1436,21 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result) { + return toResult(result, false); + } + + /** + * Convert a client Result to a protocol buffer Result + * @param result the client Result to convert + * @param encodeTags whether to includeTags in converted protobuf result or not + * When @encodeTags is set to true, it will return all the tags in the response. 
+ * These tags may contain some sensitive data like acl permissions, etc. + * Only the tools like Export, Import which needs to take backup needs to set + * it to true so that cell tags are persisted in backup. + * Refer to HBASE-25246 for more context. + * @return the converted protocol buffer Result + */ + public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { if (result.getExists() != null) { return toResult(result.getExists(), result.isStale()); } @@ -1449,7 +1462,7 @@ public static ClientProtos.Result toResult(final Result result) { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); for (Cell c : cells) { - builder.addCell(toCell(c)); + builder.addCell(toCell(c, encodeTags)); } builder.setStale(result.isStale()); @@ -1496,6 +1509,22 @@ public static ClientProtos.Result toResultNoData(final Result result) { * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto) { + return toResult(proto, false); + } + + /** + * Convert a protocol buffer Result to a client Result + * + * @param proto the protocol buffer Result to convert + * @param decodeTags whether to decode tags into converted client Result + * When @decodeTags is set to true, it will decode all the tags from the + * response. These tags may contain some sensitive data like acl permissions, + * etc. Only the tools like Export, Import which needs to take backup needs to + * set it to true so that cell tags are persisted in backup. + * Refer to HBASE-25246 for more context. + * @return the converted client Result + */ + public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; @@ -1511,7 +1540,7 @@ public static Result toResult(final ClientProtos.Result proto) { List cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c : values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, decodeTags)); } return Result.create(cells, null, proto.getStale(), proto.getPartial()); } @@ -1554,7 +1583,7 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c: values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, false)); } } @@ -2002,7 +2031,7 @@ public static void toIOException(ServiceException se) throws IOException { throw new IOException(se); } - public static CellProtos.Cell toCell(final Cell kv) { + public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { // Doing this is going to kill us if we do it for all data passed. // St.Ack 20121205 CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); @@ -2017,7 +2046,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - // TODO : Once tags become first class then we may have to set tags to kvbuilder. 
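// A hedged sketch of the tag-preserving conversions added above (HBASE-25246): the boolean-taking
// overloads keep cell tags (for example ACL tags) across the protobuf round trip, which Export/Import
// style backup tools need; the original single-argument overloads keep the old behaviour and drop tags.
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

public class TagRoundTripSketch {
  static Result roundTripWithTags(Result original) {
    ClientProtos.Result pb = ProtobufUtil.toResult(original, true); // encodeTags = true keeps tags
    return ProtobufUtil.toResult(pb, true);                         // decodeTags = true restores them
  }
}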
+ if (encodeTags) { + kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), + ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); + } } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2029,6 +2061,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + if (encodeTags) { + kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), + kv.getTagsLength())); + } } return kvbuilder.build(); } @@ -2040,15 +2076,19 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { return UnsafeByteOperations.unsafeWrap(dup); } - public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); + if (decodeTags && cell.hasTags()) { + builder.setTags(cell.getTags().toByteArray()); + } + return builder.build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { @@ -2797,8 +2837,8 @@ public static ReplicationLoadSink toReplicationLoadSink( ClusterStatusProtos.ReplicationLoadSink rls) { return new ReplicationLoadSink(rls.getAgeOfLastAppliedOp(), rls.getTimeStampsOfLastAppliedOp(), - rls.getTimestampStarted(), - rls.getTotalOpsProcessed()); + rls.hasTimestampStarted()? rls.getTimestampStarted(): -1L, + rls.hasTotalOpsProcessed()? rls.getTotalOpsProcessed(): -1L); } public static ReplicationLoadSource toReplicationLoadSource( @@ -2861,10 +2901,18 @@ public static List toSecurityCapabilityList( } public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) { - return timeRange == null ? - TimeRange.allTime() : - new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, - timeRange.hasTo() ? 
timeRange.getTo() : Long.MAX_VALUE); + if (timeRange == null) { + return TimeRange.allTime(); + } + if (timeRange.hasFrom()) { + if (timeRange.hasTo()) { + return TimeRange.between(timeRange.getFrom(), timeRange.getTo()); + } else { + return TimeRange.from(timeRange.getFrom()); + } + } else { + return TimeRange.until(timeRange.getTo()); + } } /** @@ -2960,6 +3008,23 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac return GetRegionInfoResponse.CompactionState.valueOf(state.toString()); } + /** + * Creates {@link CompactionState} from + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos + * .RegionLoad.CompactionState} state + * @param state the protobuf CompactionState + * @return CompactionState + */ + public static CompactionState createCompactionStateForRegionLoad( + RegionLoad.CompactionState state) { + return CompactionState.valueOf(state.toString()); + } + + public static RegionLoad.CompactionState createCompactionStateForRegionLoad( + CompactionState state) { + return RegionLoad.CompactionState.valueOf(state.toString()); + } + public static Optional toOptionalTimestamp(MajorCompactionTimestampResponse resp) { long timestamp = resp.getCompactionTimestamp(); return timestamp == 0 ? Optional.empty() : Optional.of(timestamp); @@ -3632,9 +3697,13 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, return builder.build((Put) m); } else if (m instanceof Delete) { return builder.build((Delete) m); + } else if (m instanceof Increment) { + return builder.build((Increment) m); + } else if (m instanceof Append) { + return builder.build((Append) m); } else { - throw new DoNotRetryIOException("Unsupported mutate type: " + mutations.get(0) - .getClass().getSimpleName().toUpperCase()); + throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass() + .getSimpleName().toUpperCase()); } } else { return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 97e5adbda609..5dc5e3cf2059 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -227,17 +227,9 @@ public static ClientProtos.MultiRequest buildMutateRequest(final byte[] regionNa ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder mutationBuilder = MutationProto.newBuilder(); for (Mutation mutation: rowMutations.getMutations()) { - MutationType mutateType; - if (mutation instanceof Put) { - mutateType = MutationType.PUT; - } else if (mutation instanceof Delete) { - mutateType = MutationType.DELETE; - } else { - throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + - mutation.getClass().getName()); - } mutationBuilder.clear(); - MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder); + MutationProto mp = ProtobufUtil.toMutation(getMutationType(mutation), mutation, + mutationBuilder); actionBuilder.clear(); actionBuilder.setMutation(mp); builder.addAction(actionBuilder.build()); @@ -343,17 +335,9 @@ public static RegionAction.Builder buildRegionAction(final byte [] regionName, ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder 
mutationBuilder = MutationProto.newBuilder(); for (Mutation mutation: rowMutations.getMutations()) { - MutationType mutateType = null; - if (mutation instanceof Put) { - mutateType = MutationType.PUT; - } else if (mutation instanceof Delete) { - mutateType = MutationType.DELETE; - } else { - throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + - mutation.getClass().getName()); - } mutationBuilder.clear(); - MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder); + MutationProto mp = ProtobufUtil.toMutation(getMutationType(mutation), mutation, + mutationBuilder); actionBuilder.clear(); actionBuilder.setMutation(mp); builder.addAction(actionBuilder.build()); @@ -705,17 +689,9 @@ private static void buildNoDataRegionAction(final RowMutations rowMutations, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException { for (Mutation mutation: rowMutations.getMutations()) { - MutationType type; - if (mutation instanceof Put) { - type = MutationType.PUT; - } else if (mutation instanceof Delete) { - type = MutationType.DELETE; - } else { - throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + - mutation.getClass().getName()); - } mutationBuilder.clear(); - MutationProto mp = ProtobufUtil.toMutationNoData(type, mutation, mutationBuilder); + MutationProto mp = ProtobufUtil.toMutationNoData(getMutationType(mutation), mutation, + mutationBuilder); cells.add(mutation); actionBuilder.clear(); regionActionBuilder.addAction(actionBuilder.setMutation(mp).build()); @@ -723,7 +699,6 @@ private static void buildNoDataRegionAction(final RowMutations rowMutations, } private static MutationType getMutationType(Mutation mutation) { - assert !(mutation instanceof CheckAndMutate); if (mutation instanceof Put) { return MutationType.PUT; } else if (mutation instanceof Delete) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index 97ab9fdf9335..d62f0ac74e22 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -148,8 +148,6 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult actionResult.getResultOrExceptionCount() + " for region " + actions.getRegion()); } - Object responseValue; - // For RowMutations/CheckAndMutate action, if there is an exception, the exception is set // at the RegionActionResult level and the ResultOrException is null at the original index Integer index = (indexMap == null ? null : indexMap.get(i)); @@ -158,39 +156,22 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult // If there is an exception from the server, the exception is set at // the RegionActionResult level, which has been handled above. 
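Taken together with the toCheckAndMutate change above, routing every mutation through getMutationType means RowMutations and CheckAndMutate are no longer limited to Put and Delete. A minimal client-side sketch (ROW, CF, Q1, Q2, COUNTER, LOG, VALUE and SUFFIX are hypothetical byte[] constants; checked exceptions elided):

  RowMutations rm = new RowMutations(ROW);
  rm.add(new Put(ROW).addColumn(CF, Q1, VALUE));
  rm.add(new Delete(ROW).addColumns(CF, Q2));
  rm.add(new Increment(ROW).addColumn(CF, COUNTER, 1L)); // accepted now that the Put/Delete-only check is gone
  rm.add(new Append(ROW).addColumn(CF, LOG, SUFFIX));    // likewise

  CheckAndMutate cam = CheckAndMutate.newBuilder(ROW)
      .ifEquals(CF, Q1, VALUE)
      .build(new Increment(ROW).addColumn(CF, COUNTER, 1L));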
if (actions.hasCondition()) { - Result result = null; - if (actionResult.getResultOrExceptionCount() > 0) { - ResultOrException roe = actionResult.getResultOrException(0); - if (roe.hasResult()) { - Result r = ProtobufUtil.toResult(roe.getResult(), cells); - if (!r.isEmpty()) { - result = r; - } - } - } - responseValue = new CheckAndMutateResult(actionResult.getProcessed(), result); + results.add(regionName, index, getCheckAndMutateResult(actionResult, cells)); } else { - responseValue = actionResult.getProcessed() ? - ProtobufUtil.EMPTY_RESULT_EXISTS_TRUE : - ProtobufUtil.EMPTY_RESULT_EXISTS_FALSE; + results.add(regionName, index, getMutateRowResult(actionResult, cells)); } - results.add(regionName, index, responseValue); continue; } if (actions.hasCondition()) { - Result result = null; - if (actionResult.getResultOrExceptionCount() > 0) { - ResultOrException roe = actionResult.getResultOrException(0); - Result r = ProtobufUtil.toResult(roe.getResult(), cells); - if (!r.isEmpty()) { - result = r; - } - } - responseValue = new CheckAndMutateResult(actionResult.getProcessed(), result); - results.add(regionName, 0, responseValue); + results.add(regionName, 0, getCheckAndMutateResult(actionResult, cells)); } else { + if (actionResult.hasProcessed()) { + results.add(regionName, 0, getMutateRowResult(actionResult, cells)); + continue; + } for (ResultOrException roe : actionResult.getResultOrExceptionList()) { + Object responseValue; if (roe.hasException()) { responseValue = ProtobufUtil.toException(roe.getException()); } else if (roe.hasResult()) { @@ -198,12 +179,7 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult } else if (roe.hasServiceResult()) { responseValue = roe.getServiceResult(); } else { - // Sometimes, the response is just "it was processed". Generally, this occurs for things - // like mutateRows where either we get back 'processed' (or not) and optionally some - // statistics about the regions we touched. - responseValue = actionResult.getProcessed() ? 
- ProtobufUtil.EMPTY_RESULT_EXISTS_TRUE : - ProtobufUtil.EMPTY_RESULT_EXISTS_FALSE; + responseValue = ProtobufUtil.EMPTY_RESULT_EXISTS_TRUE; } results.add(regionName, roe.getIndex(), responseValue); } @@ -220,6 +196,47 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult return results; } + private static CheckAndMutateResult getCheckAndMutateResult(RegionActionResult actionResult, + CellScanner cells) throws IOException { + Result result = null; + if (actionResult.getResultOrExceptionCount() > 0) { + // Get the result of the Increment/Append operations from the first element of the + // ResultOrException list + ResultOrException roe = actionResult.getResultOrException(0); + if (roe.hasResult()) { + Result r = ProtobufUtil.toResult(roe.getResult(), cells); + if (!r.isEmpty()) { + result = r; + } + } + } + return new CheckAndMutateResult(actionResult.getProcessed(), result); + } + + private static Result getMutateRowResult(RegionActionResult actionResult, CellScanner cells) + throws IOException { + if (actionResult.getProcessed()) { + Result result = null; + if (actionResult.getResultOrExceptionCount() > 0) { + // Get the result of the Increment/Append operations from the first element of the + // ResultOrException list + ResultOrException roe = actionResult.getResultOrException(0); + Result r = ProtobufUtil.toResult(roe.getResult(), cells); + if (!r.isEmpty()) { + r.setExists(true); + result = r; + } + } + if (result != null) { + return result; + } else { + return ProtobufUtil.EMPTY_RESULT_EXISTS_TRUE; + } + } else { + return ProtobufUtil.EMPTY_RESULT_EXISTS_FALSE; + } + } + /** * Create a CheckAndMutateResult object from a protocol buffer MutateResponse * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index cb3bf4ac6d9c..057cb7e37555 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -18,17 +18,14 @@ */ package org.apache.hadoop.hbase.util; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; @@ -45,177 +42,102 @@ * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. *

* + *

+ * PoolMap is thread-safe. It does not remove elements automatically. Unused resources + * must be closed and removed explicitly. + *
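A small usage sketch for the reworked PoolMap API (MyConnection is a hypothetical Closeable resource type; getOrCreate invokes the supplier only on a pool miss, and, per the note above, the caller closes and removes resources itself):

  PoolMap<String, MyConnection> pool = new PoolMap<>(PoolMap.PoolType.RoundRobin, 5);

  MyConnection conn = pool.getOrCreate("server-a",
      () -> new MyConnection("server-a"));  // PoolResourceSupplier; may throw IOException

  // ... use the connection ...

  conn.close();                  // the pool never closes resources on its own
  pool.remove("server-a", conn); // drop it from the pool explicitly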

+ * * @param * the type of the key to the resource * @param * the type of the resource being pooled */ @InterfaceAudience.Private -public class PoolMap implements Map { - private PoolType poolType; - - private int poolMaxSize; - - private Map> pools = new ConcurrentHashMap<>(); - - public PoolMap(PoolType poolType) { - this.poolType = poolType; - } - - public PoolMap(PoolType poolType, int poolMaxSize) { - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; +public class PoolMap { + private final Map> pools; + private final PoolType poolType; + private final int poolMaxSize; + + public PoolMap(PoolType poolType, int poolMaxSize) { + pools = new HashMap<>(); + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; } - @Override - public V get(Object key) { - Pool pool = pools.get(key); - return pool != null ? pool.get() : null; + public V getOrCreate(K key, PoolResourceSupplier supplier) throws IOException { + synchronized (pools) { + Pool pool = pools.get(key); + + if (pool == null) { + pool = createPool(); + pools.put(key, pool); + } + + try { + return pool.getOrCreate(supplier); + } catch (IOException | RuntimeException | Error e) { + if (pool.size() == 0) { + pools.remove(key); + } + + throw e; + } + } } + public boolean remove(K key, V value) { + synchronized (pools) { + Pool pool = pools.get(key); - @Override - public V put(K key, V value) { - Pool pool = pools.get(key); - if (pool == null) { - pools.put(key, pool = createPool()); - } - return pool != null ? pool.put(value) : null; - } + if (pool == null) { + return false; + } - @SuppressWarnings("unchecked") - @Override - public V remove(Object key) { - Pool pool = pools.remove(key); - if (pool != null) { - removeValue((K) key, pool.get()); - } - return null; - } + boolean removed = pool.remove(value); - public boolean removeValue(K key, V value) { - Pool pool = pools.get(key); - boolean res = false; - if (pool != null) { - res = pool.remove(value); - if (res && pool.size() == 0) { + if (removed && pool.size() == 0) { pools.remove(key); } - } - return res; - } - @Override - public Collection values() { - Collection values = new ArrayList<>(); - for (Pool pool : pools.values()) { - Collection poolValues = pool.values(); - if (poolValues != null) { - values.addAll(poolValues); - } + return removed; } - return values; } - public Collection values(K key) { - Collection values = new ArrayList<>(); - Pool pool = pools.get(key); - if (pool != null) { - Collection poolValues = pool.values(); - if (poolValues != null) { - values.addAll(poolValues); - } - } - return values; - } - - - @Override - public boolean isEmpty() { - return pools.isEmpty(); - } - - @Override - public int size() { - return pools.size(); - } - - public int size(K key) { - Pool pool = pools.get(key); - return pool != null ? 
pool.size() : 0; - } - - @Override - public boolean containsKey(Object key) { - return pools.containsKey(key); - } + public List values() { + List values = new ArrayList<>(); - @Override - public boolean containsValue(Object value) { - if (value == null) { - return false; - } - for (Pool pool : pools.values()) { - if (value.equals(pool.get())) { - return true; + synchronized (pools) { + for (Pool pool : pools.values()) { + Collection poolValues = pool.values(); + if (poolValues != null) { + values.addAll(poolValues); + } } } - return false; - } - @Override - public void putAll(Map map) { - for (Map.Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } + return values; } - @Override public void clear() { - for (Pool pool : pools.values()) { - pool.clear(); + synchronized (pools) { + for (Pool pool : pools.values()) { + pool.clear(); + } + + pools.clear(); } - pools.clear(); } - @Override - public Set keySet() { - return pools.keySet(); + public interface PoolResourceSupplier { + R get() throws IOException; } - @Override - public Set> entrySet() { - Set> entries = new HashSet<>(); - for (Map.Entry> poolEntry : pools.entrySet()) { - final K poolKey = poolEntry.getKey(); - final Pool pool = poolEntry.getValue(); - if (pool != null) { - for (final V poolValue : pool.values()) { - entries.add(new Map.Entry() { - @Override - public K getKey() { - return poolKey; - } - - @Override - public V getValue() { - return poolValue; - } - - @Override - public V setValue(V value) { - return pool.put(value); - } - }); - } - } - } - return entries; + protected static V createResource(PoolResourceSupplier supplier) throws IOException { + V resource = supplier.get(); + return Objects.requireNonNull(resource, "resource cannot be null."); } protected interface Pool { - R get(); - - R put(R resource); + R getOrCreate(PoolResourceSupplier supplier) throws IOException; boolean remove(R resource); @@ -254,8 +176,9 @@ protected Pool createPool() { return new RoundRobinPool<>(poolMaxSize); case ThreadLocal: return new ThreadLocalPool<>(); + default: + return new RoundRobinPool<>(poolMaxSize); } - return null; } /** @@ -275,43 +198,66 @@ protected Pool createPool() { * */ @SuppressWarnings("serial") - static class RoundRobinPool extends CopyOnWriteArrayList implements Pool { - private int maxSize; - private int nextResource = 0; + static class RoundRobinPool implements Pool { + private final List resources; + private final int maxSize; + + private int nextIndex; public RoundRobinPool(int maxSize) { + if (maxSize <= 0) { + throw new IllegalArgumentException("maxSize must be positive"); + } + + resources = new ArrayList<>(maxSize); this.maxSize = maxSize; } @Override - public R put(R resource) { - if (super.size() < maxSize) { - add(resource); + public R getOrCreate(PoolResourceSupplier supplier) throws IOException { + int size = resources.size(); + R resource; + + /* letting pool to grow */ + if (size < maxSize) { + resource = createResource(supplier); + resources.add(resource); + } else { + resource = resources.get(nextIndex); + + /* at this point size cannot be 0 */ + nextIndex = (nextIndex + 1) % size; } - return null; + + return resource; } @Override - public R get() { - if (super.size() < maxSize) { - return null; - } - nextResource %= super.size(); - R resource = get(nextResource++); - return resource; + public boolean remove(R resource) { + return resources.remove(resource); + } + + @Override + public void clear() { + resources.clear(); } @Override public Collection values() { - return this; 
+ return resources; } + @Override + public int size() { + return resources.size(); + } } /** * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * builds on the {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. + * works similarly to the {@link ThreadLocal} class. It essentially binds the resource + * to the thread from which it is accessed. It does not remove resources when a thread exits; + * those resources must be closed manually. * *

* Note that the size of the pool is essentially bounded by the number of threads @@ -321,62 +267,45 @@ public Collection values() { * @param * the type of the resource */ - static class ThreadLocalPool extends ThreadLocal implements Pool { - private static final Map, AtomicInteger> poolSizes = new HashMap<>(); + static class ThreadLocalPool implements Pool { + private final Map resources; public ThreadLocalPool() { + resources = new HashMap<>(); } @Override - public R put(R resource) { - R previousResource = get(); - if (previousResource == null) { - AtomicInteger poolSize = poolSizes.get(this); - if (poolSize == null) { - poolSizes.put(this, poolSize = new AtomicInteger(0)); - } - poolSize.incrementAndGet(); - } - this.set(resource); - return previousResource; - } + public R getOrCreate(PoolResourceSupplier supplier) throws IOException { + Thread myself = Thread.currentThread(); + R resource = resources.get(myself); - @Override - public void remove() { - super.remove(); - AtomicInteger poolSize = poolSizes.get(this); - if (poolSize != null) { - poolSize.decrementAndGet(); + if (resource == null) { + resource = createResource(supplier); + resources.put(myself, resource); } + + return resource; } @Override - public int size() { - AtomicInteger poolSize = poolSizes.get(this); - return poolSize != null ? poolSize.get() : 0; + public boolean remove(R resource) { + /* remove can be called from any thread */ + return resources.values().remove(resource); } @Override - public boolean remove(R resource) { - R previousResource = super.get(); - if (resource != null && resource.equals(previousResource)) { - remove(); - return true; - } else { - return false; - } + public int size() { + return resources.size(); } @Override public void clear() { - super.remove(); + resources.clear(); } @Override public Collection values() { - List values = new ArrayList<>(); - values.add(get()); - return values; + return resources.values(); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index b4f3ccba9cd0..a1475de8a1ea 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -42,8 +42,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * A very simple read only zookeeper implementation without watcher support. 
*/ @@ -117,7 +115,6 @@ public long getDelay(TimeUnit unit) { private final AtomicBoolean closed = new AtomicBoolean(false); - @VisibleForTesting ZooKeeper zookeeper; private int pendingRequests = 0; @@ -365,7 +362,6 @@ public void close() { } } - @VisibleForTesting public String getConnectString() { return connectString; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 5c49808807ff..71936b9f36d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -17,22 +17,15 @@ */ package org.apache.hadoop.hbase.zookeeper; -import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; -import java.util.Collection; -import java.util.Optional; -import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - /** * Class that hold all the paths of znode for HBase. */ @@ -55,11 +48,6 @@ public class ZNodePaths { */ private final String metaZNodePrefix; - /** - * znodes containing the locations of the servers hosting the meta replicas - */ - private final ImmutableMap metaReplicaZNodes; - // znode containing ephemeral nodes of the regionservers public final String rsZNode; // znode containing ephemeral nodes of the draining regionservers @@ -86,8 +74,6 @@ public class ZNodePaths { public final String regionNormalizerZNode; // znode containing the state of all switches, currently there are split and merge child node. 
public final String switchZNode; - // znode containing namespace descriptors - public final String namespaceZNode; // znode of indicating master maintenance mode public final String masterMaintZNode; @@ -104,14 +90,7 @@ public class ZNodePaths { public ZNodePaths(Configuration conf) { baseZNode = conf.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); - ImmutableMap.Builder builder = ImmutableMap.builder(); metaZNodePrefix = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX); - String defaultMetaReplicaZNode = ZNodePaths.joinZNode(baseZNode, metaZNodePrefix); - builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode); - int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM); - IntStream.range(1, numMetaReplicas) - .forEachOrdered(i -> builder.put(i, defaultMetaReplicaZNode + "-" + i)); - metaReplicaZNodes = builder.build(); rsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); @@ -125,7 +104,6 @@ public ZNodePaths(Configuration conf) { regionNormalizerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); - namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); masterMaintZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); @@ -142,7 +120,6 @@ public ZNodePaths(Configuration conf) { public String toString() { return new StringBuilder() .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", metaReplicaZNodes=").append(metaReplicaZNodes) .append(", rsZNode=").append(rsZNode) .append(", drainingZNode=").append(drainingZNode) .append(", masterAddressZNode=").append(masterAddressZNode) @@ -154,7 +131,6 @@ public String toString() { .append(", balancerZNode=").append(balancerZNode) .append(", regionNormalizerZNode=").append(regionNormalizerZNode) .append(", switchZNode=").append(switchZNode) - .append(", namespaceZNode=").append(namespaceZNode) .append(", masterMaintZNode=").append(masterMaintZNode) .append(", replicationZNode=").append(replicationZNode) .append(", peersZNode=").append(peersZNode) @@ -164,29 +140,15 @@ public String toString() { .append("]").toString(); } - /** - * @return true if the znode is a meta region replica - */ - public boolean isAnyMetaReplicaZNode(String node) { - return this.metaReplicaZNodes.containsValue(node); - } - - /** - * @return Meta Replica ZNodes - */ - public Collection getMetaReplicaZNodes() { - return this.metaReplicaZNodes.values(); - } - /** * @return the znode string corresponding to a replicaId */ public String getZNodeForReplica(int replicaId) { - // return a newly created path but don't update the cache of paths - // This is mostly needed for tests that attempt to create meta replicas - // from outside the master - return Optional.ofNullable(metaReplicaZNodes.get(replicaId)) - .orElseGet(() -> metaReplicaZNodes.get(DEFAULT_REPLICA_ID) + "-" + replicaId); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + return joinZNode(baseZNode, metaZNodePrefix); + } else { + return joinZNode(baseZNode, metaZNodePrefix + "-" + replicaId); + } } /** @@ -198,7 +160,7 @@ public int getMetaReplicaIdFromPath(String path) { // 
Extract the znode from path. The prefix is of the following format. // baseZNode + PATH_SEPARATOR. int prefixLen = baseZNode.length() + 1; - return getMetaReplicaIdFromZnode(path.substring(prefixLen)); + return getMetaReplicaIdFromZNode(path.substring(prefixLen)); } /** @@ -206,7 +168,7 @@ public int getMetaReplicaIdFromPath(String path) { * @param znode the name of the znode, does not include baseZNode * @return replicaId */ - public int getMetaReplicaIdFromZnode(String znode) { + public int getMetaReplicaIdFromZNode(String znode) { return znode.equals(metaZNodePrefix)? RegionInfo.DEFAULT_REPLICA_ID: Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); @@ -220,17 +182,25 @@ public boolean isMetaZNodePrefix(String znode) { } /** - * Returns whether the znode is supposed to be readable by the client and DOES NOT contain + * @return True is the fully qualified path is for meta location + */ + public boolean isMetaZNodePath(String path) { + int prefixLen = baseZNode.length() + 1; + return path.length() > prefixLen && isMetaZNodePrefix(path.substring(prefixLen)); + } + + /** + * Returns whether the path is supposed to be readable by the client and DOES NOT contain * sensitive information (world readable). */ - public boolean isClientReadable(String node) { + public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. - return node.equals(baseZNode) || isAnyMetaReplicaZNode(node) || - node.equals(masterAddressZNode) || node.equals(clusterIdZNode) || node.equals(rsZNode) || + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || + path.equals(clusterIdZNode) || path.equals(rsZNode) || // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not - node.equals(tableZNode) || node.startsWith(tableZNode + "/"); + path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java index 78e0fdba3016..628655a083c2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java @@ -19,13 +19,19 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; @Category({ ClientTests.class, SmallTests.class }) public class TestCatalogFamilyFormat { @@ -34,6 +40,9 @@ public class TestCatalogFamilyFormat { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class); + @Rule + public TestName name = new TestName(); + @Test public void testParseReplicaIdFromServerColumn() { String column1 = HConstants.SERVER_QUALIFIER_STR; @@ -70,4 +79,27 @@ public 
void testMetaReaderGetColumnMethods() { HConstants.SEQNUM_QUALIFIER_STR + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), CatalogFamilyFormat.getSeqNumColumn(42)); } + + /** + * The info we can get from the regionName is: table name, start key, regionId, replicaId. + */ + @Test + public void testParseRegionInfoFromRegionName() throws IOException { + RegionInfo originalRegionInfo = RegionInfoBuilder.newBuilder( + TableName.valueOf(name.getMethodName())).setRegionId(999999L) + .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")) + .setReplicaId(1).build(); + RegionInfo newParsedRegionInfo = CatalogFamilyFormat + .parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); + assertEquals("Parse TableName error", originalRegionInfo.getTable(), + newParsedRegionInfo.getTable()); + assertEquals("Parse regionId error", originalRegionInfo.getRegionId(), + newParsedRegionInfo.getRegionId()); + assertTrue("Parse startKey error", Bytes.equals(originalRegionInfo.getStartKey(), + newParsedRegionInfo.getStartKey())); + assertEquals("Parse replicaId error", originalRegionInfo.getReplicaId(), + newParsedRegionInfo.getReplicaId()); + assertTrue("We can't parse endKey from regionName only", + Bytes.equals(HConstants.EMPTY_END_ROW, newParsedRegionInfo.getEndKey())); + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index d6ea1b3cef8f..7528d24705cf 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @@ -39,6 +40,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import java.util.Map; @Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { @@ -181,4 +183,52 @@ public void testSetTimeToLive() throws HBaseException { builder.setTimeToLive(ttl); Assert.assertEquals(43282800, builder.build().getTimeToLive()); } + + /** + * Test for verifying the ColumnFamilyDescriptorBuilder's default values so that backward + * compatibility with hbase-1.x can be mantained (see HBASE-24981). 
+ */ + @Test + public void testDefaultBuilder() { + final Map defaultValueMap = ColumnFamilyDescriptorBuilder.getDefaultValues(); + assertEquals(defaultValueMap.size(), 11); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOOMFILTER), + BloomType.ROW.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE), "0"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MAX_VERSIONS), "1"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MIN_VERSIONS), "0"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.COMPRESSION), + Compression.Algorithm.NONE.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.TTL), + Integer.toString(Integer.MAX_VALUE)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKSIZE), + Integer.toString(64 * 1024)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.IN_MEMORY), + Boolean.toString(false)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKCACHE), + Boolean.toString(true)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS), + KeepDeletedCells.FALSE.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING), + DataBlockEncoding.NONE.toString()); + } + + @Test + public void testSetEmptyValue() { + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); + String testConf = "TestConfiguration"; + String testValue = "TestValue"; + // test set value + builder.setValue(testValue, "2"); + assertEquals("2", Bytes.toString(builder.build().getValue(Bytes.toBytes(testValue)))); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test set configuration + builder.setConfiguration(testConf, "1"); + assertEquals("1", builder.build().getConfigurationValue(testConf)); + builder.setConfiguration(testConf, ""); + assertNull(builder.build().getConfigurationValue(testConf)); + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistryHedgedReads.java index 0af01984218d..40a38c706a10 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistryHedgedReads.java @@ -90,14 +90,12 @@ public RpcClientImpl(Configuration configuration, String clusterId, SocketAddres } @Override - public BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout) - throws IOException { + public BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout) { throw new UnsupportedOperationException(); } @Override - public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) - throws IOException { + public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) { return new RpcChannelImpl(); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 89d740c824ef..05a0b31d1e8a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -19,6 
+19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -27,6 +28,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BuilderStyleTest; @@ -216,6 +219,33 @@ public void testGetMaxFileSize() { assertEquals(1111L, desc.getMaxFileSize()); } + @Test + public void testSetMaxFileSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String maxFileSize = "1073741824"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "1GB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "10GB 25MB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10763632640L, builder.build().getMaxFileSize()); + + // ignore case + maxFileSize = "10GB 512mb 512KB 512b"; + builder.setMaxFileSize(maxFileSize); + assertEquals(11274813952L, builder.build().getMaxFileSize()); + + maxFileSize = "10737942528 B (10GB 512KB)"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10737942528L, builder.build().getMaxFileSize()); + } + /** * Test default value handling for memStoreFlushSize */ @@ -229,6 +259,33 @@ public void testGetMemStoreFlushSize() { assertEquals(1111L, desc.getMemStoreFlushSize()); } + @Test + public void testSetMemStoreFlushSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String memstoreFlushSize = "1073741824"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "1GB"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10GB 25MB"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10763632640L, builder.build().getMemStoreFlushSize()); + + // ignore case + memstoreFlushSize = "10GB 512mb 512KB 512b"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(11274813952L, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10737942528 B (10GB 512KB)"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10737942528L, builder.build().getMemStoreFlushSize()); + } + @Test public void testClassMethodsAreBuilderStyle() { BuilderStyleTest.assertClassesAreBuilderStyle(TableDescriptorBuilder.class); @@ -279,7 +336,7 @@ public void testPriority() { } @Test - public void testStringCustomizedValues() { + public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); @@ -290,5 +347,44 @@ public void testStringCustomizedValues() { "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", 
htd.toStringCustomizedValues()); + + htd = TableDescriptorBuilder.newBuilder(htd) + .setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB") + .build(); + assertEquals( + "'testStringCustomizedValues', " + + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", + htd.toStringCustomizedValues()); + } + + @Test + public void testGetSetRegionServerGroup() { + String groupName = name.getMethodName(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setRegionServerGroup(groupName).build(); + assertEquals(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP), groupName); + htd = TableDescriptorBuilder.newBuilder(htd).setRegionServerGroup(null).build(); + assertNull(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP)); + } + + @Test + public void testSetEmptyValue() { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + String testValue = "TestValue"; + // test setValue + builder.setValue(testValue, "2"); + assertEquals("2", builder.build().getValue(testValue)); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test setFlushPolicyClassName + builder.setFlushPolicyClassName("class"); + assertEquals("class", builder.build().getFlushPolicyClassName()); + builder.setFlushPolicyClassName(""); + assertNull(builder.build().getFlushPolicyClassName()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java index 3e10f7409c5e..48a079d3e75b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java @@ -22,10 +22,10 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; -import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -43,7 +43,7 @@ public class TestConnectionId { private User testUser1 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); private User testUser2 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); private String serviceName = "test"; - private InetSocketAddress address = new InetSocketAddress(999); + private Address address = Address.fromParts("localhost", 999); private ConnectionId connectionId1 = new ConnectionId(testUser1, serviceName, address); private ConnectionId connectionId2 = new ConnectionId(testUser2, serviceName, address); @@ -66,7 +66,7 @@ public void testGetTicket() { @Test public void testToString() { - String expectedString = "0.0.0.0/0.0.0.0:999/test/test (auth:SIMPLE)"; + String expectedString = "localhost:999/test/test (auth:SIMPLE)"; assertEquals(expectedString, connectionId1.toString()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index eb1877f189dc..fa44022f8d09 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.log4j.Appender; @@ -51,7 +51,7 @@ public class TestFailedServersLog { HBaseClassTestRule.forClass(TestFailedServersLog.class); static final int TEST_PORT = 9999; - private InetSocketAddress addr; + private Address addr; @Mock private Appender mockAppender; @@ -74,7 +74,7 @@ public void testAddToFailedServersLogging() { Throwable nullException = new NullPointerException(); FailedServers fs = new FailedServers(new Configuration()); - addr = new InetSocketAddress(TEST_PORT); + addr = Address.fromParts("localhost", TEST_PORT); fs.addToFailedServers(addr, nullException); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 9e1ab2eb9ff5..d1443a1068a5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.FutureUtils; @@ -100,7 +100,7 @@ public void testWrapConnectionException() throws Exception { for (Class clazz : ClientExceptionsUtil.getConnectionExceptionTypes()) { exceptions.add(create(clazz)); } - InetSocketAddress addr = InetSocketAddress.createUnresolved("127.0.0.1", 12345); + Address addr = Address.fromParts("127.0.0.1", 12345); for (Throwable exception : exceptions) { if (exception instanceof TimeoutException) { assertThat(IPCUtil.wrapException(addr, exception), instanceOf(TimeoutIOException.class)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java index ab75d6011ab8..8782fe116b07 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java @@ -26,9 +26,9 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; -import java.net.InetSocketAddress; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import 
org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -59,7 +59,7 @@ public class TestNettyRpcConnection { public static void setUp() throws IOException { CLIENT = new NettyRpcClient(HBaseConfiguration.create()); CONN = new NettyRpcConnection(CLIENT, - new ConnectionId(User.getCurrent(), "test", new InetSocketAddress("localhost", 1234))); + new ConnectionId(User.getCurrent(), "test", Address.fromParts("localhost", 1234))); } @AfterClass diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ca2829a8065a..ba1e27258d2d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index 59bebcf4a9a6..9275dc9e763c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -40,6 +41,9 @@ @Category({ClientTests.class, SmallTests.class}) public class TestEncryptionUtil { + private static final String INVALID_HASH_ALG = "this-hash-algorithm-not-exists hopefully... :)"; + private static final String DEFAULT_HASH_ALGORITHM = "use-default"; + @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestEncryptionUtil.class); @@ -49,16 +53,108 @@ public class TestEncryptionUtil { // untested. Not ideal! 
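The ipc test changes above track the client-side switch from java.net.InetSocketAddress to org.apache.hadoop.hbase.net.Address for connection identity. A tiny before/after sketch (illustrative only):

  InetSocketAddress isa = new InetSocketAddress(999);  // toString(): "0.0.0.0/0.0.0.0:999"
  Address addr = Address.fromParts("localhost", 999);  // toString(): "localhost:999", plain host:port holder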
@Test - public void testKeyWrapping() throws Exception { + public void testKeyWrappingUsingHashAlgDefault() throws Exception { + testKeyWrapping(DEFAULT_HASH_ALGORITHM); + } + + @Test + public void testKeyWrappingUsingHashAlgMD5() throws Exception { + testKeyWrapping("MD5"); + } + + @Test + public void testKeyWrappingUsingHashAlgSHA256() throws Exception { + testKeyWrapping("SHA-256"); + } + + @Test + public void testKeyWrappingUsingHashAlgSHA384() throws Exception { + testKeyWrapping("SHA-384"); + } + + @Test(expected = RuntimeException.class) + public void testKeyWrappingWithInvalidHashAlg() throws Exception { + testKeyWrapping(INVALID_HASH_ALG); + } + + @Test + public void testWALKeyWrappingUsingHashAlgDefault() throws Exception { + testWALKeyWrapping(DEFAULT_HASH_ALGORITHM); + } + + @Test + public void testWALKeyWrappingUsingHashAlgMD5() throws Exception { + testWALKeyWrapping("MD5"); + } + + @Test + public void testWALKeyWrappingUsingHashAlgSHA256() throws Exception { + testWALKeyWrapping("SHA-256"); + } + + @Test + public void testWALKeyWrappingUsingHashAlgSHA384() throws Exception { + testWALKeyWrapping("SHA-384"); + } + + @Test(expected = RuntimeException.class) + public void testWALKeyWrappingWithInvalidHashAlg() throws Exception { + testWALKeyWrapping(INVALID_HASH_ALG); + } + + @Test(expected = KeyException.class) + public void testWALKeyWrappingWithIncorrectKey() throws Exception { + // set up the key provider for testing to resolve a key for our test subject + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + + // generate a test key + byte[] keyBytes = new byte[AES.KEY_LENGTH]; + new SecureRandom().nextBytes(keyBytes); + String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Key key = new SecretKeySpec(keyBytes, algorithm); + + // wrap the test key + byte[] wrappedKeyBytes = EncryptionUtil.wrapKey(conf, "hbase", key); + assertNotNull(wrappedKeyBytes); + + // unwrap with an incorrect key + EncryptionUtil.unwrapWALKey(conf, "other", wrappedKeyBytes); + } + + @Test(expected = KeyException.class) + public void testHashAlgorithmMismatchWhenFailExpected() throws Exception { + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + conf.setBoolean(Encryption.CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY, true); + testKeyWrappingWithMismatchingAlgorithms(conf); + } + + @Test + public void testHashAlgorithmMismatchWhenFailNotExpected() throws Exception { + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + conf.setBoolean(Encryption.CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY, false); + testKeyWrappingWithMismatchingAlgorithms(conf); + } + + @Test + public void testHashAlgorithmMismatchShouldNotFailWithDefaultConfig() throws Exception { + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + testKeyWrappingWithMismatchingAlgorithms(conf); + } + + private void testKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); + } // generate a 
test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; new SecureRandom().nextBytes(keyBytes); String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key @@ -72,7 +168,7 @@ public void testKeyWrapping() throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); // unwrap with an incorrect key try { @@ -83,11 +179,13 @@ public void testKeyWrapping() throws Exception { } } - @Test - public void testWALKeyWrapping() throws Exception { + private void testWALKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); + } // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; @@ -106,26 +204,37 @@ public void testWALKeyWrapping() throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } - @Test(expected = KeyException.class) - public void testWALKeyWrappingWithIncorrectKey() throws Exception { - // set up the key provider for testing to resolve a key for our test subject - Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws Exception { + // we use MD5 to hash the encryption key during wrapping conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, "MD5"); // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; new SecureRandom().nextBytes(keyBytes); - String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key byte[] wrappedKeyBytes = EncryptionUtil.wrapKey(conf, "hbase", key); assertNotNull(wrappedKeyBytes); - // unwrap with an incorrect key - EncryptionUtil.unwrapWALKey(conf, "other", wrappedKeyBytes); + // we set the default hash algorithm to SHA-384 during unwrapping + conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, "SHA-384"); + + // unwrap + // we expect to fail, if CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY == true + // otherwise we will use the algorithm written during wrapping + Key unwrappedKey = EncryptionUtil.unwrapKey(conf, "hbase", wrappedKeyBytes); + assertNotNull(unwrappedKey); + + // did we get back what we wrapped? 
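For reference, the configuration surface these encryption tests exercise can be wired up roughly like this (a sketch; keyBytes and the "hbase" key name follow the test setup above, exception handling elided):

  Configuration conf = new Configuration();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  // Hash algorithm used to hash the encryption key during wrapping (MD5, SHA-256, SHA-384, ...).
  conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, "SHA-384");
  // Refuse to unwrap if the stored hash was produced with a different algorithm.
  conf.setBoolean(Encryption.CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY, true);

  Key key = new SecretKeySpec(keyBytes, HConstants.CIPHER_AES);
  byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", key);
  Key unwrapped = EncryptionUtil.unwrapKey(conf, "hbase", wrapped);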
+ assertTrue("Unwrapped key bytes do not match original", + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } + } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 7d6eda817cfa..c47150b04858 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,17 +18,24 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -63,7 +70,8 @@ public class TestProtobufUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProtobufUtil.class); - + private static final String TAG_STR = "tag-1"; + private static final byte TAG_TYPE = (byte)10; public TestProtobufUtil() { } @@ -271,9 +279,10 @@ public void testToCell() { ByteBuffer dbb = ByteBuffer.allocateDirect(arr.length); dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); - CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV); + CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell); + ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, + false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } @@ -479,4 +488,92 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to true. 
+ */ + @Test + public void testCellConversionWithTags() { + + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(1, decodedTags.size()); + Tag decodedTag = decodedTags.get(0); + assertEquals(TAG_TYPE, decodedTag.getType()); + assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); + } + + private Cell getCellWithTags() { + Tag tag = new ArrayBackedTag(TAG_TYPE, TAG_STR); + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + cellBuilder.setRow(Bytes.toBytes("row1")); + cellBuilder.setFamily(Bytes.toBytes("f1")); + cellBuilder.setQualifier(Bytes.toBytes("q1")); + cellBuilder.setValue(Bytes.toBytes("value1")); + cellBuilder.setType(Cell.Type.Delete); + cellBuilder.setTags(Collections.singletonList(tag)); + return cellBuilder.build(); + } + + private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { + ExtendedCellBuilder decodedBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to false. + */ + @Test + public void testCellConversionWithoutTags() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to false + * and decoding of tags is set to true. + */ + @Test + public void testTagEncodeFalseDecodeTrue() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to true + * and decoding of tags is set to false. 
+ */ + @Test + public void testTagEncodeTrueDecodeFalse() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java index 1b242522f353..314cae9e175b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.util.PoolMap.PoolType; import org.junit.After; import org.junit.Before; @@ -28,6 +32,7 @@ public abstract class PoolMapTestBase { protected PoolMap poolMap; + protected static final int KEY_COUNT = 5; protected static final int POOL_SIZE = 3; @Before @@ -35,27 +40,5 @@ public void setUp() throws Exception { this.poolMap = new PoolMap<>(getPoolType(), POOL_SIZE); } - @After - public void tearDown() throws Exception { - this.poolMap.clear(); - } - protected abstract PoolType getPoolType(); - - protected void runThread(final String randomKey, final String randomValue, - final String expectedValue) throws InterruptedException { - final AtomicBoolean matchFound = new AtomicBoolean(false); - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - poolMap.put(randomKey, randomValue); - String actualValue = poolMap.get(randomKey); - matchFound - .set(expectedValue == null ? 
actualValue == null : expectedValue.equals(actualValue)); - } - }); - thread.start(); - thread.join(); - assertTrue(matchFound.get()); - } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java index a71cf2974a1c..ef7cb4e6512a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java @@ -18,12 +18,19 @@ package org.apache.hadoop.hbase.util; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -45,58 +52,103 @@ protected PoolType getPoolType() { } @Test - public void testSingleThreadedClient() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); - String randomKey = String.valueOf(rand.nextInt()); - String randomValue = String.valueOf(rand.nextInt()); - // As long as the pool is not full, we'll get null back. - // This forces the user to create new values that can be used to populate - // the pool. - runThread(randomKey, randomValue, null); - assertEquals(1, poolMap.size(randomKey)); + public void testGetOrCreate() throws IOException { + String key = "key"; + String value = "value"; + String result = poolMap.getOrCreate(key, () -> value); + + assertEquals(value, result); + assertEquals(1, poolMap.values().size()); + } + + @Test + public void testMultipleKeys() throws IOException { + for (int i = 0; i < KEY_COUNT; i++) { + String key = Integer.toString(i); + String value = Integer.toString(2 * i); + String result = poolMap.getOrCreate(key, () -> value); + + assertEquals(value, result); + } + + assertEquals(KEY_COUNT, poolMap.values().size()); + } + + @Test + public void testMultipleValues() throws IOException { + String key = "key"; + + for (int i = 0; i < POOL_SIZE; i++) { + String value = Integer.toString(i); + String result = poolMap.getOrCreate(key, () -> value); + + assertEquals(value, result); + } + + assertEquals(POOL_SIZE, poolMap.values().size()); } @Test - public void testMultiThreadedClients() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); + public void testRoundRobin() throws IOException { + String key = "key"; + for (int i = 0; i < POOL_SIZE; i++) { - String randomKey = String.valueOf(rand.nextInt()); - String randomValue = String.valueOf(rand.nextInt()); - // As long as the pool is not full, we'll get null back - runThread(randomKey, randomValue, null); - // As long as we use distinct keys, each pool will have one value - assertEquals(1, poolMap.size(randomKey)); + String value = Integer.toString(i); + poolMap.getOrCreate(key, () -> value); } - poolMap.clear(); - String randomKey = String.valueOf(rand.nextInt()); - for (int i = 0; i < POOL_SIZE - 1; i++) { - String randomValue = String.valueOf(rand.nextInt()); - // As long as the 
pool is not full, we'll get null back - runThread(randomKey, randomValue, null); - // since we use the same key, the pool size should grow - assertEquals(i + 1, poolMap.size(randomKey)); + + assertEquals(POOL_SIZE, poolMap.values().size()); + + /* pool is filled, get() should return elements round robin order */ + for (int i = 0; i < 2 * POOL_SIZE; i++) { + String expected = Integer.toString(i % POOL_SIZE); + assertEquals(expected, poolMap.getOrCreate(key, () -> { + throw new IOException("must not call me"); + })); } - // at the end of the day, there should be as many values as we put - assertEquals(POOL_SIZE - 1, poolMap.size(randomKey)); + + assertEquals(POOL_SIZE, poolMap.values().size()); } @Test - public void testPoolCap() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); - String randomKey = String.valueOf(rand.nextInt()); - List randomValues = new ArrayList<>(); - for (int i = 0; i < POOL_SIZE * 2; i++) { - String randomValue = String.valueOf(rand.nextInt()); - randomValues.add(randomValue); - if (i < POOL_SIZE - 1) { - // As long as the pool is not full, we'll get null back - runThread(randomKey, randomValue, null); - } else { - // when the pool becomes full, we expect the value we get back to be - // what we put earlier, in round-robin order - runThread(randomKey, randomValue, randomValues.get((i - POOL_SIZE + 1) % POOL_SIZE)); + public void testMultiThreadedRoundRobin() throws ExecutionException, InterruptedException { + String key = "key"; + AtomicInteger id = new AtomicInteger(); + List results = Collections.synchronizedList(new ArrayList<>()); + + Runnable runnable = () -> { + try { + for (int i = 0; i < POOL_SIZE; i++) { + String value = Integer.toString(id.getAndIncrement()); + String result = poolMap.getOrCreate(key, () -> value); + results.add(result); + + Thread.yield(); + } + } catch (IOException e) { + throw new CompletionException(e); } + }; + + CompletableFuture future1 = CompletableFuture.runAsync(runnable); + CompletableFuture future2 = CompletableFuture.runAsync(runnable); + + /* test for successful completion */ + future1.get(); + future2.get(); + + assertEquals(POOL_SIZE, poolMap.values().size()); + + /* check every elements occur twice */ + Collections.sort(results); + Iterator iterator = results.iterator(); + + for (int i = 0; i < POOL_SIZE; i++) { + String next1 = iterator.next(); + String next2 = iterator.next(); + assertEquals(next1, next2); } - assertEquals(POOL_SIZE, poolMap.size(randomKey)); + + assertFalse(iterator.hasNext()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java index 5f047c4f9fc2..a1cb610e8544 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java @@ -19,9 +19,13 @@ import static org.junit.Assert.assertEquals; +import java.io.IOException; import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -43,42 +47,71 @@ protected PoolType getPoolType() { } 
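The TestThreadLocalPoolMap changes that follow exercise the new getOrCreate(key, supplier) style of access: a thread keeps getting back the value it first pooled for a key (testLocality), and the supplier runs only when the calling thread has nothing pooled yet (testFull). As a rough, self-contained sketch of that thread-local pooling behaviour (the class below is an assumption made for illustration, not HBase's PoolMap; the real supplier type in the tests can throw IOException):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

/** Simplified stand-in showing the thread-local get-or-create semantics the tests verify. */
public class ThreadLocalPoolSketch<K, V> {
  private final Map<K, ThreadLocal<V>> pools = new ConcurrentHashMap<>();

  public V getOrCreate(K key, Supplier<V> supplier) {
    ThreadLocal<V> pool = pools.computeIfAbsent(key, k -> new ThreadLocal<>());
    V value = pool.get();
    if (value == null) {
      // The supplier is only consulted when this thread has no pooled value for the key yet.
      value = supplier.get();
      pool.set(value);
    }
    return value;
  }

  public static void main(String[] args) {
    ThreadLocalPoolSketch<String, String> pool = new ThreadLocalPoolSketch<>();
    String first = pool.getOrCreate("key", () -> "value");
    // Same thread, same key: the pooled value is reused and the supplier must not be called.
    String second = pool.getOrCreate("key", () -> {
      throw new AssertionError("must not call me");
    });
    System.out.println(first.equals(second)); // prints: true
  }
}

The round-robin flavour tested above differs only in how the per-key pool hands values back: it keeps a small bounded pool of values per key and returns them in round-robin order across callers instead of binding one value to each thread.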
@Test - public void testSingleThreadedClient() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); - String randomKey = String.valueOf(rand.nextInt()); - String randomValue = String.valueOf(rand.nextInt()); - // As long as the pool is not full, we should get back what we put - runThread(randomKey, randomValue, randomValue); - assertEquals(1, poolMap.size(randomKey)); + public void testGetOrCreate() throws IOException { + String key = "key"; + String value = "value"; + String result = poolMap.getOrCreate(key, () -> value); + + assertEquals(value, result); + assertEquals(1, poolMap.values().size()); } @Test - public void testMultiThreadedClients() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); - // As long as the pool is not full, we should get back what we put - for (int i = 0; i < POOL_SIZE; i++) { - String randomKey = String.valueOf(rand.nextInt()); - String randomValue = String.valueOf(rand.nextInt()); - runThread(randomKey, randomValue, randomValue); - assertEquals(1, poolMap.size(randomKey)); - } - String randomKey = String.valueOf(rand.nextInt()); - for (int i = 0; i < POOL_SIZE; i++) { - String randomValue = String.valueOf(rand.nextInt()); - runThread(randomKey, randomValue, randomValue); - assertEquals(i + 1, poolMap.size(randomKey)); + public void testMultipleKeys() throws IOException { + for (int i = 0; i < KEY_COUNT; i++) { + String key = Integer.toString(i); + String value = Integer.toString(2 * i); + String result = poolMap.getOrCreate(key, () -> value); + + assertEquals(value, result); } + + assertEquals(KEY_COUNT, poolMap.values().size()); } @Test - public void testPoolCap() throws InterruptedException, ExecutionException { - Random rand = ThreadLocalRandom.current(); - String randomKey = String.valueOf(rand.nextInt()); - for (int i = 0; i < POOL_SIZE * 2; i++) { - String randomValue = String.valueOf(rand.nextInt()); - // as of HBASE-4150, pool limit is no longer used with ThreadLocalPool - runThread(randomKey, randomValue, randomValue); - } - assertEquals(POOL_SIZE * 2, poolMap.size(randomKey)); + public void testFull() throws IOException { + String key = "key"; + String value = "value"; + + String result = poolMap.getOrCreate(key, () -> value); + assertEquals(value, result); + + String result2 = poolMap.getOrCreate(key, () -> { + throw new IOException("must not call me"); + }); + + assertEquals(value, result2); + assertEquals(1, poolMap.values().size()); + } + + @Test + public void testLocality() throws ExecutionException, InterruptedException { + String key = "key"; + AtomicInteger id = new AtomicInteger(); + + Runnable runnable = () -> { + try { + String myId = Integer.toString(id.getAndIncrement()); + + for (int i = 0; i < 3; i++) { + String result = poolMap.getOrCreate(key, () -> myId); + assertEquals(myId, result); + + Thread.yield(); + } + } catch (IOException e) { + throw new CompletionException(e); + } + }; + + CompletableFuture future1 = CompletableFuture.runAsync(runnable); + CompletableFuture future2 = CompletableFuture.runAsync(runnable); + + /* test for successful completion */ + future1.get(); + future2.get(); + + assertEquals(2, poolMap.values().size()); } } diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 48b3c0b7eded..8b9154156ba5 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -211,6 +211,11 @@ compile true + + org.hamcrest + hamcrest-library + test + org.mockito mockito-core diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java deleted file mode 100644 index bc76a9df37e6..000000000000 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.util.Comparator; - -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.primitives.Longs; - -/** - * A comparator for case where {@link ByteBufferKeyValue} is prevalent type (BBKV - * is base-type in hbase2). Takes a general comparator as fallback in case types are NOT the - * expected ByteBufferKeyValue. - * - *
<p>
This is a tricked-out Comparator at heart of hbase read and write. It is in - * the HOT path so we try all sorts of ugly stuff so we can go faster. See below - * in this javadoc comment for the list. - * - *
<p>
Apply this comparator narrowly so it is fed exclusively ByteBufferKeyValues - * as much as is possible so JIT can settle (e.g. make one per ConcurrentSkipListMap - * in HStore). - * - *
<p>
Exploits specially added methods in BBKV to save on deserializations of shorts, - * longs, etc: i.e. calculating the family length requires row length; pass it in - * rather than recalculate it, and so on. - * - *
<p>
This comparator does static dispatch to private final methods so hotspot is comfortable - * deciding inline. - * - *
<p>
Measurement has it that we almost have it so all inlines from memstore - * ConcurrentSkipListMap on down to the (unsafe) intrinisics that do byte compare - * and deserialize shorts and ints; needs a bit more work. - * - *
<p>
Does not take a Type to compare: i.e. it is not a Comparator<Cell> or - * CellComparator<Cell> or Comparator<ByteBufferKeyValue> because that adds - * another method to the hierarchy -- from compare(Object, Object) - * to dynamic compare(Cell, Cell) to static private compare -- and inlining doesn't happen if - * hierarchy is too deep (it is the case here). - * - *
<p>
Be careful making changes. Compare perf before and after and look at what - * hotspot ends up generating before committing change (jitwatch is helpful here). - * Changing this one class doubled write throughput (HBASE-20483). - */ -@InterfaceAudience.Private -public class BBKVComparator implements Comparator { - protected static final Logger LOG = LoggerFactory.getLogger(BBKVComparator.class); - private final Comparator fallback; - - public BBKVComparator(Comparator fallback) { - this.fallback = fallback; - } - - @Override - public int compare(Object l, Object r) { - if ((l instanceof ByteBufferKeyValue) && (r instanceof ByteBufferKeyValue)) { - return compare((ByteBufferKeyValue)l, (ByteBufferKeyValue)r, false); - } - // Skip calling compare(Object, Object) and go direct to compare(Cell, Cell) - return this.fallback.compare((Cell)l, (Cell)r); - } - - // TODO: Come back here. We get a few percentage points extra of throughput if this is a - // private method. - static int compare(ByteBufferKeyValue left, ByteBufferKeyValue right, - boolean ignoreSequenceid) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make - // sure done in both places. - - // Compare Rows. Cache row length. - int leftRowLength = left.getRowLength(); - int rightRowLength = right.getRowLength(); - int diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), - leftRowLength, - right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); - if (diff != 0) { - return diff; - } - - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in that - // we can't do memcmp w/ special rules like this. - // TODO: Is there a test for this behavior? - int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); - int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); - int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, - leftFamilyLength); - - // No need of left row length below here. - - byte leftType = left.getTypeByte(leftKeyLength); - if (leftFamilyLength + leftQualifierLength == 0 && - leftType == KeyValue.Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - - int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); - int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); - int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, - rightFamilyLength); - - // No need of right row length below here. - - byte rightType = right.getTypeByte(rightKeyLength); - if (rightFamilyLength + rightQualifierLength == 0 && - rightType == KeyValue.Type.Minimum.getCode()) { - return -1; - } - - // Compare families. 
- int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); - int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); - diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, - leftFamilyLength, - right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); - if (diff != 0) { - return diff; - } - - // Compare qualifiers - diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierByteBuffer(), - right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), - rightQualifierLength); - if (diff != 0) { - return diff; - } - - // Timestamps. - // Swap order we pass into compare so we get DESCENDING order. - diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); - if (diff != 0) { - return diff; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - diff = (0xff & rightType) - (0xff & leftType); - if (diff != 0) { - return diff; - } - - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid ? diff : Longs.compare(right.getSequenceId(), left.getSequenceId()); - } -} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java index 31f71f98c500..d55733769ddf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java @@ -61,10 +61,22 @@ public ByteBufferKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) { * @param length */ public void setKey(ByteBuffer key, int offset, int length) { + setKey(key, offset, length, ByteBufferUtils.toShort(key, offset)); + } + + /** + * A setter that helps to avoid object creation every time and whenever + * there is a need to create new OffheapKeyOnlyKeyValue. + * @param key - the key part of the cell + * @param offset - offset of the cell + * @param length - length of the cell + * @param rowLen - the rowlen part of the cell + */ + public void setKey(ByteBuffer key, int offset, int length, short rowLen) { this.buf = key; this.offset = offset; this.length = length; - this.rowLen = ByteBufferUtils.toShort(this.buf, this.offset); + this.rowLen = rowLen; } @Override @@ -149,7 +161,11 @@ private int getTimestampOffset() { @Override public byte getTypeByte() { - return ByteBufferUtils.toByte(this.buf, this.offset + this.length - 1); + return getTypeByte(this.length); + } + + byte getTypeByte(int keyLen) { + return ByteBufferUtils.toByte(this.buf, this.offset + keyLen - 1); } @Override @@ -224,7 +240,11 @@ public int getFamilyPosition() { // The position in BB where the family length is added. 
private int getFamilyLengthPosition() { - return this.offset + Bytes.SIZEOF_SHORT + getRowLength(); + return getFamilyLengthPosition(getRowLength()); + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java index caf11c3253f5..a2a8198bd4d8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in * off heap/ on heap ByteBuffer @@ -55,12 +53,10 @@ public ByteBufferKeyValue(ByteBuffer buf, int offset, int length) { this.length = length; } - @VisibleForTesting public ByteBuffer getBuffer() { return this.buf; } - @VisibleForTesting public int getOffset() { return this.offset; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 4af035a94f16..d55f9bad46fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -34,8 +34,7 @@ * format should be taken into consideration, for which the instance of this comparator * should be used. In all other cases the static APIs in this comparator would be enough *
<p>
HOT methods. We spend a good portion of CPU comparing. Anything that makes the compare - * faster will likely manifest at the macro level. See also - * {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s. + * faster will likely manifest at the macro level. *
</p>
*/ @edu.umd.cs.findbugs.annotations.SuppressWarnings( @@ -57,29 +56,286 @@ public final int compare(final Cell a, final Cell b) { } @Override - public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { - + public int compare(final Cell l, final Cell r, boolean ignoreSequenceid) { int diff = 0; // "Peel off" the most common path. - if (a instanceof ByteBufferKeyValue && b instanceof ByteBufferKeyValue) { - diff = BBKVComparator.compare((ByteBufferKeyValue)a, (ByteBufferKeyValue)b, ignoreSequenceid); + if (l instanceof KeyValue && r instanceof KeyValue) { + diff = compareKeyValues((KeyValue) l, (KeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof KeyValue && r instanceof ByteBufferKeyValue) { + diff = compareKVVsBBKV((KeyValue) l, (ByteBufferKeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof KeyValue) { + diff = compareKVVsBBKV((KeyValue) r, (ByteBufferKeyValue) l); + if (diff != 0) { + // negate- Findbugs will complain? + return -diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof ByteBufferKeyValue) { + diff = compareBBKV((ByteBufferKeyValue) l, (ByteBufferKeyValue) r); if (diff != 0) { return diff; } } else { - diff = compareRows(a, b); + int leftRowLength = l.getRowLength(); + int rightRowLength = r.getRowLength(); + diff = compareRows(l, leftRowLength, r, rightRowLength); if (diff != 0) { return diff; } - diff = compareWithoutRow(a, b); + diff = compareWithoutRow(l, r); if (diff != 0) { return diff; } } - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid? diff: Long.compare(b.getSequenceId(), a.getSequenceId()); + return ignoreSequenceid ? diff : Long.compare(r.getSequenceId(), l.getSequenceId()); + } + + private static int compareKeyValues(final KeyValue left, final KeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyOffset(rightFamilyLengthPosition); + diff = Bytes.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyArray(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = Bytes.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierArray(), right.getQualifierOffset(rightFamilyPosition, rightFamilyLength), + rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + // TODO : Ensure we read the bytes and do the compare instead of the value. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), + leftRowLength, right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, + leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); } /** @@ -94,6 +350,65 @@ public final int compareColumns(final Cell left, final Cell right) { return compareQualifiers(left, right); } + private int compareColumns(final Cell left, final int leftFamLen, final int leftQualLen, + final Cell right, final int rightFamLen, final int rightQualLen) { + int diff = compareFamilies(left, leftFamLen, right, rightFamLen); + if (diff != 0) { + return diff; + } + return compareQualifiers(left, leftQualLen, right, rightQualLen); + } + + private int compareFamilies(Cell left, int leftFamLen, Cell right, int rightFamLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, right.getFamilyArray(), + right.getFamilyOffset(), rightFamLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
+ return ByteBufferUtils.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + right.getFamilyArray(), right.getFamilyOffset(), rightFamLen); + } + + private final int compareQualifiers(Cell left, int leftQualLen, Cell right, int rightQualLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. + return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + leftQualLen, ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + /** * Compare the families of left and right cell * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise @@ -125,38 +440,174 @@ public final int compareFamilies(Cell left, Cell right) { right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } + static int compareQualifiers(KeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
+ int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return Bytes.compareTo(left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, + leftQualifierLength, right.getQualifierArray(), rightFamilyOffset + rightFamilyLength, + rightQualifierLength); + } + + static int compareQualifiers(KeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierArray(), + leftFamilyOffset + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
+ int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierArray(), + rightFamilyOffset + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + /** * Compare the qualifiers part of the left and right cells. 
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise */ @Override public final int compareQualifiers(Cell left, Cell right) { - if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils - .compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), - left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), - right.getQualifierLength()); - } - if (left instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + if ((left instanceof ByteBufferKeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((KeyValue) left, (KeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((KeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof ByteBufferKeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (KeyValue) right); + } else { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); - } - if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but - // see what FindBugs says - // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO - // It suggest flipping the order to get same effect and 'safer'. - return ByteBufferUtils.compareTo(left.getQualifierArray(), - left.getQualifierOffset(), left.getQualifierLength(), - ((ByteBufferExtendedCell)right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell)right).getQualifierPosition(), right.getQualifierLength()); - } - return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
+ return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); + } + } /** @@ -195,8 +646,8 @@ static int compareRows(final Cell left, int leftRowLength, final Cell right, int ((ByteBufferExtendedCell)right).getRowByteBuffer(), ((ByteBufferExtendedCell)right).getRowPosition(), rightRowLength); } - return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); + return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); } /** @@ -249,10 +700,10 @@ public final int compareWithoutRow(final Cell left, final Cell right) { } if (lFamLength != rFamLength) { // comparing column family is enough. - return compareFamilies(left, right); + return compareFamilies(left, lFamLength, right, rFamLength); } // Compare cf:qualifier - int diff = compareColumns(left, right); + int diff = compareColumns(left, lFamLength, lQualLength, right, rFamLength, rQualLength); if (diff != 0) { return diff; } @@ -282,7 +733,7 @@ public int compareTimestamps(final long ltimestamp, final long rtimestamp) { @Override public Comparator getSimpleComparator() { - return new BBKVComparator(this); + return this; } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index a51fa3de96ef..c3b65e32c11c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -433,6 +433,11 @@ public static boolean matchingRow(final Cell left, final byte[] buf, final int o public static boolean matchingFamily(final Cell left, final Cell right) { byte lfamlength = left.getFamilyLength(); byte rfamlength = right.getFamilyLength(); + return matchingFamily(left, lfamlength, right, rfamlength); + } + + public static boolean matchingFamily(final Cell left, final byte lfamlength, final Cell right, + final byte rfamlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), lfamlength, @@ -463,6 +468,11 @@ public static boolean matchingFamily(final Cell left, final byte[] buf) { public static boolean matchingQualifier(final Cell left, final Cell right) { int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + return matchingQualifier(left, lqlength, right, rqlength); + } + + private static boolean matchingQualifier(final Cell left, final int lqlength, final Cell right, + final int rqlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), lqlength, @@ -516,6 +526,14 @@ public static boolean matchingColumn(final Cell left, final Cell right) { return matchingQualifier(left, right); } + private static boolean matchingColumn(final Cell 
left, final byte lFamLen, final int lQualLength, + final Cell right, final byte rFamLen, final int rQualLength) { + if (!matchingFamily(left, lFamLen, right, rFamLen)) { + return false; + } + return matchingQualifier(left, lQualLength, right, rQualLength); + } + public static boolean matchingValue(final Cell left, final Cell right) { return matchingValue(left, right, left.getValueLength(), right.getValueLength()); } @@ -685,6 +703,11 @@ public static boolean matchingTimestamp(Cell a, Cell b) { public static boolean matchingRows(final Cell left, final Cell right) { short lrowlength = left.getRowLength(); short rrowlength = right.getRowLength(); + return matchingRows(left, lrowlength, right, rrowlength); + } + + public static boolean matchingRows(final Cell left, final short lrowlength, final Cell right, + final short rrowlength) { if (lrowlength != rrowlength) return false; if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(), @@ -713,16 +736,29 @@ public static boolean matchingRows(final Cell left, final Cell right) { * @return True if same row and column. */ public static boolean matchingRowColumn(final Cell left, final Cell right) { - if ((left.getRowLength() + left.getFamilyLength() - + left.getQualifierLength()) != (right.getRowLength() + right.getFamilyLength() - + right.getQualifierLength())) { + short lrowlength = left.getRowLength(); + short rrowlength = right.getRowLength(); + // match length + if (lrowlength != rrowlength) { + return false; + } + + byte lfamlength = left.getFamilyLength(); + byte rfamlength = right.getFamilyLength(); + if (lfamlength != rfamlength) { return false; } - if (!matchingRows(left, right)) { + int lqlength = left.getQualifierLength(); + int rqlength = right.getQualifierLength(); + if (lqlength != rqlength) { + return false; + } + + if (!matchingRows(left, lrowlength, right, rrowlength)) { return false; } - return matchingColumn(left, right); + return matchingColumn(left, lfamlength, lqlength, right, rfamlength, rqlength); } public static boolean matchingRowColumnBytes(final Cell left, final Cell right) { @@ -732,9 +768,9 @@ public static boolean matchingRowColumnBytes(final Cell left, final Cell right) int rfamlength = right.getFamilyLength(); int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + // match length - if ((lrowlength + lfamlength + lqlength) != - (rrowlength + rfamlength + rqlength)) { + if ((lrowlength != rrowlength) || (lfamlength != rfamlength) || (lqlength != rqlength)) { return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 9dbb307df402..5bd67ad02eec 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -26,14 +27,10 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hbase.ScheduledChore.ChoreServicer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * ChoreService is a service that can be used to schedule instances of {@link ScheduledChore} to run * periodically while sharing threads. The ChoreService is backed by a @@ -54,7 +51,7 @@ * Calling this method ensures that all scheduled chores are cancelled and cleaned up properly. */ @InterfaceAudience.Public -public class ChoreService implements ChoreServicer { +public class ChoreService { private static final Logger LOG = LoggerFactory.getLogger(ChoreService.class); /** @@ -95,7 +92,6 @@ public class ChoreService implements ChoreServicer { * spawned by this service */ @InterfaceAudience.Private - @VisibleForTesting public ChoreService(final String coreThreadPoolPrefix) { this(coreThreadPoolPrefix, MIN_CORE_POOL_SIZE, false); } @@ -144,28 +140,39 @@ public ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean * @return true when the chore was successfully scheduled. false when the scheduling failed * (typically occurs when a chore is scheduled during shutdown of service) */ - public synchronized boolean scheduleChore(ScheduledChore chore) { + public boolean scheduleChore(ScheduledChore chore) { if (chore == null) { return false; } - - try { - if (chore.getPeriod() <= 0) { - LOG.info("Chore {} is disabled because its period is not positive.", chore); - return false; + // always lock chore first to prevent dead lock + synchronized (chore) { + synchronized (this) { + try { + // Chores should only ever be scheduled with a single ChoreService. If the choreService + // is changing, cancel any existing schedules of this chore. + if (chore.getChoreService() == this) { + LOG.warn("Chore {} has already been scheduled with us", chore); + return false; + } + if (chore.getPeriod() <= 0) { + LOG.info("Chore {} is disabled because its period is not positive.", chore); + return false; + } + LOG.info("Chore {} is enabled.", chore); + if (chore.getChoreService() != null) { + LOG.info("Cancel chore {} from its previous service", chore); + chore.getChoreService().cancelChore(chore); + } + chore.setChoreService(this); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); + return true; + } catch (Exception e) { + LOG.error("Could not successfully schedule chore: {}", chore.getName(), e); + return false; + } } - LOG.info("Chore {} is enabled.", chore); - chore.setChoreServicer(this); - ScheduledFuture future = - scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(), - chore.getTimeUnit()); - scheduledChores.put(chore, future); - return true; - } catch (Exception exception) { - if (LOG.isInfoEnabled()) { - LOG.info("Could not successfully schedule chore: " + chore.getName()); - } - return false; } } @@ -178,19 +185,35 @@ private void rescheduleChore(ScheduledChore chore) { ScheduledFuture future = scheduledChores.get(chore); future.cancel(false); } - scheduleChore(chore); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); } - @InterfaceAudience.Private - @Override - public synchronized void cancelChore(ScheduledChore chore) { + /** + * Cancel any ongoing schedules that this chore has with the implementer of this interface. + *
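The reworked scheduleChore logic above ties each ScheduledChore to at most one ChoreService: scheduling the same chore twice with the same service is rejected, and scheduling it with a new service first cancels the previous registration. A minimal sketch of that lifecycle, using only the APIs visible in this patch (the pool prefix, chore name, period and the Stoppable stub are illustrative):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreLifecycleExample {
      public static void main(String[] args) throws InterruptedException {
        ChoreService service = new ChoreService("example-");
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore heartbeat = new ScheduledChore("exampleHeartbeat", stopper, 1000) {
          @Override protected void chore() {
            // periodic work; should finish well within the 1000 ms period
          }
        };
        service.scheduleChore(heartbeat);  // true: the chore is now owned by this service
        service.scheduleChore(heartbeat);  // false: already scheduled with this service
        Thread.sleep(3000);
        heartbeat.cancel();                // removes the chore from its ChoreService
        service.shutdown();                // cancels anything still scheduled
      }
    }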

+ * Call {@link ScheduledChore#cancel()} to cancel a {@link ScheduledChore}, in + * {@link ScheduledChore#cancel()} method we will call this method to remove the + * {@link ScheduledChore} from this {@link ChoreService}. + */ + @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java") + synchronized void cancelChore(ScheduledChore chore) { cancelChore(chore, true); } - @InterfaceAudience.Private - @Override - public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) { - if (chore != null && scheduledChores.containsKey(chore)) { + /** + * Cancel any ongoing schedules that this chore has with the implementer of this interface. + *

+ * Call {@link ScheduledChore#cancel(boolean)} to cancel a {@link ScheduledChore}, in + * {@link ScheduledChore#cancel(boolean)} method we will call this method to remove the + * {@link ScheduledChore} from this {@link ChoreService}. + */ + @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java") + synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) { + if (scheduledChores.containsKey(chore)) { ScheduledFuture future = scheduledChores.get(chore); future.cancel(mayInterruptIfRunning); scheduledChores.remove(chore); @@ -204,21 +227,24 @@ public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptI } } + /** + * @return true when the chore is scheduled with the implementer of this interface + */ @InterfaceAudience.Private - @Override public synchronized boolean isChoreScheduled(ScheduledChore chore) { return chore != null && scheduledChores.containsKey(chore) && !scheduledChores.get(chore).isDone(); } - @InterfaceAudience.Private - @Override - public synchronized boolean triggerNow(ScheduledChore chore) { - if (chore != null) { - rescheduleChore(chore); - return true; - } - return false; + /** + * This method tries to execute the chore immediately. If the chore is executing at the time of + * this call, the chore will begin another execution as soon as the current execution finishes + */ + @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java") + synchronized void triggerNow(ScheduledChore chore) { + assert chore.getChoreService() == this; + rescheduleChore(chore); } /** @@ -298,10 +324,20 @@ private synchronized void requestCorePoolDecrease() { } } - @InterfaceAudience.Private - @Override - public synchronized void onChoreMissedStartTime(ScheduledChore chore) { - if (chore == null || !scheduledChores.containsKey(chore)) return; + /** + * A callback that tells the implementer of this interface that one of the scheduled chores is + * missing its start time. The implication of a chore missing its start time is that the service's + * current means of scheduling may not be sufficient to handle the number of ongoing chores (the + * other explanation is that the chore's execution time is greater than its scheduled period). The + * service should try to increase its concurrency when this callback is received. + * @param chore The chore that missed its start time + */ + @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java") + synchronized void onChoreMissedStartTime(ScheduledChore chore) { + if (!scheduledChores.containsKey(chore)) { + return; + } // If the chore has not caused an increase in the size of the core thread pool then request an // increase. This allows each chore missing its start time to increase the core pool size by @@ -322,13 +358,17 @@ public synchronized void onChoreMissedStartTime(ScheduledChore chore) { * shutdown the service. Any chores that are scheduled for execution will be cancelled. Any chores * in the middle of execution will be interrupted and shutdown. This service will be unusable * after this method has been called (i.e. future scheduling attempts will fail). + *

+ * Notice that, this will only clean the chore from this ChoreService but you could still schedule + * the chore with other ChoreService. */ public synchronized void shutdown() { - scheduler.shutdownNow(); - if (LOG.isInfoEnabled()) { - LOG.info("Chore service for: " + coreThreadPoolPrefix + " had " + scheduledChores.keySet() - + " on shutdown"); + if (isShutdown()) { + return; } + scheduler.shutdownNow(); + LOG.info("Chore service for: {} had {} on shutdown", coreThreadPoolPrefix, + scheduledChores.keySet()); cancelAllChores(true); scheduledChores.clear(); choresMissingStartTime.clear(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 67de5fb3a21b..70467f08aa01 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -36,36 +36,6 @@ public class HBaseConfiguration extends Configuration { private static final Logger LOG = LoggerFactory.getLogger(HBaseConfiguration.class); - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create() to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create()} instead. - * @see #create() - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration() { - //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration - super(); - addHbaseResources(this); - LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" - + " HBaseConfiguration#create() to construct a plain Configuration"); - } - - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create(conf) to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create(Configuration)} instead. - * @see #create(Configuration) - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration(final Configuration c) { - //TODO:replace with private constructor - this(); - merge(this, c); - } - private static void checkDefaultsVersion(Configuration conf) { if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return; String defaultsVersion = conf.get("hbase.defaults.for.version"); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index f6f00c552546..48fa00caaa14 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -700,6 +700,14 @@ public enum OperationStatusCode { public static final int HOUR_IN_SECONDS = 60 * 60; public static final int MINUTE_IN_SECONDS = 60; + /** + * KB, MB, GB, TB equivalent to how many bytes + */ + public static final long KB_IN_BYTES = 1024; + public static final long MB_IN_BYTES = 1024 * KB_IN_BYTES; + public static final long GB_IN_BYTES = 1024 * MB_IN_BYTES; + public static final long TB_IN_BYTES = 1024 * GB_IN_BYTES; + //TODO: although the following are referenced widely to format strings for // the shell. They really aren't a part of the public API. 
It would be // nice if we could put them somewhere where they did not need to be @@ -965,6 +973,17 @@ public enum OperationStatusCode { */ public static final int DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT = 10000; + /** + * Retry pause time for short operation RPC + */ + public static final String HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = + "hbase.rpc.shortoperation.retry.pause.time"; + + /** + * Default value of {@link #HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME} + */ + public static final long DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = 1000; + /** * Value indicating the server name was saved with no sequence number. */ @@ -981,10 +1000,12 @@ public enum OperationStatusCode { */ public static final String REPLICATION_SOURCE_SERVICE_CLASSNAME = "hbase.replication.source.service"; - public static final String - REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service"; public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT = "org.apache.hadoop.hbase.replication.regionserver.Replication"; + public static final String + REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service"; + public static final String REPLICATION_SINK_SERVICE_CLASSNAME_DEFAULT = + "org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl"; public static final String REPLICATION_BULKLOAD_ENABLE_KEY = "hbase.replication.bulkload.enabled"; public static final boolean REPLICATION_BULKLOAD_ENABLE_DEFAULT = false; /** Replication cluster id of source cluster which uniquely identifies itself with peer cluster */ @@ -1129,7 +1150,20 @@ public enum OperationStatusCode { /** Conf key for enabling meta replication */ public static final String USE_META_REPLICAS = "hbase.meta.replicas.use"; public static final boolean DEFAULT_USE_META_REPLICAS = false; + + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. + */ + @Deprecated public static final String META_REPLICAS_NUM = "hbase.meta.replica.count"; + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. + */ + @Deprecated public static final int DEFAULT_META_REPLICA_NUM = 1; /** @@ -1343,7 +1377,9 @@ public enum OperationStatusCode { /** * Drop edits for tables that been deleted from the replication source and target - * @deprecated moved it into HBaseInterClusterReplicationEndpoint + * @deprecated since 3.0.0. Will be removed in 4.0.0. + * Moved it into HBaseInterClusterReplicationEndpoint. + * @see HBASE-24359 */ @Deprecated public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY = @@ -1372,9 +1408,7 @@ public enum OperationStatusCode { public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine"; /** - * When using bucket cache, this is a float that EITHER represents a percentage of total heap - * memory size to give to the cache (if < 1.0) OR, it is the capacity in - * megabytes of the cache. + * When using bucket cache, it is the capacity in megabytes of the cache. */ public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size"; @@ -1571,6 +1605,13 @@ public enum OperationStatusCode { "hbase.master.executor.serverops.threads"; public static final int MASTER_SERVER_OPERATIONS_THREADS_DEFAULT = 5; + /** + * Number of threads used to dispatch merge operations to the regionservers. 
+ */ + public static final String MASTER_MERGE_DISPATCH_THREADS = + "hbase.master.executor.merge.dispatch.threads"; + public static final int MASTER_MERGE_DISPATCH_THREADS_DEFAULT = 2; + public static final String MASTER_META_SERVER_OPERATIONS_THREADS = "hbase.master.executor.meta.serverops.threads"; public static final int MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT = 5; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index b7ddbab6c90c..79356edfea21 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; + import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -40,8 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * An HBase Key/Value. This is the fundamental HBase Type. *
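The HConstants hunks above introduce byte-size constants (KB_IN_BYTES through TB_IN_BYTES) and a configurable retry pause for short-operation RPCs. A small sketch of how a caller might use them; the 4 GiB figure and the printed message are only for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class HConstantsExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 4 GiB expressed in bytes via the new constants
        long cacheBytes = 4 * HConstants.GB_IN_BYTES;
        // pause between retries of short-operation RPCs, falling back to the new default
        long retryPauseMs = conf.getLong(
            HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME,
            HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME);
        System.out.println(cacheBytes + " bytes, retry pause " + retryPauseMs + " ms");
      }
    }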

@@ -1232,7 +1231,6 @@ public static String humanReadableTimestamp(final long timestamp) { * and that we need access to the backing array to do some test case related assertions. * @return The byte array backing this KeyValue. */ - @VisibleForTesting public byte [] getBuffer() { return this.bytes; } @@ -1351,14 +1349,14 @@ public byte[] getFamilyArray() { */ @Override public int getFamilyOffset() { - return getFamilyOffset(getRowLength()); + return getFamilyOffset(getFamilyLengthPosition(getRowLength())); } /** * @return Family offset */ - private int getFamilyOffset(int rlength) { - return this.offset + ROW_KEY_OFFSET + rlength + Bytes.SIZEOF_BYTE; + int getFamilyOffset(int familyLenPosition) { + return familyLenPosition + Bytes.SIZEOF_BYTE; } /** @@ -1366,14 +1364,18 @@ private int getFamilyOffset(int rlength) { */ @Override public byte getFamilyLength() { - return getFamilyLength(getFamilyOffset()); + return getFamilyLength(getFamilyLengthPosition(getRowLength())); } /** * @return Family length */ - public byte getFamilyLength(int foffset) { - return this.bytes[foffset-1]; + public byte getFamilyLength(int famLenPos) { + return this.bytes[famLenPos]; + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + KeyValue.ROW_KEY_OFFSET + rowLength; } /** @@ -1396,7 +1398,14 @@ public int getQualifierOffset() { * @return Qualifier offset */ private int getQualifierOffset(int foffset) { - return foffset + getFamilyLength(foffset); + return getQualifierOffset(foffset, getFamilyLength()); + } + + /** + * @return Qualifier offset + */ + int getQualifierOffset(int foffset, int flength) { + return foffset + flength; } /** @@ -1411,7 +1420,14 @@ public int getQualifierLength() { * @return Qualifier length */ private int getQualifierLength(int rlength, int flength) { - return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); + return getQualifierLength(getKeyLength(), rlength, flength); + } + + /** + * @return Qualifier length + */ + int getQualifierLength(int keyLength, int rlength, int flength) { + return keyLength - (int) getKeyDataStructureSize(rlength, flength, 0); } /** @@ -1504,7 +1520,11 @@ long getTimestamp(final int keylength) { */ @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1 + ROW_OFFSET]; + return getTypeByte(getKeyLength()); + } + + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1 + ROW_OFFSET]; } /** @@ -1878,8 +1898,8 @@ public int compareRows(final Cell left, final Cell right) { * @param rlength * @return 0 if equal, <0 if left smaller, >0 if right smaller */ - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); } @@ -2452,6 +2472,10 @@ public byte getFamilyLength() { return this.bytes[getFamilyOffset() - 1]; } + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; + } + @Override public int getFamilyOffset() { return this.offset + Bytes.SIZEOF_SHORT + getRowLength() + Bytes.SIZEOF_BYTE; @@ -2484,9 +2508,14 @@ public short getRowLength() { @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1]; + return getTypeByte(getKeyLength()); } + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1]; + } + + private int 
getQualifierLength(int rlength, int flength) { return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index 2aadc4257c7c..a86ca1059126 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility methods helpful slinging {@link Cell} instances. It has more powerful and * rich set of APIs than those in {@link CellUtil} for internal usage. @@ -2635,7 +2633,6 @@ public static void compressQualifier(OutputStream out, Cell cell, Dictionary dic * @return an int greater than 0 if left is greater than right lesser than 0 if left is lesser * than right equal to 0 if left is equal to right */ - @VisibleForTesting public static final int compare(CellComparator comparator, Cell left, byte[] key, int offset, int length) { // row diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index ea598d21ca3b..d29e8ca8bdce 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -18,13 +18,18 @@ package org.apache.hadoop.hbase; import java.util.Iterator; +import java.util.List; import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; /** - * An extended version of cell that gives more power to CPs + * An extended version of Cell that allows CPs manipulate Tags. */ +// Added by HBASE-19092 to expose Tags to CPs (history server) w/o exposing ExtendedCell. +// Why is this in hbase-common and not in hbase-server where it is used? +// RawCell is an odd name for a class that is only for CPs that want to manipulate Tags on +// server-side only w/o exposing ExtendedCell -- super rare, super exotic. @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public interface RawCell extends Cell { static final int MAX_TAGS_LENGTH = (2 * Short.MAX_VALUE) + 1; @@ -64,4 +69,11 @@ public static void checkForTagsLength(int tagsLength) { throw new IllegalArgumentException("tagslength " + tagsLength + " > " + MAX_TAGS_LENGTH); } } + + /** + * @return A new cell which is having the extra tags also added to it. + */ + public static Cell createCell(Cell cell, List tags) { + return PrivateCellUtil.createCell(cell, tags); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java index a009cf4ef126..6155bbdeb3b0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java @@ -18,14 +18,13 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * ScheduledChore is a task performed on a period in hbase. 
ScheduledChores become active once * scheduled with a {@link ChoreService} via {@link ChoreService#scheduleChore(ScheduledChore)}. The @@ -35,7 +34,7 @@ * execute within the defined period. It is bad practice to define a ScheduledChore whose execution * time exceeds its period since it will try to hog one of the threads in the {@link ChoreService}'s * thread pool. - *

+ *

* Don't subclass ScheduledChore if the task relies on being woken up for something to do, such as * an entry being added to a queue, etc. */ @@ -62,7 +61,7 @@ public abstract class ScheduledChore implements Runnable { * Interface to the ChoreService that this ScheduledChore is scheduled with. null if the chore is * not scheduled. */ - private ChoreServicer choreServicer; + private ChoreService choreService; /** * Variables that encapsulate the meaningful state information @@ -79,44 +78,10 @@ public abstract class ScheduledChore implements Runnable { */ private final Stoppable stopper; - interface ChoreServicer { - /** - * Cancel any ongoing schedules that this chore has with the implementer of this interface. - */ - public void cancelChore(ScheduledChore chore); - public void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); - - /** - * @return true when the chore is scheduled with the implementer of this interface - */ - public boolean isChoreScheduled(ScheduledChore chore); - - /** - * This method tries to execute the chore immediately. If the chore is executing at the time of - * this call, the chore will begin another execution as soon as the current execution finishes - *

- * If the chore is not scheduled with a ChoreService, this call will fail. - * @return false when the chore could not be triggered immediately - */ - public boolean triggerNow(ScheduledChore chore); - - /** - * A callback that tells the implementer of this interface that one of the scheduled chores is - * missing its start time. The implication of a chore missing its start time is that the - * service's current means of scheduling may not be sufficient to handle the number of ongoing - * chores (the other explanation is that the chore's execution time is greater than its - * scheduled period). The service should try to increase its concurrency when this callback is - * received. - * @param chore The chore that missed its start time - */ - public void onChoreMissedStartTime(ScheduledChore chore); - } - /** * This constructor is for test only. It allows us to create an object and to call chore() on it. */ @InterfaceAudience.Private - @VisibleForTesting protected ScheduledChore() { this("TestChore", null, 0, DEFAULT_INITIAL_DELAY, DEFAULT_TIME_UNIT); } @@ -171,8 +136,8 @@ public void run() { onChoreMissedStartTime(); LOG.info("Chore: {} missed its start time", getName()); } else if (stopper.isStopped() || !isScheduled()) { - cancel(false); - cleanup(); + // call shutdown here to cleanup the ScheduledChore. + shutdown(false); LOG.info("Chore: {} was stopped", getName()); } else { try { @@ -196,7 +161,6 @@ public void run() { LOG.error("Caught error", t); if (this.stopper.isStopped()) { cancel(false); - cleanup(); } } } @@ -217,7 +181,9 @@ private synchronized void updateTimeTrackingBeforeRun() { * pool threads */ private synchronized void onChoreMissedStartTime() { - if (choreServicer != null) choreServicer.onChoreMissedStartTime(this); + if (choreService != null) { + choreService.onChoreMissedStartTime(this); + } } /** @@ -256,20 +222,17 @@ private synchronized boolean isValidTime(final long time) { * @return false when the Chore is not currently scheduled with a ChoreService */ public synchronized boolean triggerNow() { - if (choreServicer != null) { - return choreServicer.triggerNow(this); - } else { + if (choreService == null) { return false; } + choreService.triggerNow(this); + return true; } - synchronized void setChoreServicer(ChoreServicer service) { - // Chores should only ever be scheduled with a single ChoreService. If the choreServicer - // is changing, cancel any existing schedules of this chore. 
- if (choreServicer != null && choreServicer != service) { - choreServicer.cancelChore(this, false); - } - choreServicer = service; + @RestrictedApi(explanation = "Should only be called in ChoreService", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/ChoreService.java") + synchronized void setChoreService(ChoreService service) { + choreService = service; timeOfThisRun = -1; } @@ -278,9 +241,10 @@ public synchronized void cancel() { } public synchronized void cancel(boolean mayInterruptIfRunning) { - if (isScheduled()) choreServicer.cancelChore(this, mayInterruptIfRunning); - - choreServicer = null; + if (isScheduled()) { + choreService.cancelChore(this, mayInterruptIfRunning); + } + choreService = null; } public String getName() { @@ -313,17 +277,14 @@ public synchronized boolean isInitialChoreComplete() { return initialChoreComplete; } - @VisibleForTesting - synchronized ChoreServicer getChoreServicer() { - return choreServicer; + synchronized ChoreService getChoreService() { + return choreService; } - @VisibleForTesting synchronized long getTimeOfLastRun() { return timeOfLastRun; } - @VisibleForTesting synchronized long getTimeOfThisRun() { return timeOfThisRun; } @@ -332,11 +293,12 @@ synchronized long getTimeOfThisRun() { * @return true when this Chore is scheduled with a ChoreService */ public synchronized boolean isScheduled() { - return choreServicer != null && choreServicer.isChoreScheduled(this); + return choreService != null && choreService.isChoreScheduled(this); } @InterfaceAudience.Private - @VisibleForTesting + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") public synchronized void choreForTesting() { chore(); } @@ -358,7 +320,26 @@ protected boolean initialChore() { /** * Override to run cleanup tasks when the Chore encounters an error and must stop running */ - protected synchronized void cleanup() { + protected void cleanup() { + } + + /** + * Call {@link #shutdown(boolean)} with {@code true}. + * @see ScheduledChore#shutdown(boolean) + */ + public synchronized void shutdown() { + shutdown(true); + } + + /** + * Completely shutdown the ScheduleChore, which means we will call cleanup and you should not + * schedule it again. + *

+ * This is another path to cleanup the chore, comparing to stop the stopper instance passed in. + */ + public synchronized void shutdown(boolean mayInterruptIfRunning) { + cancel(mayInterruptIfRunning); + cleanup(); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java new file mode 100644 index 000000000000..9f5d9c179dd5 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedByteBufferKeyValue extends ByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + /** + * Override by just returning the length for saving cost of method dispatching. If not, it will + * call {@link ExtendedCell#getSerializedSize()} firstly, then forward to + * {@link SizeCachedKeyValue#getSerializedSize(boolean)}. 
(See HBASE-21657) + */ + @Override + public int getSerializedSize() { + return this.length; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java index 663f3eb77c66..5141cfba08f7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java @@ -39,12 +39,22 @@ public class SizeCachedKeyValue extends KeyValue { private short rowLen; private int keyLen; - public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId) { + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { super(bytes, offset, length); // We will read all these cached values at least once. Initialize now itself so that we can // avoid uninitialized checks with every time call - rowLen = super.getRowLength(); - keyLen = super.getKeyLength(); + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; setSequenceId(seqId); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java new file mode 100644 index 000000000000..0374169d9b79 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedNoTagsByteBufferKeyValue extends NoTagsByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. 
Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java index 88b6177fcb18..85bdb52bbfd4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java @@ -32,8 +32,13 @@ @InterfaceAudience.Private public class SizeCachedNoTagsKeyValue extends SizeCachedKeyValue { - public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId) { - super(bytes, offset, length, seqId); + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { + super(bytes, offset, length, seqId, keyLen); + } + + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length, seqId, keyLen, rowLen); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java similarity index 56% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java index 2b2aedee22c9..372144c6c268 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java @@ -1,4 +1,6 @@ -/* +/** + * Copyright The Apache Software Foundation + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +9,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,23 +17,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +package org.apache.hadoop.hbase; -package org.apache.hadoop.hbase.coprocessor; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import java.nio.ByteBuffer; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - -import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** - * Coprocessor endpoints registered once per server and providing protobuf services should implement - * this interface and return the {@link Service} instance via {@link #getService()}. - * @deprecated Since 2.0. Will be removed in 3.0 + * Builder implementation to create {@link Tag} + * Call setTagValue(byte[]) method to create {@link ArrayBackedTag} */ -@Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -@InterfaceStability.Evolving -public interface SingletonCoprocessorService { - Service getService(); +public interface TagBuilder { + /** + * Set type of the tag. + * @param tagType type of the tag + * @return {@link TagBuilder} + */ + TagBuilder setTagType(byte tagType); + + /** + * Set the value of the tag. + * @param tagBytes tag bytes. + * @return {@link TagBuilder} + */ + TagBuilder setTagValue(byte[] tagBytes); + + /** + * Build the tag. + * @return {@link Tag} + */ + Tag build(); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java new file mode 100644 index 000000000000..40744f91abf0 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java @@ -0,0 +1,73 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Factory to create Tags. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +public final class TagBuilderFactory { + + public static TagBuilder create() { + return new TagBuilderImpl(); + } +} + +/** + * Builder implementation to create {@link Tag}
+ * Call setTagValue(byte[]) method to create {@link ArrayBackedTag} + */ +class TagBuilderImpl implements TagBuilder { + // This assumes that we never create tag with value less than 0. + private byte tagType = (byte)-1; + private byte[] tagBytes = null; + public static final String TAG_TYPE_NOT_SET_EXCEPTION = "Need to set type of the tag."; + public static final String TAG_VALUE_NULL_EXCEPTION = "TagBytes can't be null"; + + @Override + public TagBuilder setTagType(byte tagType) { + this.tagType = tagType; + return this; + } + + @Override + public TagBuilder setTagValue(byte[] tagBytes) { + this.tagBytes = tagBytes; + return this; + } + + private void validate() { + if (tagType == -1) { + throw new IllegalArgumentException(TAG_TYPE_NOT_SET_EXCEPTION); + } + if (tagBytes == null) { + throw new IllegalArgumentException(TAG_VALUE_NULL_EXCEPTION); + } + } + + @Override + public Tag build() { + validate(); + return new ArrayBackedTag(tagType, tagBytes); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index 511679f5b547..ad26f7633cb5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Set; import java.util.WeakHashMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; @@ -32,17 +31,18 @@ * when the Configuration is reloaded from the disk in the Online Configuration * Change mechanism, which lets you update certain configuration properties * on-the-fly, without having to restart the cluster. - * + *
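The TagBuilder and TagBuilderFactory additions above give coprocessors a supported way to build tags without reaching into internal classes. A sketch of the intended call pattern; the tag type byte and payload are made-up values:

    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.TagBuilderFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TagBuilderExample {
      public static void main(String[] args) {
        Tag tag = TagBuilderFactory.create()     // obtain a TagBuilder
            .setTagType((byte) 1)                // example tag type
            .setTagValue(Bytes.toBytes("demo"))  // tag payload
            .build();                            // backed by an ArrayBackedTag
        System.out.println(tag.getType());
      }
    }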

* If a class has configuration properties which you would like to be able to * change on-the-fly, do the following: - * 1. Implement the {@link ConfigurationObserver} interface. This would require + *

    + *
  1. Implement the {@link ConfigurationObserver} interface. This would require * you to implement the * {@link ConfigurationObserver#onConfigurationChange(Configuration)} * method. This is a callback that is used to notify your class' instance * that the configuration has changed. In this method, you need to check * if the new values for the properties that are of interest to your class * are different from the cached values. If yes, update them. - * + *
    * However, be careful with this. Certain properties might be trivially * mutable online, but others might not. Two properties might be trivially * mutable by themselves, but not when changed together. For example, if a @@ -51,21 +51,23 @@ * yet updated "b", it might make a decision on the basis of a new value of * "a", and an old value of "b". This might introduce subtle bugs. This * needs to be dealt on a case-by-case basis, and this class does not provide - * any protection from such cases. + * any protection from such cases.
* - * 2. Register the appropriate instance of the class with the + *
  3. Register the appropriate instance of the class with the * {@link ConfigurationManager} instance, using the * {@link ConfigurationManager#registerObserver(ConfigurationObserver)} * method. Be careful not to do this in the constructor, as you might cause * the 'this' reference to escape. Use a factory method, or an initialize() - * method which is called after the construction of the object. + * method which is called after the construction of the object.
* - * 3. Deregister the instance using the + *
  5. Deregister the instance using the * {@link ConfigurationManager#deregisterObserver(ConfigurationObserver)} * method when it is going out of scope. In case you are not able to do that * for any reason, it is still okay, since entries for dead observers are * automatically collected during GC. But nonetheless, it is still a good - * practice to deregister your observer, whenever possible. + * practice to deregister your observer, whenever possible.
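The three steps above amount to: implement the callback, register through a factory or an initialize() method, and deregister when the object goes out of scope. A compact sketch following those rules; the class name and the "hbase.example.pause" property are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.conf.ConfigurationManager;
    import org.apache.hadoop.hbase.conf.ConfigurationObserver;

    public class ExampleObserver implements ConfigurationObserver {
      private volatile long pauseMillis;

      private ExampleObserver(Configuration conf) {
        this.pauseMillis = conf.getLong("hbase.example.pause", 1000L);
      }

      /** Factory method so the 'this' reference does not escape from the constructor. */
      public static ExampleObserver create(Configuration conf, ConfigurationManager manager) {
        ExampleObserver observer = new ExampleObserver(conf);
        manager.registerObserver(observer);
        return observer;
      }

      @Override
      public void onConfigurationChange(Configuration newConf) {
        // refresh the cached value when the configuration is reloaded from disk
        this.pauseMillis = newConf.getLong("hbase.example.pause", 1000L);
      }

      public void close(ConfigurationManager manager) {
        manager.deregisterObserver(this);  // deregister when going out of scope
      }
    }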
+ *
+ *

*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -118,8 +120,8 @@ public void notifyAllObservers(Configuration conf) { observer.onConfigurationChange(conf); } } catch (Throwable t) { - LOG.error("Encountered a throwable while notifying observers: " + " of type : " + - observer.getClass().getCanonicalName() + "(" + observer + ")", t); + LOG.error("Encountered a throwable while notifying observers: of type : {}({})", + observer.getClass().getCanonicalName(), observer, t); } } } @@ -137,7 +139,6 @@ public int getNumObservers() { /** * @return true if contains the observer, for unit test only */ - @VisibleForTesting public boolean containsObserver(ConfigurationObserver observer) { synchronized (configurationObservers) { return configurationObservers.contains(observer); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java index 2370a21af033..0d1d8ce5a783 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,7 @@ /** * Every class that wants to observe changes in Configuration properties, * must implement interface (and also, register itself with the - * ConfigurationManager object. + * {@link ConfigurationManager}. */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java index 8741bcff912e..f0cb9b0a7d40 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java @@ -25,9 +25,6 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; - -import sun.nio.ch.DirectBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -35,8 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import sun.nio.ch.DirectBuffer; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** @@ -208,7 +205,6 @@ private static ByteBuffAllocator createOnHeap() { return new ByteBuffAllocator(false, 0, DEFAULT_BUFFER_SIZE, Integer.MAX_VALUE); } - @VisibleForTesting ByteBuffAllocator(boolean reservoirEnabled, int maxBufCount, int bufSize, int minSizeForReservoirUse) { this.reservoirEnabled = reservoirEnabled; @@ -241,7 +237,6 @@ public int getUsedBufferCount() { * The {@link ConcurrentLinkedQueue#size()} is O(N) complexity and time-consuming, so DO NOT use * the method except in UT. */ - @VisibleForTesting public int getFreeBufferCount() { return this.buffers.size(); } @@ -348,7 +343,6 @@ public ByteBuff allocate(int size) { /** * Free all direct buffers if allocated, mainly used for testing. 
*/ - @VisibleForTesting public void clean() { while (!buffers.isEmpty()) { ByteBuffer b = buffers.poll(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index fe229b692109..0dea94801b8a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -18,24 +18,23 @@ package org.apache.hadoop.hbase.io; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** * Represents an interval of version timestamps. Presumes timestamps between * {@link #INITIAL_MIN_TIMESTAMP} and {@link #INITIAL_MAX_TIMESTAMP} only. Gets freaked out if * passed a timestamp that is < {@link #INITIAL_MIN_TIMESTAMP}, - *

+ *

* Evaluated according to minStamp <= timestamp < maxStamp or [minStamp,maxStamp) in interval * notation. - *

+ *

* Can be returned and read by clients. Should not be directly created by clients. Thus, all * constructors are purposely @InterfaceAudience.Private. - *

+ *

* Immutable. Thread-safe. */ @InterfaceAudience.Public -public class TimeRange { +public final class TimeRange { public static final long INITIAL_MIN_TIMESTAMP = 0L; public static final long INITIAL_MAX_TIMESTAMP = Long.MAX_VALUE; private static final TimeRange ALL_TIME = new TimeRange(INITIAL_MIN_TIMESTAMP, @@ -84,67 +83,13 @@ public static TimeRange between(long minStamp, long maxStamp) { private final long maxStamp; private final boolean allTime; - /** - * Default constructor. - * Represents interval [0, Long.MAX_VALUE) (allTime) - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange() { - this(INITIAL_MIN_TIMESTAMP, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp) { - this(minStamp, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp) { - this(Bytes.toLong(minStamp)); - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp, byte [] maxStamp) { - this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); - } - /** * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IllegalArgumentException if either <0, - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp, long maxStamp) { - check(minStamp, maxStamp); + private TimeRange(long minStamp, long maxStamp) { this.minStamp = minStamp; this.maxStamp = maxStamp; this.allTime = isAllTime(minStamp, maxStamp); @@ -188,27 +133,8 @@ public boolean isAllTime() { /** * Check if the specified timestamp is within this TimeRange. - *

+ *

* Returns true if within interval [minStamp, maxStamp), false if not. - * @param bytes timestamp to check - * @param offset offset into the bytes - * @return true if within TimeRange, false if not - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. Use {@link #withinTimeRange(long)} instead - */ - @Deprecated - public boolean withinTimeRange(byte [] bytes, int offset) { - if (allTime) { - return true; - } - return withinTimeRange(Bytes.toLong(bytes, offset)); - } - - /** - * Check if the specified timestamp is within this TimeRange. - *
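With the deprecated public constructors removed, TimeRange instances now come from the static factory methods, and the interval stays half-open. A short sketch (the timestamps are arbitrary):

    import org.apache.hadoop.hbase.io.TimeRange;

    public class TimeRangeExample {
      public static void main(String[] args) {
        TimeRange range = TimeRange.between(1000L, 2000L);  // represents [1000, 2000)
        System.out.println(range.withinTimeRange(1500L));   // true
        System.out.println(range.withinTimeRange(2000L));   // false, maxStamp is exclusive
      }
    }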

- * Returns true if within interval [minStamp, maxStamp), false - * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java index f556ed22ef96..f3c9cabb945b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java @@ -18,10 +18,10 @@ import java.security.Key; +import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.util.MD5Hash; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -94,7 +94,7 @@ public Context setKey(Key key) { ", want=" + cipher.getKeyLength()); } this.key = key; - this.keyHash = MD5Hash.getMD5AsHex(encoded); + this.keyHash = new String(Hex.encodeHex(Encryption.computeCryptoKeyHash(conf, encoded))); return this; } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index af0089d02cda..6adcae5b22e3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -16,10 +16,11 @@ */ package org.apache.hadoop.hbase.io.crypto; +import static java.lang.String.format; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.security.DigestException; import java.security.Key; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -36,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.aes.AES; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.ReflectionUtils; @@ -51,6 +53,50 @@ public final class Encryption { private static final Logger LOG = LoggerFactory.getLogger(Encryption.class); + + /** + * Configuration key for globally enable / disable column family encryption + */ + public static final String CRYPTO_ENABLED_CONF_KEY = "hbase.crypto.enabled"; + + /** + * Default value for globally enable / disable column family encryption + * (set to "true" for backward compatibility) + */ + public static final boolean CRYPTO_ENABLED_CONF_DEFAULT = true; + + /** + * Configuration key for the hash algorithm used for generating key hash in encrypted HFiles. + * This is a MessageDigest algorithm identifier string, like "MD5", "SHA-256" or "SHA-384". + * (default: "MD5" for backward compatibility reasons) + */ + public static final String CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY = "hbase.crypto.key.hash.algorithm"; + + /** + * Default hash algorithm used for generating key hash in encrypted HFiles. + * (we use "MD5" for backward compatibility reasons) + */ + public static final String CRYPTO_KEY_HASH_ALGORITHM_CONF_DEFAULT = "MD5"; + + /** + * Configuration key for specifying the behaviour if the configured hash algorithm + * differs from the one used for generating key hash in encrypted HFiles currently being read. 
+ * + * - "false" (default): we won't fail but use the hash algorithm stored in the HFile + * - "true": we throw an exception (this can be useful if regulations are enforcing the usage + * of certain algorithms, e.g. on FIPS compliant clusters) + */ + public static final String CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY = + "hbase.crypto.key.hash.algorithm.failOnMismatch"; + + /** + * Default behaviour is not to fail if the hash algorithm configured differs from the one + * used in the HFile. (this is the more fail-safe approach, allowing us to read + * encrypted HFiles written using a different encryption key hash algorithm) + */ + public static final boolean CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_DEFAULT = false; + + /** * Crypto context */ @@ -99,6 +145,14 @@ private Encryption() { super(); } + + /** + * Returns true if the column family encryption feature is enabled globally. + */ + public static boolean isEncryptionEnabled(Configuration conf) { + return conf.getBoolean(CRYPTO_ENABLED_CONF_KEY, CRYPTO_ENABLED_CONF_DEFAULT); + } + /** * Get an cipher given a name * @param name the cipher name @@ -127,79 +181,63 @@ public static String[] getSupportedCiphers(Configuration conf) { } /** - * Return the MD5 digest of the concatenation of the supplied arguments. + * Returns the Hash Algorithm defined in the crypto configuration. */ - public static byte[] hash128(String... args) { - byte[] result = new byte[16]; + public static String getConfiguredHashAlgorithm(Configuration conf) { + return conf.getTrimmed(CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, + CRYPTO_KEY_HASH_ALGORITHM_CONF_DEFAULT); + } + + /** + * Returns the Hash Algorithm mismatch behaviour defined in the crypto configuration. + */ + public static boolean failOnHashAlgorithmMismatch(Configuration conf) { + return conf.getBoolean(CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY, + CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_DEFAULT); + } + + /** + * Returns the hash of the supplied argument, using the hash algorithm + * specified in the given config. + */ + public static byte[] computeCryptoKeyHash(Configuration conf, byte[] arg) { + String algorithm = getConfiguredHashAlgorithm(conf); try { - MessageDigest md = MessageDigest.getInstance("MD5"); - for (String arg: args) { - md.update(Bytes.toBytes(arg)); - } - md.digest(result, 0, result.length); - return result; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (DigestException e) { - throw new RuntimeException(e); + return hashWithAlg(algorithm, arg); + } catch (RuntimeException e) { + String message = format("Error in computeCryptoKeyHash (please check your configuration " + + "parameter %s and the security provider configuration of the JVM)", + CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY); + throw new RuntimeException(message, e); } } + /** + * Return the MD5 digest of the concatenation of the supplied arguments. + */ + public static byte[] hash128(String... args) { + return hashWithAlg("MD5", Bytes.toByteArrays(args)); + } + /** * Return the MD5 digest of the concatenation of the supplied arguments. */ public static byte[] hash128(byte[]... 
args) { - byte[] result = new byte[16]; - try { - MessageDigest md = MessageDigest.getInstance("MD5"); - for (byte[] arg: args) { - md.update(arg); - } - md.digest(result, 0, result.length); - return result; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (DigestException e) { - throw new RuntimeException(e); - } + return hashWithAlg("MD5", args); } /** * Return the SHA-256 digest of the concatenation of the supplied arguments. */ public static byte[] hash256(String... args) { - byte[] result = new byte[32]; - try { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - for (String arg: args) { - md.update(Bytes.toBytes(arg)); - } - md.digest(result, 0, result.length); - return result; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (DigestException e) { - throw new RuntimeException(e); - } + return hashWithAlg("SHA-256", Bytes.toByteArrays(args)); } /** * Return the SHA-256 digest of the concatenation of the supplied arguments. */ public static byte[] hash256(byte[]... args) { - byte[] result = new byte[32]; - try { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - for (byte[] arg: args) { - md.update(arg); - } - md.digest(result, 0, result.length); - return result; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (DigestException e) { - throw new RuntimeException(e); - } + return hashWithAlg("SHA-256", args); } /** @@ -208,21 +246,11 @@ public static byte[] hash256(byte[]... args) { * */ public static byte[] pbkdf128(String... args) { - byte[] salt = new byte[128]; - Bytes.random(salt); StringBuilder sb = new StringBuilder(); for (String s: args) { sb.append(s); } - PBEKeySpec spec = new PBEKeySpec(sb.toString().toCharArray(), salt, 10000, 128); - try { - return SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1") - .generateSecret(spec).getEncoded(); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (InvalidKeySpecException e) { - throw new RuntimeException(e); - } + return generateSecretKey("PBKDF2WithHmacSHA1", AES.KEY_LENGTH, sb.toString().toCharArray()); } /** @@ -231,19 +259,69 @@ public static byte[] pbkdf128(String... args) { * */ public static byte[] pbkdf128(byte[]... args) { - byte[] salt = new byte[128]; - Bytes.random(salt); StringBuilder sb = new StringBuilder(); for (byte[] b: args) { sb.append(Arrays.toString(b)); } - PBEKeySpec spec = new PBEKeySpec(sb.toString().toCharArray(), salt, 10000, 128); + return generateSecretKey("PBKDF2WithHmacSHA1", AES.KEY_LENGTH, sb.toString().toCharArray()); + } + + /** + * Return a key derived from the concatenation of the supplied arguments using + * PBKDF2WithHmacSHA384 key derivation algorithm at 10,000 iterations. + * + * The length of the returned key is determined based on the need of the cypher algorithm. + * E.g. for the default "AES" we will need a 128 bit long key, while if the user is using + * a custom cipher, we might generate keys with other length. + * + * This key generation method is used currently e.g. in the HBase Shell (admin.rb) to generate a + * column family data encryption key, if the user provided an ENCRYPTION_KEY parameter. + */ + public static byte[] generateSecretKey(Configuration conf, String cypherAlg, String... 
args) { + StringBuilder sb = new StringBuilder(); + for (String s: args) { + sb.append(s); + } + int keyLengthBytes = Encryption.getCipher(conf, cypherAlg).getKeyLength(); + return generateSecretKey("PBKDF2WithHmacSHA384", keyLengthBytes, sb.toString().toCharArray()); + } + + /** + * Return a key derived from the concatenation of the supplied arguments using + * PBKDF2WithHmacSHA384 key derivation algorithm at 10,000 iterations. + * + * The length of the returned key is determined based on the need of the cypher algorithm. + * E.g. for the default "AES" we will need a 128 bit long key, while if the user is using + * a custom cipher, we might generate keys with other length. + * + * This key generation method is used currently e.g. in the HBase Shell (admin.rb) to generate a + * column family data encryption key, if the user provided an ENCRYPTION_KEY parameter. + */ + public static byte[] generateSecretKey(Configuration conf, String cypherAlg, byte[]... args) { + StringBuilder sb = new StringBuilder(); + for (byte[] b: args) { + sb.append(Arrays.toString(b)); + } + int keyLength = Encryption.getCipher(conf, cypherAlg).getKeyLength(); + return generateSecretKey("PBKDF2WithHmacSHA384", keyLength, sb.toString().toCharArray()); + } + + /** + * Return a key (byte array) derived from the supplied password argument using the given + * algorithm with a random salt at 10,000 iterations. + * + * @param algorithm the secret key generation algorithm to use + * @param keyLengthBytes the length of the key to be derived (in bytes, not in bits) + * @param password char array to use as password for the key generation algorithm + * @return secret key encoded as a byte array + */ + private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, char[] password) { + byte[] salt = new byte[keyLengthBytes]; + Bytes.random(salt); + PBEKeySpec spec = new PBEKeySpec(password, salt, 10000, keyLengthBytes*8); try { - return SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1") - .generateSecret(spec).getEncoded(); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } catch (InvalidKeySpecException e) { + return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded(); + } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new RuntimeException(e); } } @@ -578,4 +656,20 @@ public static void incrementIv(byte[] iv, int v) { } while (v > 0); } + /** + * Return the hash of the concatenation of the supplied arguments, using the + * hash algorithm provided. + */ + public static byte[] hashWithAlg(String algorithm, byte[]... 
args) { + try { + MessageDigest md = MessageDigest.getInstance(algorithm); + for (byte[] arg: args) { + md.update(arg); + } + return md.digest(); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("unable to use hash algorithm: " + algorithm, e); + } + } + } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/AES.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/AES.java index 73b871350831..69f12f9ae068 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/AES.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/AES.java @@ -23,9 +23,7 @@ import java.security.GeneralSecurityException; import java.security.Key; import java.security.SecureRandom; - import javax.crypto.spec.SecretKeySpec; - import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.CipherProvider; import org.apache.hadoop.hbase.io.crypto.Context; @@ -36,7 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -149,7 +146,6 @@ public InputStream createDecryptionStream(InputStream in, Decryptor d) throws IO return d.createDecryptionStream(in); } - @VisibleForTesting SecureRandom getRNG() { return rng; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAES.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAES.java index a119c57f59d4..19c2bd8ae061 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAES.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAES.java @@ -24,9 +24,7 @@ import java.security.Key; import java.security.SecureRandom; import java.util.Properties; - import javax.crypto.spec.SecretKeySpec; - import org.apache.commons.crypto.cipher.CryptoCipherFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.Cipher; @@ -39,7 +37,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @InterfaceAudience.Private @@ -159,7 +156,6 @@ public InputStream createDecryptionStream(InputStream in, return decryptor.createDecryptionStream(in); } - @VisibleForTesting SecureRandom getRNG() { return rng; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java index afaf1976dc6d..0f19254e34a6 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -39,7 +38,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -48,7 +46,6 @@ * This is used only in testing. 
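The refactored helpers above centralize digesting and key derivation: hashWithAlg replaces the per-algorithm MessageDigest boilerplate, and the new generateSecretKey overloads size the PBKDF2WithHmacSHA384 output to the configured cipher's key length. A minimal sketch, assuming the default "AES" cipher is available on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyDerivationSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Digest arbitrary byte[] arguments with a caller-chosen algorithm.
    byte[] digest = Encryption.hashWithAlg("SHA-256", Bytes.toBytes("column family key"));

    // Derive a data key sized for the "AES" cipher (128 bits) from a passphrase,
    // the same path the shell uses for a user-supplied ENCRYPTION_KEY.
    byte[] dataKey = Encryption.generateSecretKey(conf, "AES", "s3cr3t passphrase");

    System.out.println(digest.length + " digest bytes, " + dataKey.length + " key bytes");
  }
}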
*/ @InterfaceAudience.Private -@VisibleForTesting public class EncodedDataBlock { private byte[] rawKVs; private ByteBuffer rawBuffer; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index 7ff7555ceb27..efc37e64522c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -18,15 +18,15 @@ import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.encoding.AbstractDataBlockEncoder.AbstractEncodedSeeker; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -359,26 +359,30 @@ public Cell toCell() { // TODO : reduce the varieties of KV here. Check if based on a boolean // we can handle the 'no tags' case. if (tagsLength > 0) { + // TODO : getRow len here. ret = new SizeCachedKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } } else { currentBuffer.asSubByteBuffer(startOffset, cellBufSize, tmpPair); ByteBuffer buf = tmpPair.getFirst(); if (buf.isDirect()) { - ret = - tagsLength > 0 ? new ByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId); + // TODO : getRow len here. + ret = tagsLength > 0 + ? 
new SizeCachedByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength) + : new SizeCachedNoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength); } else { if (tagsLength > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java index a488185591ce..68627c3b1c8a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java @@ -27,8 +27,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * The ThrottleInputStream provides bandwidth throttling on a specified * InputStream. It is implemented as a wrapper on top of another InputStream @@ -126,7 +124,6 @@ private long calSleepTimeMs() { EnvironmentEdgeManager.currentTime() - startTime); } - @VisibleForTesting static long calSleepTimeMs(long bytesRead, long maxBytesPerSec, long elapsed) { assert elapsed > 0 : "The elapsed time should be greater than zero"; if (bytesRead <= 0 || maxBytesPerSec <= 0) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java index 48fa522397c5..725a3764a36a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.net; +import java.net.InetSocketAddress; + import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -32,7 +34,7 @@ */ @InterfaceAudience.Public public class Address implements Comparable
<Address>
{ - private HostAndPort hostAndPort; + private final HostAndPort hostAndPort; private Address(HostAndPort hostAndPort) { this.hostAndPort = hostAndPort; @@ -46,6 +48,33 @@ public static Address fromString(String hostnameAndPort) { return new Address(HostAndPort.fromString(hostnameAndPort)); } + public static Address fromSocketAddress(InetSocketAddress addr) { + return Address.fromParts(addr.getHostString(), addr.getPort()); + } + + public static InetSocketAddress toSocketAddress(Address addr) { + return new InetSocketAddress(addr.getHostName(), addr.getPort()); + } + + public static InetSocketAddress[] toSocketAddress(Address[] addrs) { + if (addrs == null) { + return null; + } + InetSocketAddress[] result = new InetSocketAddress[addrs.length]; + for (int i = 0; i < addrs.length; i++) { + result[i] = toSocketAddress(addrs[i]); + } + return result; + } + + public String getHostName() { + return this.hostAndPort.getHost(); + } + + /** + * @deprecated Use {@link #getHostName()} instead + */ + @Deprecated public String getHostname() { return this.hostAndPort.getHost(); } @@ -65,7 +94,7 @@ public String toString() { * otherwise returns same as {@link #toString()}} */ public String toStringWithoutDomain() { - String hostname = getHostname(); + String hostname = getHostName(); String [] parts = hostname.split("\\."); if (parts.length > 1) { for (String part: parts) { @@ -86,7 +115,7 @@ public boolean equals(Object other) { } if (other instanceof Address) { Address that = (Address)other; - return this.getHostname().equals(that.getHostname()) && + return this.getHostName().equals(that.getHostName()) && this.getPort() == that.getPort(); } return false; @@ -94,12 +123,12 @@ public boolean equals(Object other) { @Override public int hashCode() { - return this.getHostname().hashCode() ^ getPort(); + return this.getHostName().hashCode() ^ getPort(); } @Override public int compareTo(Address that) { - int compare = this.getHostname().compareTo(that.getHostname()); + int compare = this.getHostName().compareTo(that.getHostName()); if (compare != 0) { return compare; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java index 1aa7ca1fedd0..bb4a4d7c6228 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -203,6 +203,9 @@ public String toString() { sb.append(", "); sb.append(" Tables:"); sb.append(this.tables); + sb.append(", "); + sb.append(" Configurations:"); + sb.append(this.configuration); return sb.toString(); } @@ -239,6 +242,7 @@ public int hashCode() { int result = servers.hashCode(); result = 31 * result + tables.hashCode(); result = 31 * result + name.hashCode(); + result = 31 * result + configuration.hashCode(); return result; } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java index efa18fb9f586..0c054ceaaa28 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java @@ -23,7 +23,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import 
org.apache.hadoop.hbase.BaseConfigurable; @@ -32,7 +31,6 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; @@ -57,7 +55,6 @@ public class UserProvider extends BaseConfigurable { static Groups groups = Groups.getUserToGroupsMappingService(); - @VisibleForTesting public static Groups getGroups() { return groups; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java index 42d1bf4fa006..0c4c52f99fe7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java @@ -34,8 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This class manages an array of ByteBuffers with a default size 4MB. These buffers are sequential * and could be considered as a large buffer.It supports reading/writing data from this large buffer @@ -61,7 +59,6 @@ public ByteBufferArray(long capacity, ByteBufferAllocator allocator) throws IOEx Runtime.getRuntime().availableProcessors(), capacity, allocator); } - @VisibleForTesting ByteBufferArray(int bufferSize, int bufferCount, int threadCount, long capacity, ByteBufferAllocator alloc) throws IOException { this.bufferSize = bufferSize; @@ -107,7 +104,6 @@ private void createBuffers(int threadCount, ByteBufferAllocator alloc) throws IO } } - @VisibleForTesting static int getBufferSize(long capacity) { int bufferSize = DEFAULT_BUFFER_SIZE; if (bufferSize > (capacity / 16)) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index 08c35be0e0ae..d270d63d635c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -27,7 +27,6 @@ import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Arrays; - import org.apache.hadoop.hbase.io.ByteBufferWriter; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -36,8 +35,6 @@ import org.apache.yetus.audience.InterfaceAudience; import sun.nio.ch.DirectBuffer; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility functions for working with byte buffers, such as reading/writing * variable-length long numbers. 
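The new Address conversions make it straightforward to move between HBase's Address and java.net.InetSocketAddress, with getHostName() superseding the now-deprecated getHostname(). A minimal usage sketch (hostname and port are placeholders):

import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.net.Address;

public class AddressSketch {
  public static void main(String[] args) {
    Address addr = Address.fromParts("localhost", 16020);

    // Round-trip through InetSocketAddress using the helpers added above.
    InetSocketAddress isa = Address.toSocketAddress(addr);
    Address back = Address.fromSocketAddress(isa);

    // Prefer getHostName(); getHostname() remains only as a deprecated alias.
    System.out.println(back.getHostName() + ":" + back.getPort());
  }
}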
@@ -49,7 +46,6 @@ public final class ByteBufferUtils { public final static int VALUE_MASK = 0x7f; public final static int NEXT_BIT_SHIFT = 7; public final static int NEXT_BIT_MASK = 1 << 7; - @VisibleForTesting final static boolean UNSAFE_AVAIL = UnsafeAvailChecker.isAvailable(); public final static boolean UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 6ecfa10d97b6..ce24694e7fdc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -37,7 +37,6 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.io.RawComparator; @@ -46,10 +45,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import sun.misc.Unsafe; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** @@ -127,7 +124,6 @@ public class Bytes implements Comparable { // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?) public static final int ESTIMATED_HEAP_TAX = 16; - @VisibleForTesting static final boolean UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned(); /** @@ -1270,7 +1266,6 @@ static abstract class Converter { } - @VisibleForTesting static Comparer lexicographicalComparerJavaImpl() { return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; } @@ -1420,7 +1415,6 @@ int putShort(byte[] bytes, int offset, short val) { *
<p>
Uses reflection to gracefully fall back to the Java implementation if * {@code Unsafe} isn't available. */ - @VisibleForTesting static class LexicographicalComparerHolder { static final String UNSAFE_COMPARER_NAME = LexicographicalComparerHolder.class.getName() + "$UnsafeComparer"; @@ -1470,7 +1464,6 @@ public int compareTo(byte[] buffer1, int offset1, int length1, } } - @VisibleForTesting enum UnsafeComparer implements Comparer { INSTANCE; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java index 6f88c005cb86..b983fc0f3db6 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -28,7 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -236,7 +235,6 @@ private static MemoryLayout getMemoryLayout() { private static final MemoryLayout memoryLayout = getMemoryLayout(); private static final boolean USE_UNSAFE_LAYOUT = (memoryLayout instanceof UnsafeLayout); - @VisibleForTesting public static boolean useUnsafeLayout() { return USE_UNSAFE_LAYOUT; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index aaa3e82f23e0..fef8e291b31e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -28,7 +28,6 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStreamBuilder; @@ -41,12 +40,10 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -321,6 +318,10 @@ public static void setFsDefault(final Configuration c, final Path root) { c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+ } + public static void setFsDefault(final Configuration c, final String uri) { + c.set("fs.defaultFS", uri); // for hadoop 0.21+ + } + public static FileSystem getRootDirFileSystem(final Configuration c) throws IOException { Path p = getRootDir(c); return p.getFileSystem(c); @@ -341,7 +342,20 @@ public static Path getWALRootDir(final Configuration c) throws IOException { return p.makeQualified(fs.getUri(), fs.getWorkingDirectory()); } - @VisibleForTesting + /** + * Returns the URI in the string format + * @param c configuration + * @param p path + * @return - the URI's to string format + * @throws IOException + */ + public static String getDirUri(final Configuration c, Path p) throws IOException { + if (p.toUri().getScheme() != null) { + return p.toUri().toString(); + } + return null; + } + public static void setWALRootDir(final Configuration c, final Path root) { c.set(HBASE_WAL_DIR, root.toString()); 
} @@ -364,7 +378,8 @@ private static boolean isValidWALRootDir(Path walDir, final Configuration c) thr if (!qualifiedWalDir.equals(rootDir)) { if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) { throw new IllegalStateException("Illegal WAL directory specified. " + - "WAL directories are not permitted to be under the root directory if set."); + "WAL directories are not permitted to be under root directory: rootDir=" + + rootDir.toString() + ", qualifiedWALDir=" + qualifiedWalDir); } } return true; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java index 2b4e1cbf02cd..5c23ddcedb5a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java @@ -35,13 +35,22 @@ public final class DNS { // the specification of server hostname is optional. The hostname should be resolvable from // both master and region server @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) - public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + public static final String UNSAFE_RS_HOSTNAME_KEY = "hbase.unsafe.regionserver.hostname"; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static final String MASTER_HOSTNAME_KEY = "hbase.master.hostname"; private static boolean HAS_NEW_DNS_GET_DEFAULT_HOST_API; private static Method GET_DEFAULT_HOST_METHOD; + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. + * Use {@link DNS#UNSAFE_RS_HOSTNAME_KEY} instead. + * @see HBASE-24667 + */ + @Deprecated + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) + public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + static { try { GET_DEFAULT_HOST_METHOD = org.apache.hadoop.net.DNS.class @@ -50,6 +59,7 @@ public final class DNS { } catch (Exception e) { HAS_NEW_DNS_GET_DEFAULT_HOST_API = false; // FindBugs: Causes REC_CATCH_EXCEPTION. Suppressed } + Configuration.addDeprecation(RS_HOSTNAME_KEY, UNSAFE_RS_HOSTNAME_KEY); } public enum ServerType { @@ -106,7 +116,7 @@ public static String getHostname(@NonNull Configuration conf, @NonNull ServerTyp hostname = conf.get(MASTER_HOSTNAME_KEY); break; case REGIONSERVER: - hostname = conf.get(RS_HOSTNAME_KEY); + hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); break; default: hostname = null; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java index dfd9ead27854..67a7d84b26fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java @@ -139,19 +139,23 @@ private static void setStackTrace(Throwable error) { error.setStackTrace(newStackTrace); } - private static IOException rethrow(ExecutionException error) throws IOException { - Throwable cause = error.getCause(); - if (cause instanceof IOException) { - setStackTrace(cause); - throw (IOException) cause; - } else if (cause instanceof RuntimeException) { - setStackTrace(cause); - throw (RuntimeException) cause; - } else if (cause instanceof Error) { - setStackTrace(cause); - throw (Error) cause; + /** + * If we could propagate the given {@code error} directly, we will fill the stack trace with the + * current thread's stack trace so it is easier to trace where is the exception thrown. 
If not, we + * will just create a new IOException and then throw it. + */ + public static IOException rethrow(Throwable error) throws IOException { + if (error instanceof IOException) { + setStackTrace(error); + throw (IOException) error; + } else if (error instanceof RuntimeException) { + setStackTrace(error); + throw (RuntimeException) error; + } else if (error instanceof Error) { + setStackTrace(error); + throw (Error) error; } else { - throw new IOException(cause); + throw new IOException(error); } } @@ -165,7 +169,7 @@ public static T get(Future future) throws IOException { } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } } @@ -179,7 +183,7 @@ public static T get(Future future, long timeout, TimeUnit unit) throws IO } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } catch (TimeoutException e) { throw new TimeoutIOException(e); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java index 80be4af72f13..59c2d80f4d18 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java @@ -58,4 +58,8 @@ public LongAdder read(JsonReader in) throws IOException { } }); } + + public static GsonBuilder createGsonWithDisableHtmlEscaping() { + return createGson().disableHtmlEscaping(); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java index 9e5692feebb6..112af1ef85a7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java @@ -21,12 +21,10 @@ import java.io.InterruptedIOException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -213,12 +211,10 @@ public boolean isHeldByCurrentThread(long id) { } } - @VisibleForTesting void assertMapEmpty() { assert map.isEmpty(); } - @VisibleForTesting public void waitForWaiters(long id, int numWaiters) throws InterruptedException { for (Entry entry;;) { entry = map.get(id); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java index ff2fd4538359..f565bc1ad181 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java @@ -28,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility class that handles ordered byte arrays. That is, unlike * {@link Bytes}, these methods produce byte arrays which maintain the sort @@ -368,7 +366,6 @@ private static int putUint32(PositionedByteRange dst, int val) { * @param comp Compliment the encoded value when {@code comp} is true. 
* @return number of bytes written. */ - @VisibleForTesting static int putVaruint64(PositionedByteRange dst, long val, boolean comp) { int w, y, len = 0; final int offset = dst.getOffset(), start = dst.getPosition(); @@ -457,7 +454,6 @@ static int putVaruint64(PositionedByteRange dst, long val, boolean comp) { * @param comp if true, parse the compliment of the value. * @return the number of bytes consumed by this value. */ - @VisibleForTesting static int lengthVaruint64(PositionedByteRange src, boolean comp) { int a0 = (comp ? DESCENDING : ASCENDING).apply(src.peek()) & 0xff; if (a0 <= 240) return 1; @@ -478,7 +474,6 @@ static int lengthVaruint64(PositionedByteRange src, boolean comp) { * @param cmp if true, parse the compliment of the value. * @return the number of bytes skipped. */ - @VisibleForTesting static int skipVaruint64(PositionedByteRange src, boolean cmp) { final int len = lengthVaruint64(src, cmp); src.setPosition(src.getPosition() + len); @@ -490,7 +485,6 @@ static int skipVaruint64(PositionedByteRange src, boolean cmp) { * encoded value when {@code comp} is true. * @return the decoded value. */ - @VisibleForTesting static long getVaruint64(PositionedByteRange src, boolean comp) { assert src.getRemaining() >= lengthVaruint64(src, comp); final long ret; @@ -547,7 +541,6 @@ static long getVaruint64(PositionedByteRange src, boolean comp) { * From Phoenix's {@code NumberUtil}. * @return new {@link BigDecimal} instance */ - @VisibleForTesting static BigDecimal normalize(BigDecimal val) { return null == val ? null : val.stripTrailingZeros().round(DEFAULT_MATH_CONTEXT); } @@ -1013,7 +1006,6 @@ public static int blobVarEncodedLength(int len) { /** * Calculate the expected BlobVar decoded length based on encoded length. */ - @VisibleForTesting static int blobVarDecodedLength(int len) { return ((len diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index ff7064b11430..83eb01a635fd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -41,11 +41,17 @@ public final class PrettyPrinter { "((\\d+)\\s*MINUTES?)?\\s*((\\d+)\\s*SECONDS?)?\\s*\\)?"; private static final Pattern INTERVAL_PATTERN = Pattern.compile(INTERVAL_REGEX, Pattern.CASE_INSENSITIVE); + private static final String SIZE_REGEX = "((\\d+)\\s*B?\\s*\\()?\\s*" + + "((\\d+)\\s*TB?)?\\s*((\\d+)\\s*GB?)?\\s*" + + "((\\d+)\\s*MB?)?\\s*((\\d+)\\s*KB?)?\\s*((\\d+)\\s*B?)?\\s*\\)?"; + private static final Pattern SIZE_PATTERN = Pattern.compile(SIZE_REGEX, + Pattern.CASE_INSENSITIVE); public enum Unit { TIME_INTERVAL, LONG, BOOLEAN, + BYTE, NONE } @@ -63,6 +69,9 @@ public static String format(final String value, final Unit unit) { byte[] booleanBytes = Bytes.toBytesBinary(value); human.append(String.valueOf(Bytes.toBoolean(booleanBytes))); break; + case BYTE: + human.append(humanReadableByte(Long.parseLong(value))); + break; default: human.append(value); } @@ -82,6 +91,9 @@ public static String valueOf(final String pretty, final Unit unit) throws HBaseE case TIME_INTERVAL: value.append(humanReadableIntervalToSec(pretty)); break; + case BYTE: + value.append(humanReadableSizeToBytes(pretty)); + break; default: value.append(pretty); } @@ -191,6 +203,116 @@ private static long humanReadableIntervalToSec(final String humanReadableInterva return ttl; } + /** + * Convert a long size to a human readable string. 
+ * Example: 10763632640 -> 10763632640 B (10GB 25MB) + * @param size the size in bytes + * @return human readable string + */ + private static String humanReadableByte(final long size) { + StringBuilder sb = new StringBuilder(); + long tb, gb, mb, kb, b; + + if (size < HConstants.KB_IN_BYTES) { + sb.append(size); + sb.append(" B"); + return sb.toString(); + } + + tb = size / HConstants.TB_IN_BYTES; + gb = (size - HConstants.TB_IN_BYTES * tb) / HConstants.GB_IN_BYTES; + mb = (size - HConstants.TB_IN_BYTES * tb + - HConstants.GB_IN_BYTES * gb) / HConstants.MB_IN_BYTES; + kb = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb) / HConstants.KB_IN_BYTES; + b = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb - HConstants.KB_IN_BYTES * kb); + + sb.append(size).append(" B ("); + if (tb > 0) { + sb.append(tb); + sb.append("TB"); + } + + if (gb > 0) { + sb.append(tb > 0 ? " " : ""); + sb.append(gb); + sb.append("GB"); + } + + if (mb > 0) { + sb.append(tb + gb > 0 ? " " : ""); + sb.append(mb); + sb.append("MB"); + } + + if (kb > 0) { + sb.append(tb + gb + mb > 0 ? " " : ""); + sb.append(kb); + sb.append("KB"); + } + + if (b > 0) { + sb.append(tb + gb + mb + kb > 0 ? " " : ""); + sb.append(b); + sb.append("B"); + } + + sb.append(")"); + return sb.toString(); + } + + /** + * Convert a human readable size to bytes. + * Examples of the human readable size are: 50 GB 20 MB 1 KB , 25000 B etc. + * The units of size specified can be in uppercase as well as lowercase. Also, if a + * single number is specified without any time unit, it is assumed to be in bytes. + * @param humanReadableSize human readable size + * @return value in bytes + * @throws HBaseException + */ + private static long humanReadableSizeToBytes(final String humanReadableSize) + throws HBaseException { + if (humanReadableSize == null) { + return -1; + } + + try { + return Long.parseLong(humanReadableSize); + } catch(NumberFormatException ex) { + LOG.debug("Given size value is not a number, parsing for human readable format"); + } + + String tb = null; + String gb = null; + String mb = null; + String kb = null; + String b = null; + String expectedSize = null; + long size = 0; + + Matcher matcher = PrettyPrinter.SIZE_PATTERN.matcher(humanReadableSize); + if (matcher.matches()) { + expectedSize = matcher.group(2); + tb = matcher.group(4); + gb = matcher.group(6); + mb = matcher.group(8); + kb = matcher.group(10); + b = matcher.group(12); + } + size += tb != null ? Long.parseLong(tb)*HConstants.TB_IN_BYTES:0; + size += gb != null ? Long.parseLong(gb)*HConstants.GB_IN_BYTES:0; + size += mb != null ? Long.parseLong(mb)*HConstants.MB_IN_BYTES:0; + size += kb != null ? Long.parseLong(kb)*HConstants.KB_IN_BYTES:0; + size += b != null ? Long.parseLong(b):0; + + if (expectedSize != null && Long.parseLong(expectedSize) != size) { + throw new HBaseException("Malformed size string: values in byte and human readable" + + "format do not match"); + } + return size; + } + /** * Pretty prints a collection of any type to a string. Relies on toString() implementation of the * object type. 
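With the new Unit.BYTE, PrettyPrinter renders a byte count in the "<bytes> B (<human readable>)" form shown in the javadoc example above and parses such strings back to a byte count. A minimal sketch of both directions:

import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.util.PrettyPrinter;

public class ByteUnitSketch {
  public static void main(String[] args) throws HBaseException {
    // 10 GB + 25 MB = 10763632640 bytes, matching the javadoc example above.
    String pretty = PrettyPrinter.format("10763632640", PrettyPrinter.Unit.BYTE);

    // Parsing accepts human readable sizes (units in either case) or a plain byte count.
    String bytes = PrettyPrinter.valueOf("10GB 25MB", PrettyPrinter.Unit.BYTE);

    System.out.println(pretty + " <-> " + bytes);
  }
}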
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 988277720401..e4e4ce8e5994 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -28,8 +28,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility methods for reading, and building the ZooKeeper configuration. * @@ -299,7 +297,6 @@ public static String getZooKeeperClusterKey(Configuration conf, String name) { * @param clientPort the default client port * @return the string for a list of "server:port" separated by "," */ - @VisibleForTesting public static String standardizeZKQuorumServerString(String quorumStringInput, String clientPort) { String[] serverHosts = quorumStringInput.split(","); @@ -312,7 +309,6 @@ public static String standardizeZKQuorumServerString(String quorumStringInput, // in this case, the clientPort would be ignored) // (3). s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use // the clientPort; otherwise, it would use the specified port) - @VisibleForTesting public static class ZKClusterKey { private String quorumString; private int clientPort; diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 6fb6ce98e33d..20f3881edb2c 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1033,9 +1033,7 @@ possible configurations would overwhelm and obscure the important. hbase.bucketcache.size - A float that EITHER represents a percentage of total heap memory - size to give to the cache (if < 1.0) OR, it is the total capacity in - megabytes of BucketCache. Default: 0.0 + It is the total capacity in megabytes of BucketCache. Default: 0.0 hbase.bucketcache.bucket.sizes @@ -1119,19 +1117,19 @@ possible configurations would overwhelm and obscure the important. http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay() - hbase.regionserver.hostname + hbase.unsafe.regionserver.hostname This config is for experts: don't set its value unless you really know what you are doing. When set to a non-empty value, this represents the (external facing) hostname for the underlying server. See https://issues.apache.org/jira/browse/HBASE-12954 for details. - hbase.regionserver.hostname.disable.master.reversedns + hbase.unsafe.regionserver.hostname.disable.master.reversedns false This config is for experts: don't set its value unless you really know what you are doing. When set to true, regionserver will use the current node hostname for the servername and HMaster will skip reverse DNS lookup and use the hostname sent by regionserver instead. Note that this config and - hbase.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 + hbase.unsafe.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 for more details. + + + + dfs.client.read.shortcircuit + false + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + + + Optional. 
This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml new file mode 100644 index 000000000000..8594494782c5 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + true + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + /var/lib/hadoop-hdfs/dn_socket + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index a93718f70dbb..2b38dcbaae48 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -71,7 +71,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.Service; @@ -103,7 +102,7 @@ public static void main(String[] args) throws Throwable { System.exit(response == null ? 
-1 : 0); } - @VisibleForTesting + @InterfaceAudience.Private static Map run(final Configuration conf, final String[] args) throws Throwable { String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); if (!ExportUtils.isValidArguements(args)) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 2f5024737dbc..d3be45b56f68 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -216,6 +216,7 @@ public static void beforeClass() throws Exception { Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_OWNER, Permission.Action.CREATE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @@ -236,11 +237,11 @@ public static void afterClass() throws Exception { public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) + .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, Permission.Action.READ); @@ -340,9 +341,9 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor exportHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -398,9 +399,8 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor importHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(importTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, importHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction importAction = () -> { String[] args = new String[]{ "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java index b773ee89ff57..fdbdbc6244f8 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java @@ -111,7 +111,7 @@ private CompletableFuture 
closeConn() { CompletableFuture closeFuture = new CompletableFuture<>(); addListener(f, (conn, error) -> { if (error == null) { - IOUtils.closeQuietly(conn); + IOUtils.closeQuietly(conn, e -> LOG.warn("failed to close conn", e)); } closeFuture.complete(null); }); diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index 6ccd138f70dc..246d7e0a138c 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -100,6 +100,9 @@ public MemcachedBlockCache(Configuration c) throws IOException { // case. String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211"); String[] servers = serverListString.split(","); + // MemcachedClient requires InetSocketAddresses, we have to create them now. Implies any + // resolved identities cannot have their address mappings changed while the MemcachedClient + // instance is alive. We won't get a chance to trigger re-resolution. List serverAddresses = new ArrayList<>(servers.length); for (String s:servers) { serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s)); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java index 96fc95416a83..a816d4970449 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java @@ -38,8 +38,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This class acts as an adapter to export the MetricRegistry's in the global registry. Each * MetricRegistry will be registered or unregistered from the metric2 system. 
The collection will @@ -102,7 +100,6 @@ public static GlobalMetricRegistriesAdapter init() { return new GlobalMetricRegistriesAdapter(); } - @VisibleForTesting public void stop() { stopped.set(true); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index afeccee504a7..9d880d16137f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -571,6 +571,9 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String RPC_SCAN_REQUEST_COUNT = "rpcScanRequestCount"; String RPC_SCAN_REQUEST_COUNT_DESC = "Number of rpc scan requests this RegionServer has answered."; + String RPC_FULL_SCAN_REQUEST_COUNT = "rpcFullScanRequestCount"; + String RPC_FULL_SCAN_REQUEST_COUNT_DESC = + "Number of rpc scan requests that were possible full region scans."; String RPC_MULTI_REQUEST_COUNT = "rpcMultiRequestCount"; String RPC_MULTI_REQUEST_COUNT_DESC = "Number of rpc multi requests this RegionServer has answered."; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 32a2b1e99f4e..7b0225482cb8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -351,6 +351,8 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { rsWrap.getWriteRequestsCount()) .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), rsWrap.getRpcGetRequestsCount()) + .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcFullScanRequestsCount()) .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), rsWrap.getRpcScanRequestsCount()) .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index 4ca2d0e85ad1..e4305330b671 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -492,6 +492,11 @@ public interface MetricsRegionServerWrapper { */ long getRpcScanRequestsCount(); + /** + * Get the number of full region rpc scan requests to this region server. + */ + long getRpcFullScanRequestsCount(); + /** * Get the number of rpc multi requests to this region server. 
*/ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 231bad1be879..2aeb82b0d64d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -53,6 +53,9 @@ public interface MetricsTableLatencies { String DELETE_BATCH_TIME = "deleteBatchTime"; String INCREMENT_TIME = "incrementTime"; String APPEND_TIME = "appendTime"; + String CHECK_AND_DELETE_TIME = "checkAndDeleteTime"; + String CHECK_AND_PUT_TIME = "checkAndPutTime"; + String CHECK_AND_MUTATE_TIME = "checkAndMutateTime"; /** * Update the Put time histogram @@ -125,4 +128,26 @@ public interface MetricsTableLatencies { * @param t time it took */ void updateScanTime(String tableName, long t); + + /** + * Update the CheckAndDelete time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndDelete(String nameAsString, long time); + + /** + * Update the CheckAndPut time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndPut(String nameAsString, long time); + + /** + * Update the CheckAndMutate time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndMutate(String nameAsString, long time); + } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index 5a3f3b9d2491..dd143d4c6f5d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -25,8 +25,6 @@ import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Implementation of {@link MetricsTableLatencies} to track latencies for one table in a * RegionServer. 
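A hedged usage sketch for the three new MetricsTableLatencies methods above: a RegionServer-side caller times the operation and hands the elapsed value to the per-table histogram. The timing scaffolding and table name are illustrative; only updateCheckAndPut/updateCheckAndDelete/updateCheckAndMutate come from the patch.

// Sketch only: 'latencies' is assumed to be an already-constructed
// MetricsTableLatencies implementation (e.g. MetricsTableLatenciesImpl).
void timedCheckAndPut(MetricsTableLatencies latencies, String tableName) {
  long start = System.nanoTime();
  // ... perform the actual checkAndPut against the region ...
  long elapsed = System.nanoTime() - start;
  // Units are whatever the backing time histogram expects; the pre-existing
  // update* methods on this interface follow the same convention.
  latencies.updateCheckAndPut(tableName, elapsed);
}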
@@ -36,7 +34,6 @@ public class MetricsTableLatenciesImpl extends BaseSourceImpl implements Metrics private final HashMap histogramsByTable = new HashMap<>(); - @VisibleForTesting public static class TableHistograms { final MetricHistogram getTimeHisto; final MetricHistogram incrementTimeHisto; @@ -47,6 +44,9 @@ public static class TableHistograms { final MetricHistogram deleteBatchTimeHisto; final MetricHistogram scanTimeHisto; final MetricHistogram scanSizeHisto; + final MetricHistogram checkAndDeleteTimeHisto; + final MetricHistogram checkAndPutTimeHisto; + final MetricHistogram checkAndMutateTimeHisto; TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); @@ -60,6 +60,12 @@ public static class TableHistograms { qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); + checkAndDeleteTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); + checkAndPutTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + checkAndMutateTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } public void updatePut(long time) { @@ -97,9 +103,20 @@ public void updateScanSize(long scanSize) { public void updateScanTime(long t) { scanTimeHisto.add(t); } + + public void updateCheckAndDeleteTime(long t) { + checkAndDeleteTimeHisto.add(t); + } + + public void updateCheckAndPutTime(long t) { + checkAndPutTimeHisto.add(t); + } + + public void updateCheckAndMutateTime(long t) { + checkAndMutateTimeHisto.add(t); + } } - @VisibleForTesting public static String qualifyMetricsName(TableName tableName, String metric) { StringBuilder sb = new StringBuilder(); sb.append("Namespace_").append(tableName.getNamespaceAsString()); @@ -108,7 +125,6 @@ public static String qualifyMetricsName(TableName tableName, String metric) { return sb.toString(); } - @VisibleForTesting public TableHistograms getOrCreateTableHistogram(String tableName) { // TODO Java8's ConcurrentHashMap#computeIfAbsent would be stellar instead final TableName tn = TableName.valueOf(tableName); @@ -174,6 +190,21 @@ public void updateScanTime(String tableName, long t) { getOrCreateTableHistogram(tableName).updateScanTime(t); } + @Override + public void updateCheckAndDelete(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndDeleteTime(time); + } + + @Override + public void updateCheckAndPut(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndPutTime(time); + } + + @Override + public void updateCheckAndMutate(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndMutateTime(time); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index b39e1444dd2f..85f5bded98a8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -337,7 +337,7 @@ private void addGauge(MetricsRecordBuilder 
mrb, Map metricMap, Str for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.UNDERSCORE)[1] + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + metricName, metricDesc), entry.getValue()); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index 4b8c46af2c0f..40fd6d8effaf 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -28,7 +28,7 @@ */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { - public String UNDERSCORE = "_"; + public String HASH = "#"; /** * Get the number of read requests that have been issued against this table */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 7dc27f609730..b179b91d8a38 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver.wal; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @@ -79,7 +80,7 @@ public interface MetricsWALSource extends BaseSource { /** * Add the append size. */ - void incrementAppendSize(long size); + void incrementAppendSize(TableName tableName, long size); /** * Add the time it took to append. 
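The separator change in MetricsTableWrapperAggregate above (UNDERSCORE replaced by HASH) matters because addGauge() recovers the column-family part of the map key by splitting on that separator, and table names can themselves contain underscores. A small stand-alone sketch with an illustrative key; "#" is assumed not to appear in typical table or family names:

public class SeparatorSketch {
  public static void main(String[] args) {
    String underscoreKey = "user_events_info";  // old form: <table>_<family>
    String hashKey = "user_events#info";        // new form: <table>#<family>
    // Old behaviour: the table's own underscore shifts the split.
    System.out.println(underscoreKey.split("_")[1]); // prints "events" -- wrong family
    // New behaviour: the family lands in the expected slot.
    System.out.println(hashKey.split("#")[1]);       // prints "info"   -- intended family
  }
}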
@@ -89,7 +90,7 @@ public interface MetricsWALSource extends BaseSource { /** * Increment the count of wal appends */ - void incrementAppendCount(); + void incrementAppendCount(TableName tableName); /** * Increment the number of appends that were slow @@ -114,6 +115,4 @@ public interface MetricsWALSource extends BaseSource { void incrementSizeLogRoll(); void incrementWrittenBytes(long val); - - long getWrittenBytes(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java index eb605c503628..d308913f6e45 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hbase.regionserver.wal; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -43,6 +46,9 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo private final MutableFastCounter slowSyncRollRequested; private final MutableFastCounter sizeRollRequested; private final MutableFastCounter writtenBytes; + // Per table metrics. + private final ConcurrentMap perTableAppendCount; + private final ConcurrentMap perTableAppendSize; public MetricsWALSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); @@ -72,11 +78,23 @@ public MetricsWALSourceImpl(String metricsName, sizeRollRequested = this.getMetricsRegistry() .newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L); + perTableAppendCount = new ConcurrentHashMap<>(); + perTableAppendSize = new ConcurrentHashMap<>(); } @Override - public void incrementAppendSize(long size) { + public void incrementAppendSize(TableName tableName, long size) { appendSizeHisto.add(size); + MutableFastCounter tableAppendSizeCounter = perTableAppendSize.get(tableName); + if (tableAppendSizeCounter == null) { + // Ideally putIfAbsent is atomic and we don't need a branch check but we still do it to avoid + // expensive string construction for every append. 
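The per-table WAL counters in this hunk use a get-then-putIfAbsent pattern: the common path (counter already present) never pays for String.format or counter construction, while concurrent first appends for the same table still converge on a single counter instance. A stand-alone sketch of the same pattern, using LongAdder purely as a stand-in for MutableFastCounter:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.LongAdder;

public class PerKeyCounterSketch {
  private final ConcurrentMap<String, LongAdder> counters = new ConcurrentHashMap<>();

  void increment(String table) {
    LongAdder counter = counters.get(table);     // fast path: no allocation, no formatting
    if (counter == null) {
      counters.putIfAbsent(table, new LongAdder());
      counter = counters.get(table);             // winner or loser, both now share one instance
    }
    counter.increment();
  }
}

ConcurrentHashMap#computeIfAbsent would collapse the two steps into one call, which is what the TODO in MetricsTableLatenciesImpl alludes to; the explicit form shown here matches the code in the patch.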
+ String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE); + perTableAppendSize.putIfAbsent( + tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); + tableAppendSizeCounter = perTableAppendSize.get(tableName); + } + tableAppendSizeCounter.incr(size); } @Override @@ -85,8 +103,16 @@ public void incrementAppendTime(long time) { } @Override - public void incrementAppendCount() { + public void incrementAppendCount(TableName tableName) { appendCount.incr(); + MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName); + if (tableAppendCounter == null) { + String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT); + perTableAppendCount.putIfAbsent( + tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); + tableAppendCounter = perTableAppendCount.get(tableName); + } + tableAppendCounter.incr(); } @Override @@ -133,10 +159,4 @@ public long getSlowAppendCount() { public void incrementWrittenBytes(long val) { writtenBytes.incr(val); } - - @Override - public long getWrittenBytes() { - return writtenBytes.value(); - } - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index fbcd9fc7eaf8..88b491ba3ea1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -30,8 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * JMX caches the beans that have been exported; even after the values are removed from hadoop's * metrics system the keys and old values will still remain. This class stops and restarts the @@ -75,7 +73,6 @@ public static void clearJmxCache() { * Stops the clearing of JMX metrics and restarting the Hadoop metrics system. This is needed for * some test environments where we manually inject sources or sinks dynamically. */ - @VisibleForTesting public static void stop() { stopped.set(true); ScheduledFuture future = fut.get(); @@ -86,7 +83,6 @@ public static void stop() { * Restarts the stopped service. 
* @see #stop() */ - @VisibleForTesting public static void restart() { stopped.set(false); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index 8d075589d4af..c1880f8203ba 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -27,8 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm * for streaming calculation of targeted high-percentile epsilon-approximate @@ -257,7 +255,6 @@ synchronized public long getCount() { * * @return count current number of samples */ - @VisibleForTesting synchronized public int getSampleCount() { return samples.size(); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 9a860a041d45..dbdc92da8ac4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -116,14 +116,14 @@ public long getCpRequestsCount(String table) { @Override public Map getMemstoreOnlyRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } @Override public Map getMixedRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index da0d917a826e..339cc40847d3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hbase.hbtop.Record.entry; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.field.Field; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 9dec51e0ce8a..2807fd8ef61e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -21,7 +21,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.ArrayList; import java.util.Arrays; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index 905e4c8fd7a2..c633e37825ea 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ 
b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.text.ParseException; @@ -27,7 +27,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetricsBuilder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index beb0ee8075d4..dcbdb6b9b8ab 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 106cfe4af47b..4f0864838532 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 04fd03d1663d..6c498e94eb1d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index ed397f6adc66..b705531475f3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -19,7 +19,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java 
b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index ec29fd38f0a1..cbfc7283fc64 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index 722aa2db03ad..a73d54ea6bb9 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index 6889639f4584..f718304671c4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index 92ca7767936e..f094c85f5481 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index 2e2931fd1c17..cbf740430b0a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; 
import static org.mockito.ArgumentMatchers.anyInt; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 0f7b4e3d063e..245bf615e731 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.help; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index e6c75b5737dc..1b7e12a6240f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.never; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 99c29c92d131..414b5b0702c5 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index a5357cc303ed..b5e9bb9f3ba6 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.inOrder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index d4507597579f..0acd79c56d2d 100644 --- 
a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index 7cba9f6aef36..e0c09dfe1673 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 85b901048954..44a8878407a0 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index 8a47ca99ea3c..41844e755438 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -67,7 +67,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion; @@ -174,7 +173,6 @@ private ListenerInfo(boolean isManaged, ServerConnector listener) { private final List listeners = Lists.newArrayList(); - @VisibleForTesting public List getServerConnectors() { return listeners.stream().map(info -> info.listener).collect(Collectors.toList()); } @@ -414,6 +412,7 @@ public HttpServer build() throws IOException { httpConfig.setHeaderCacheSize(DEFAULT_MAX_HEADER_SIZE); httpConfig.setResponseHeaderSize(DEFAULT_MAX_HEADER_SIZE); httpConfig.setRequestHeaderSize(DEFAULT_MAX_HEADER_SIZE); + httpConfig.setSendServerVersion(false); if ("http".equals(scheme)) { listener = new ServerConnector(server.webServer, new HttpConnectionFactory(httpConfig)); @@ -1121,7 +1120,6 @@ private void loadListeners() { * Open the main listener for the server * @throws Exception if the listener cannot be opened or the appropriate port is already in use */ - 
@VisibleForTesting void openListeners() throws Exception { for (ListenerInfo li : listeners) { ServerConnector listener = li.listener; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java index 5fb17c97cf53..182a4e10996d 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java @@ -150,6 +150,25 @@ static boolean containsUpperCase(final Iterable strings) { return false; } + /** + * The purpose of this function is to get the doAs parameter of a http request + * case insensitively + * @param request + * @return doAs parameter if exists or null otherwise + */ + public static String getDoasFromHeader(final HttpServletRequest request) { + String doas = null; + final Enumeration headers = request.getHeaderNames(); + while (headers.hasMoreElements()){ + String header = headers.nextElement(); + if (header.toLowerCase().equals("doas")){ + doas = request.getHeader(header); + break; + } + } + return doas; + } + public static HttpServletRequest toLowerCase( final HttpServletRequest request) { @SuppressWarnings("unchecked") diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index dede1f9d8fdf..1fcfa1390c2c 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -24,6 +24,7 @@ import java.io.PrintWriter; import java.net.URL; import java.net.URLConnection; +import java.nio.charset.StandardCharsets; import java.util.Objects; import java.util.regex.Pattern; import javax.net.ssl.HttpsURLConnection; @@ -47,9 +48,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.Charsets; - /** * Change log level in runtime. 
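A hedged usage sketch for the getDoasFromHeader() helper added above: it lets a servlet filter honor the impersonation header regardless of how the client capitalized it. The surrounding filter plumbing and fallback policy are illustrative, not from the patch.

import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter;

// Sketch only; the enclosing Filter implementation is omitted.
public class DoAsSketch {
  static String resolveEffectiveUser(HttpServletRequest request, String authenticatedUser) {
    // "doAs", "DoAs" and "DOAS" all match, since the helper lower-cases header names.
    String doAs = ProxyUserAuthenticationFilter.getDoasFromHeader(request);
    return doAs != null ? doAs : authenticatedUser;
  }
}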
*/ @@ -89,7 +87,6 @@ public static boolean isValidProtocol(String protocol) { protocol.equals(PROTOCOL_HTTPS))); } - @VisibleForTesting static class CLI extends Configured implements Tool { private Operations operation = Operations.UNKNOWN; private String protocol; @@ -289,7 +286,7 @@ private void process(String urlString) throws Exception { // read from the servlet try (InputStreamReader streamReader = - new InputStreamReader(connection.getInputStream(), Charsets.UTF_8); + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 41a1235baaf4..6b9d2c341ed7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -22,7 +22,8 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; + import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index c8abd9c6cebc..c201c7a52328 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -101,7 +101,12 @@ public static KeyPair generateKeyPair(String algorithm) private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { - KeyStore ks = KeyStore.getInstance("JKS"); + return createEmptyKeyStore("jks"); + } + + private static KeyStore createEmptyKeyStore(String keyStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = KeyStore.getInstance(keyStoreType); ks.load(null, null); // initialize return ks; } @@ -117,18 +122,29 @@ private static void saveKeyStore(KeyStore ks, String filename, } } + /** + * Creates a keystore with a single key and saves it to a file. + * This method will use the same password for the keystore and for the key. + * This method will always generate a keystore file in JKS format. 
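For context on the KeyStoreTestUtil changes: the existing helpers always wrote JKS stores, and the overloads added in the hunks that follow also accept an explicit store type. A hedged usage sketch; paths, passwords and key material are placeholders.

// Assumes 'keyPair' (java.security.KeyPair) and a matching self-signed 'cert'
// (java.security.cert.Certificate) were generated earlier in the test.
KeyStoreTestUtil.createKeyStore("/tmp/server.jks", "storepass", "server",
    keyPair.getPrivate(), cert);                           // JKS, as before
KeyStoreTestUtil.createKeyStore("/tmp/server.p12", "storepass", "keypass", "server",
    keyPair.getPrivate(), cert, "PKCS12");                 // new type-aware overload
KeyStoreTestUtil.createTrustStore("/tmp/trust.p12", "trustpass", "ca",
    cert, "PKCS12");                                       // new type-aware overload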
+ * + * @param filename String file to save + * @param password String store password to set on keystore + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createKeyStore(String filename, String password, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); - ks.setKeyEntry(alias, privateKey, password.toCharArray(), - new Certificate[]{cert}); - saveKeyStore(ks, filename, password); + createKeyStore(filename, password, password, alias, privateKey, cert); } /** * Creates a keystore with a single key and saves it to a file. + * This method will always generate a keystore file in JKS format. * * @param filename String file to save * @param password String store password to set on keystore @@ -143,17 +159,66 @@ public static void createKeyStore(String filename, String password, String keyPassword, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); + } + + + /** + * Creates a keystore with a single key and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @param keystoreType String keystore file type (e.g. "JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert, + String keystoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(keystoreType); ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[]{cert}); saveKeyStore(ks, filename, password); } + /** + * Creates a truststore with a single certificate and saves it to a file. + * This method uses the default JKS truststore type. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createTrustStore(String filename, String password, String alias, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createTrustStore(filename, password, alias, cert, "JKS"); + } + + /** + * Creates a truststore with a single certificate and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @param trustStoreType String keystore file type (e.g. 
"JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createTrustStore(String filename, String password, String alias, + Certificate cert, String trustStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java new file mode 100644 index 000000000000..430b46efaadc --- /dev/null +++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java @@ -0,0 +1,591 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.chaos; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.hadoop.util.Shell; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/*** + * An agent for executing destructive actions for ChaosMonkey. + * Uses ZooKeeper Watchers and LocalShell, to do the killing + * and getting status of service on targeted host without SSH. + * uses given ZNode Structure: + * /perfChaosTest (root) + * | + * | + * /chaosAgents (Used for registration has + * hostname ephemeral nodes as children) + * | + * | + * /chaosAgentTaskStatus (Used for task + * Execution, has hostname persistent + * nodes as child with tasks as their children) + * | + * | + * /hostname + * | + * | + * /task0000001 (command as data) + * (has two types of command : + * 1: starts with "exec" + * for executing a destructive action. + * 2: starts with "bool" for getting + * only status of service. 
+ * + */ +@InterfaceAudience.Private +public class ChaosAgent implements Watcher, Closeable, Runnable { + + private static final Logger LOG = LoggerFactory.getLogger(ChaosAgent.class); + static AtomicBoolean stopChaosAgent = new AtomicBoolean(); + private ZooKeeper zk; + private String quorum; + private String agentName; + private Configuration conf; + private RetryCounterFactory retryCounterFactory; + private volatile boolean connected = false; + + public ChaosAgent(Configuration conf, String quorum, String agentName) { + initChaosAgent(conf, quorum, agentName); + } + + /*** + * sets global params and initiates connection with ZooKeeper then does registration. + * @param conf initial configuration to use + * @param quorum ZK Quorum + * @param agentName AgentName to use + */ + private void initChaosAgent(Configuration conf, String quorum, String agentName) { + this.conf = conf; + this.quorum = quorum; + this.agentName = agentName; + this.retryCounterFactory = new RetryCounterFactory(new RetryCounter.RetryConfig() + .setMaxAttempts(conf.getInt(ChaosConstants.RETRY_ATTEMPTS_KEY, + ChaosConstants.DEFAULT_RETRY_ATTEMPTS)).setSleepInterval( + conf.getLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY, + ChaosConstants.DEFAULT_RETRY_SLEEP_INTERVAL))); + try { + this.createZKConnection(null); + this.register(); + } catch (IOException e) { + LOG.error("Error Creating Connection: " + e); + } + } + + /*** + * Creates Connection with ZooKeeper. + * @throws IOException if something goes wrong + */ + private void createZKConnection(Watcher watcher) throws IOException { + if(watcher == null) { + zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, this); + } else { + zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, watcher); + } + LOG.info("ZooKeeper Connection created for ChaosAgent: " + agentName); + } + + //WATCHERS: Below are the Watches used by ChaosAgent + + /*** + * Watcher for notifying if any task is assigned to agent or not, + * by seeking if any Node is being added to agent as Child. + */ + Watcher newTaskCreatedWatcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + if (watchedEvent.getType() == Event.EventType.NodeChildrenChanged) { + if (!(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName).equals(watchedEvent.getPath())) { + throw new RuntimeException(KeeperException.create( + KeeperException.Code.DATAINCONSISTENCY)); + } + + LOG.info("Change in Tasks Node, checking for Tasks again."); + getTasks(); + } + + } + }; + + //CALLBACKS: Below are the Callbacks used by Chaos Agent + + /** + * Callback used while setting status of a given task, Logs given status. + */ + AsyncCallback.StatCallback setStatusOfTaskZNodeCallback = (rc, path, ctx, stat) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + // Connection to the server was lost while setting status setting again. + try { + recreateZKConnection(); + } catch (Exception e) { + break; + } + setStatusOfTaskZNode(path, (String) ctx); + break; + + case OK: + LOG.info("Status of Task has been set"); + break; + + case NONODE: + LOG.error("Chaos Agent status node does not exists: " + + "check for ZNode directory structure again."); + break; + + default: + LOG.error("Error while setting status of task ZNode: " + + path, KeeperException.create(KeeperException.Code.get(rc), path)); + } + }; + + /** + * Callback used while creating a Persistent ZNode tries to create + * ZNode again if Connection was lost in previous try. 
+ */ + AsyncCallback.StringCallback createZNodeCallback = (rc, path, ctx, name) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + try { + recreateZKConnection(); + } catch (Exception e) { + break; + } + createZNode(path, (byte[]) ctx); + break; + case OK: + LOG.info("ZNode created : " + path); + break; + case NODEEXISTS: + LOG.warn("ZNode already registered: " + path); + break; + default: + LOG.error("Error occurred while creating Persistent ZNode: " + path, + KeeperException.create(KeeperException.Code.get(rc), path)); + } + }; + + /** + * Callback used while creating a Ephemeral ZNode tries to create ZNode again + * if Connection was lost in previous try. + */ + AsyncCallback.StringCallback createEphemeralZNodeCallback = (rc, path, ctx, name) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + try { + recreateZKConnection(); + } catch (Exception e) { + break; + } + createEphemeralZNode(path, (byte[]) ctx); + break; + case OK: + LOG.info("ZNode created : " + path); + break; + case NODEEXISTS: + LOG.warn("ZNode already registered: " + path); + break; + default: + LOG.error("Error occurred while creating Ephemeral ZNode: ", + KeeperException.create(KeeperException.Code.get(rc), path)); + } + }; + + /** + * Callback used by getTasksForAgentCallback while getting command, + * after getting command successfully, it executes command and + * set its status with respect to the command type. + */ + AsyncCallback.DataCallback getTaskForExecutionCallback = new AsyncCallback.DataCallback() { + @Override + public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //Connection to the server has been lost while getting task, getting data again. + try { + recreateZKConnection(); + } catch (Exception e) { + break; + } + zk.getData(path, + false, + getTaskForExecutionCallback, + new String(data)); + break; + case OK: + String cmd = new String(data); + LOG.info("Executing command : " + cmd); + String status = ChaosConstants.TASK_COMPLETION_STRING; + try { + String user = conf.get(ChaosConstants.CHAOSAGENT_SHELL_USER, + ChaosConstants.DEFAULT_SHELL_USER); + switch (cmd.substring(0, 4)) { + case "bool": + String ret = execWithRetries(user, cmd.substring(4)).getSecond(); + status = Boolean.toString(ret.length() > 0); + break; + + case "exec": + execWithRetries(user, cmd.substring(4)); + break; + + default: + LOG.error("Unknown Command Type"); + status = ChaosConstants.TASK_ERROR_STRING; + } + } catch (IOException e) { + LOG.error("Got error while executing command : " + cmd + + " On agent : " + agentName + " Error : " + e); + status = ChaosConstants.TASK_ERROR_STRING; + } + + try { + setStatusOfTaskZNode(path, status); + Thread.sleep(ChaosConstants.SET_STATUS_SLEEP_TIME); + } catch (InterruptedException e) { + LOG.error("Error occured after setting status: " + e); + } + + default: + LOG.error("Error occurred while getting data", + KeeperException.create(KeeperException.Code.get(rc), path)); + } + } + }; + + /*** + * Callback used while getting Tasks for agent if call executed without Exception, + * It creates a separate thread for each children to execute given Tasks parallely. 
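The task payloads described in the ChaosAgent javadoc and consumed by getTaskForExecutionCallback above follow a small convention: the first four characters select the mode ("exec" runs a destructive command, "bool" runs a status check reported as true/false), and the remainder, with no separator, is the shell command itself. A hedged sketch of how an orchestrator might build such payloads; the commands and pid path are placeholders:

// Sketch only: an orchestrator would write this payload as the data of a new task
// znode under the agent's host node in the chaosAgentTaskStatus subtree, which is
// exactly what the agent's child watcher is waiting for.
String killRegionServer = "exec" + "kill -9 $(cat /path/to/regionserver.pid)";
String isMasterRunning  = "bool" + "ps -ef | grep proc_master | grep -v grep";
byte[] taskData = killRegionServer.getBytes(java.nio.charset.StandardCharsets.UTF_8);
// The agent splits at the 4-character prefix (cmd.substring(0, 4) vs cmd.substring(4)),
// runs the remainder through LocalShell, and reports back by overwriting the task
// znode's data: "done"/"error" for "exec" tasks, "true"/"false" for "bool" tasks.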
+ */ + AsyncCallback.ChildrenCallback getTasksForAgentCallback = new AsyncCallback.ChildrenCallback() { + @Override + public void processResult(int rc, String path, Object ctx, List children) { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: { + // Connection to the server has been lost, getting tasks again. + try { + recreateZKConnection(); + } catch (Exception e) { + break; + } + getTasks(); + break; + } + + case OK: { + if (children != null) { + try { + + LOG.info("Executing each task as a separate thread"); + List tasksList = new ArrayList<>(); + for (String task : children) { + String threadName = agentName + "_" + task; + Thread t = new Thread(() -> { + + LOG.info("Executing task : " + task + " of agent : " + agentName); + zk.getData(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName + + ChaosConstants.ZNODE_PATH_SEPARATOR + task, + false, + getTaskForExecutionCallback, + task); + + }); + t.setName(threadName); + t.start(); + tasksList.add(t); + + for (Thread thread : tasksList) { + thread.join(); + } + } + } catch (InterruptedException e) { + LOG.error("Error scheduling next task : " + + " for agent : " + agentName + " Error : " + e); + } + } + break; + } + + default: + LOG.error("Error occurred while getting task", + KeeperException.create(KeeperException.Code.get(rc), path)); + } + } + }; + + /*** + * Function to create PERSISTENT ZNODE with given path and data given as params + * @param path Path at which ZNode to create + * @param data Data to put under ZNode + */ + public void createZNode(String path, byte[] data) { + zk.create(path, + data, + ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT, + createZNodeCallback, + data); + } + + /*** + * Function to create EPHEMERAL ZNODE with given path and data as params. + * @param path Path at which Ephemeral ZNode to create + * @param data Data to put under ZNode + */ + public void createEphemeralZNode(String path, byte[] data) { + zk.create(path, + data, + ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL, + createEphemeralZNodeCallback, + data); + } + + /** + * Checks if given ZNode exists, if not creates a PERSISTENT ZNODE for same. + * + * @param path Path to check for ZNode + */ + private void createIfZNodeNotExists(String path) { + try { + if (zk.exists(path, + false) == null) { + createZNode(path, new byte[0]); + } + } catch (KeeperException | InterruptedException e) { + LOG.error("Error checking given node : " + path + " " + e); + } + } + + /** + * sets given Status for Task Znode + * + * @param taskZNode ZNode to set status + * @param status Status value + */ + public void setStatusOfTaskZNode(String taskZNode, String status) { + LOG.info("Setting status of Task ZNode: " + taskZNode + " status : " + status); + zk.setData(taskZNode, + status.getBytes(), + -1, + setStatusOfTaskZNodeCallback, + null); + } + + /** + * registration of ChaosAgent by checking and creating necessary ZNodes. 
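register(), whose javadoc closes the hunk above, leans on the usual ZooKeeper idiom: the agent advertises its presence with an EPHEMERAL znode (removed automatically when the agent's session dies), while the per-host task subtree is PERSISTENT so queued tasks and their status survive agent restarts. A minimal stand-alone sketch of that split; the quorum address and hostname are placeholders, and the parent znodes are assumed to exist already (the agent's createIfZNodeNotExists takes care of that).

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RegistrationSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("zk1.example.internal:2181", 60_000, event -> { });
    // Persistent: holds queued tasks and their status across agent restarts.
    zk.create("/hbase/chaosAgentTaskStatus/host-1", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    // Ephemeral: advertises liveness; ZooKeeper deletes it when the session expires.
    zk.create("/hbase/chaosAgents/host-1", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.close();
  }
}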
+ */ + private void register() { + createIfZNodeNotExists(ChaosConstants.CHAOS_TEST_ROOT_ZNODE); + createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE); + createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE); + createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName); + + createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]); + } + + /*** + * Gets tasks for execution, basically sets Watch on it's respective host's Znode and + * waits for tasks to be assigned, also has a getTasksForAgentCallback + * which handles execution of task. + */ + private void getTasks() { + LOG.info("Getting Tasks for Agent: " + agentName + "and setting watch for new Tasks"); + zk.getChildren(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, + newTaskCreatedWatcher, + getTasksForAgentCallback, + null); + } + + /** + * Below function executes command with retries with given user. + * Uses LocalShell to execute a command. + * + * @param user user name, default none + * @param cmd Command to execute + * @return A pair of Exit Code and Shell output + * @throws IOException Exception while executing shell command + */ + private Pair execWithRetries(String user, String cmd) throws IOException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + return exec(user, cmd); + } catch (IOException e) { + retryOrThrow(retryCounter, e, user, cmd); + } + try { + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException e) { + LOG.warn("Sleep Interrupted: " + e); + } + } + } + + private Pair exec(String user, String cmd) throws IOException { + LOG.info("Executing Shell command: " + cmd + " , user: " + user); + + LocalShell shell = new LocalShell(user, cmd); + try { + shell.execute(); + } catch (Shell.ExitCodeException e) { + String output = shell.getOutput(); + throw new Shell.ExitCodeException(e.getExitCode(), "stderr: " + e.getMessage() + + ", stdout: " + output); + } + LOG.info("Executed Shell command, exit code: {}, output n{}", shell.getExitCode(), shell.getOutput()); + + return new Pair<>(shell.getExitCode(), shell.getOutput()); + } + + private void retryOrThrow(RetryCounter retryCounter, E ex, + String user, String cmd) throws E { + if (retryCounter.shouldRetry()) { + LOG.warn("Local command: {}, user: {}, failed at attempt {}. Retrying until maxAttempts: {}." 
+ + "Exception {}", cmd, user,retryCounter.getAttemptTimes(), retryCounter.getMaxAttempts(), + ex.getMessage()); + return; + } + throw ex; + } + + private boolean isConnected() { + return connected; + } + + @Override + public void close() throws IOException { + LOG.info("Closing ZooKeeper Connection for Chaos Agent : " + agentName); + try { + zk.close(); + } catch (InterruptedException e) { + LOG.error("Error while closing ZooKeeper Connection."); + } + } + + @Override + public void run() { + try { + LOG.info("Running Chaos Agent on : " + agentName); + while (!this.isConnected()) { + Thread.sleep(100); + } + this.getTasks(); + while (!stopChaosAgent.get()) { + Thread.sleep(500); + } + } catch (InterruptedException e) { + LOG.error("Error while running Chaos Agent", e); + } + + } + + @Override + public void process(WatchedEvent watchedEvent) { + LOG.info("Processing event: " + watchedEvent.toString()); + if (watchedEvent.getType() == Event.EventType.None) { + switch (watchedEvent.getState()) { + case SyncConnected: + connected = true; + break; + case Disconnected: + connected = false; + break; + case Expired: + connected = false; + LOG.error("Session expired creating again"); + try { + createZKConnection(null); + } catch (IOException e) { + LOG.error("Error creating Zookeeper connection", e); + } + default: + LOG.error("Unknown State"); + break; + } + } + } + + private void recreateZKConnection() throws Exception{ + try { + zk.close(); + createZKConnection(newTaskCreatedWatcher); + createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE + + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]); + } catch (IOException e) { + LOG.error("Error creating new ZK COnnection for agent: {}", agentName + e); + throw e; + } + } + + /** + * Executes Command locally. + */ + protected static class LocalShell extends Shell.ShellCommandExecutor { + + private String user; + private String execCommand; + + public LocalShell(String user, String execCommand) { + super(new String[]{execCommand}); + this.user = user; + this.execCommand = execCommand; + } + + @Override + public String[] getExecString() { + // TODO: Considering Agent is running with same user. + if(!user.equals(ChaosConstants.DEFAULT_SHELL_USER)){ + execCommand = String.format("su -u %1$s %2$s", user, execCommand); + } + return new String[]{"/usr/bin/env", "bash", "-c", execCommand}; + } + + @Override + public void execute() throws IOException { + super.execute(); + } + } +} diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java new file mode 100644 index 000000000000..54fbe9b10cde --- /dev/null +++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * ChaosConstants holds a bunch of Chaos-related constants.
+ */
+@InterfaceAudience.Public
+public final class ChaosConstants {
+
+  /* Base ZNode for the whole Chaos Testing setup */
+  public static final String CHAOS_TEST_ROOT_ZNODE = "/hbase";
+
+  /* Just a / used as path separator */
+  public static final String ZNODE_PATH_SEPARATOR = "/";
+
+  /* ZNode used for ChaosAgents registration. */
+  public static final String CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgents";
+
+  /* ZNode used for getting status of tasks assigned */
+  public static final String CHAOS_AGENT_STATUS_PERSISTENT_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgentTaskStatus";
+
+  /* Config property for getting number of retries to execute a command */
+  public static final String RETRY_ATTEMPTS_KEY = "hbase.it.clustermanager.retry.attempts";
+
+  /* Default value for number of retries */
+  public static final int DEFAULT_RETRY_ATTEMPTS = 5;
+
+  /* Config property for sleep time in between retries */
+  public static final String RETRY_SLEEP_INTERVAL_KEY =
+    "hbase.it.clustermanager.retry.sleep.interval";
+
+  /* Default sleep time between each retry */
+  public static final int DEFAULT_RETRY_SLEEP_INTERVAL = 5000;
+
+  /* Config property for executing a command as a specific user */
+  public static final String CHAOSAGENT_SHELL_USER = "hbase.it.clustermanager.ssh.user";
+
+  /* Default user for executing local commands */
+  public static final String DEFAULT_SHELL_USER = "";
+
+  /* Timeout used while creating ZooKeeper connection */
+  public static final int SESSION_TIMEOUT_ZK = 60000 * 10;
+
+  /* Time given to ChaosAgent to set status */
+  public static final int SET_STATUS_SLEEP_TIME = 30 * 1000;
+
+  /* Status String when you get an ERROR while executing a task */
+  public static final String TASK_ERROR_STRING = "error";
+
+  /* Status String when your command gets executed correctly */
+  public static final String TASK_COMPLETION_STRING = "done";
+
+  /* Name of the ChoreService to use */
+  public static final String CHORE_SERVICE_PREFIX = "ChaosService";
+
+}
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
new file mode 100644
index 000000000000..e2abe3d42655
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.chaos; + +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Collection; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; + +/** + * Class used to start/stop Chaos related services (currently chaosagent) + */ +@InterfaceAudience.Private +public class ChaosService { + + private static final Logger LOG = LoggerFactory.getLogger(ChaosService.class.getName()); + + public static void execute(String[] args, Configuration conf) { + LOG.info("arguments : " + Arrays.toString(args)); + + try { + CommandLine cmdline = new GnuParser().parse(getOptions(), args); + if (cmdline.hasOption(ChaosServiceName.CHAOSAGENT.toString().toLowerCase())) { + String actionStr = cmdline.getOptionValue(ChaosServiceName.CHAOSAGENT.toString().toLowerCase()); + try { + ExecutorAction action = ExecutorAction.valueOf(actionStr.toUpperCase()); + if (action == ExecutorAction.START) { + ChaosServiceStart(conf, ChaosServiceName.CHAOSAGENT); + } else if (action == ExecutorAction.STOP) { + ChaosServiceStop(); + } + } catch (IllegalArgumentException e) { + LOG.error("action passed: {} Unexpected action. 
Please provide only start/stop.", + actionStr, e); + throw new RuntimeException(e); + } + } else { + LOG.error("Invalid Options"); + } + } catch (Exception e) { + LOG.error("Error while starting ChaosService : ", e); + } + } + + private static void ChaosServiceStart(Configuration conf, ChaosServiceName serviceName) { + switch (serviceName) { + case CHAOSAGENT: + ChaosAgent.stopChaosAgent.set(false); + try { + Thread t = new Thread(new ChaosAgent(conf, + ChaosUtils.getZKQuorum(conf), ChaosUtils.getHostName())); + t.start(); + t.join(); + } catch (InterruptedException | UnknownHostException e) { + LOG.error("Failed while executing next task execution of ChaosAgent on : {}", + serviceName, e); + } + break; + default: + LOG.error("Service Name not known : " + serviceName.toString()); + } + } + + private static void ChaosServiceStop() { + ChaosAgent.stopChaosAgent.set(true); + } + + private static Options getOptions() { + Options options = new Options(); + options.addOption(new Option("c", ChaosServiceName.CHAOSAGENT.toString().toLowerCase(), + true, "expecting a start/stop argument")); + options.addOption(new Option("D", ChaosServiceName.GENERIC.toString(), + true, "generic D param")); + LOG.info(Arrays.toString(new Collection[] { options.getOptions() })); + return options; + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + new GenericOptionsParser(conf, args); + + ChoreService choreChaosService = null; + ScheduledChore authChore = AuthUtil.getAuthChore(conf); + + try { + if (authChore != null) { + choreChaosService = new ChoreService(ChaosConstants.CHORE_SERVICE_PREFIX); + choreChaosService.scheduleChore(authChore); + } + + execute(args, conf); + } finally { + if (authChore != null) + choreChaosService.shutdown(); + } + } + + enum ChaosServiceName { + CHAOSAGENT, + GENERIC + } + + + enum ExecutorAction { + START, + STOP + } +} diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java new file mode 100644 index 000000000000..da42021bcafb --- /dev/null +++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.chaos; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * ChaosUtils holds a bunch of useful functions like getting hostname and getting ZooKeeper quorum. 
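+ * <p>
+ * For example (an illustrative sketch only; the host names below are made up and the actual
+ * values depend entirely on the Configuration passed in):
+ * <pre>
+ *   Configuration conf = HBaseConfiguration.create();
+ *   String quorum = ChaosUtils.getZKQuorum(conf); // e.g. "zk1.example.com:2181,zk2.example.com:2181"
+ *   String host = ChaosUtils.getHostName();       // name of the host this JVM is running on
+ * </pre>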
+ */ +@InterfaceAudience.Private +public class ChaosUtils { + + public static String getHostName() throws UnknownHostException { + return InetAddress.getLocalHost().getHostName(); + } + + + public static String getZKQuorum(Configuration conf) { + String port = + Integer.toString(conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181)); + String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost"); + for (int i = 0; i < serverHosts.length; i++) { + serverHosts[i] = serverHosts[i] + ":" + port; + } + return String.join(",", serverHosts); + } + +} diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java new file mode 100644 index 000000000000..31fb9e3ca604 --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java @@ -0,0 +1,332 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class ChaosZKClient { + + private static final Logger LOG = LoggerFactory.getLogger(ChaosZKClient.class.getName()); + private static final String CHAOS_AGENT_PARENT_ZNODE = "/hbase/chaosAgents"; + private static final String CHAOS_AGENT_STATUS_ZNODE = "/hbase/chaosAgentTaskStatus"; + private static final String ZNODE_PATH_SEPARATOR = "/"; + private static final String TASK_PREFIX = "task_"; + private static final String TASK_ERROR_STRING = "error"; + private static final String TASK_COMPLETION_STRING = "done"; + private static final String TASK_BOOLEAN_TRUE = "true"; + private static final String TASK_BOOLEAN_FALSE = "false"; + private static final String CONNECTION_LOSS = "ConnectionLoss"; + private static final int SESSION_TIMEOUT_ZK = 10 * 60 * 1000; + private static final int TASK_EXECUTION_TIMEOUT = 5 * 60 * 1000; + private volatile String taskStatus = null; + + private final String quorum; + private ZooKeeper zk; + + public ChaosZKClient(String quorum) { + this.quorum = quorum; + try { + this.createNewZKConnection(); + } catch (IOException e) { + LOG.error("Error creating ZooKeeper Connection: ", e); + } + } + + /** + * Creates connection with ZooKeeper + * @throws IOException when not able to create connection properly + */ + public void 
createNewZKConnection() throws IOException { + Watcher watcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + LOG.info("Created ZooKeeper Connection For executing task"); + } + }; + + this.zk = new ZooKeeper(quorum, SESSION_TIMEOUT_ZK, watcher); + } + + /** + * Checks if ChaosAgent is running or not on target host by checking its ZNode. + * @param hostname hostname to check for chaosagent + * @return true/false whether agent is running or not + */ + private boolean isChaosAgentRunning(String hostname) { + try { + return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname, + false) != null; + } catch (KeeperException e) { + if (e.toString().contains(CONNECTION_LOSS)) { + recreateZKConnection(); + try { + return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname, + false) != null; + } catch (KeeperException | InterruptedException ie) { + LOG.error("ERROR ", ie); + } + } + } catch (InterruptedException e) { + LOG.error("Error checking for given hostname: {} ERROR: ", hostname, e); + } + return false; + } + + /** + * Creates tasks for target hosts by creating ZNodes. + * Waits for a limited amount of time to complete task to execute. + * @param taskObject Object data represents command + * @return returns status + */ + public String submitTask(final TaskObject taskObject) { + if (isChaosAgentRunning(taskObject.getTaskHostname())) { + LOG.info("Creating task node"); + zk.create(CHAOS_AGENT_STATUS_ZNODE + ZNODE_PATH_SEPARATOR + + taskObject.getTaskHostname() + ZNODE_PATH_SEPARATOR + TASK_PREFIX, + taskObject.getCommand().getBytes(), + ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL_SEQUENTIAL, + submitTaskCallback, + taskObject); + long start = System.currentTimeMillis(); + + while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) { + if(taskStatus != null) { + return taskStatus; + } + Threads.sleep(500); + } + } else { + LOG.info("EHHHHH! ChaosAgent Not running"); + } + return TASK_ERROR_STRING; + } + + /** + * To get status of task submitted + * @param path path at which to get status + * @param ctx path context + */ + private void getStatus(String path , Object ctx) { + LOG.info("Getting Status of task: " + path); + zk.getData(path, + false, + getStatusCallback, + ctx); + } + + /** + * Set a watch on task submitted + * @param name ZNode name to set a watch + * @param taskObject context for ZNode name + */ + private void setStatusWatch(String name, TaskObject taskObject) { + LOG.info("Checking for ZNode and Setting watch for task : " + name); + zk.exists(name, + setStatusWatcher, + setStatusWatchCallback, + taskObject); + } + + /** + * Delete task after getting its status + * @param path path to delete ZNode + */ + private void deleteTask(String path) { + LOG.info("Deleting task: " + path); + zk.delete(path, + -1, + taskDeleteCallback, + null); + } + + //WATCHERS: + + /** + * Watcher to get notification whenever status of task changes. 
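+ * A NodeDataChanged event on a task ZNode triggers getStatus(), which reads the status string
+ * the agent wrote ("done", "true", "false" or "error") and then deletes the task ZNode once the
+ * status has been consumed.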
+ */ + Watcher setStatusWatcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + LOG.info("Setting status watch for task: " + watchedEvent.getPath()); + if(watchedEvent.getType() == Event.EventType.NodeDataChanged) { + if(!watchedEvent.getPath().contains(TASK_PREFIX)) { + throw new RuntimeException(KeeperException.create( + KeeperException.Code.DATAINCONSISTENCY)); + } + getStatus(watchedEvent.getPath(), (Object) watchedEvent.getPath()); + + } + } + }; + + //CALLBACKS + + AsyncCallback.DataCallback getStatusCallback = (rc, path, ctx, data, stat) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //Connectionloss while getting status of task, getting again + recreateZKConnection(); + getStatus(path, ctx); + break; + + case OK: + if (ctx!=null) { + + String status = new String(data); + taskStatus = status; + switch (status) { + case TASK_COMPLETION_STRING: + case TASK_BOOLEAN_TRUE: + case TASK_BOOLEAN_FALSE: + LOG.info("Task executed completely : Status --> " + status); + break; + + case TASK_ERROR_STRING: + LOG.info("There was error while executing task : Status --> " + status); + break; + + default: + LOG.warn("Status of task is undefined!! : Status --> " + status); + } + + deleteTask(path); + } + break; + + default: + LOG.error("ERROR while getting status of task: " + path + " ERROR: " + + KeeperException.create(KeeperException.Code.get(rc))); + } + }; + + AsyncCallback.StatCallback setStatusWatchCallback = (rc, path, ctx, stat) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //ConnectionLoss while setting watch on status ZNode, setting again. + recreateZKConnection(); + setStatusWatch(path, (TaskObject) ctx); + break; + + case OK: + if(stat != null) { + getStatus(path, null); + } + break; + + default: + LOG.error("ERROR while setting watch on task ZNode: " + path + " ERROR: " + + KeeperException.create(KeeperException.Code.get(rc))); + } + }; + + AsyncCallback.StringCallback submitTaskCallback = (rc, path, ctx, name) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + // Connection to server was lost while submitting task, submitting again. 
+ recreateZKConnection(); + submitTask((TaskObject) ctx); + break; + + case OK: + LOG.info("Task created : " + name); + setStatusWatch(name, (TaskObject) ctx); + break; + + default: + LOG.error("Error submitting task: " + name + " ERROR:" + + KeeperException.create(KeeperException.Code.get(rc))); + } + }; + + AsyncCallback.VoidCallback taskDeleteCallback = new AsyncCallback.VoidCallback() { + @Override + public void processResult(int rc, String path, Object ctx) { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //Connectionloss while deleting task, deleting again + recreateZKConnection(); + deleteTask(path); + break; + + case OK: + LOG.info("Task Deleted successfully!"); + LOG.info("Closing ZooKeeper Connection"); + try { + zk.close(); + } catch (InterruptedException e) { + LOG.error("Error while closing ZooKeeper Connection."); + } + break; + + default: + LOG.error("ERROR while deleting task: " + path + " ERROR: " + + KeeperException.create(KeeperException.Code.get(rc))); + } + } + }; + + + private void recreateZKConnection() { + try { + zk.close(); + } catch (InterruptedException e) { + LOG.error("Error closing ZK connection : ", e); + } finally { + try { + createNewZKConnection(); + } catch (IOException e) { + LOG.error("Error creating new ZK COnnection for agent: ", e); + } + } + } + + static class TaskObject { + private final String command; + private final String taskHostname; + + public TaskObject(String command, String taskHostname) { + this.command = command; + this.taskHostname = taskHostname; + } + + public String getCommand() { + return this.command; + } + + public String getTaskHostname() { + return taskHostname; + } + } + +} diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index d906bfd8420c..05e203607f53 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -53,11 +53,11 @@ public static void setUp() throws Exception { if (util == null) { util = new IntegrationTestingUtility(); } - util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); util.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); // Make sure there are three servers. util.initializeCluster(3); + HBaseTestingUtility.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3); ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java new file mode 100644 index 000000000000..88f14b0d0d34 --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.stream.Collectors; + +import org.apache.hadoop.conf.Configured; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class ZNodeClusterManager extends Configured implements ClusterManager { + private static final Logger LOG = LoggerFactory.getLogger(ZNodeClusterManager.class.getName()); + private static final String SIGKILL = "SIGKILL"; + private static final String SIGSTOP = "SIGSTOP"; + private static final String SIGCONT = "SIGCONT"; + public ZNodeClusterManager() { + } + + private String getZKQuorumServersStringFromHbaseConfig() { + String port = + Integer.toString(getConf().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181)); + String[] serverHosts = getConf().getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost"); + for (int i = 0; i < serverHosts.length; i++) { + serverHosts[i] = serverHosts[i] + ":" + port; + } + return Arrays.asList(serverHosts).stream().collect(Collectors.joining(",")); + } + + private String createZNode(String hostname, String cmd) throws IOException{ + LOG.info("Zookeeper Mode enabled sending command to zookeeper + " + + cmd + "hostname:" + hostname); + ChaosZKClient chaosZKClient = new ChaosZKClient(getZKQuorumServersStringFromHbaseConfig()); + return chaosZKClient.submitTask(new ChaosZKClient.TaskObject(cmd, hostname)); + } + + protected HBaseClusterManager.CommandProvider getCommandProvider(ServiceType service) + throws IOException { + switch (service) { + case HADOOP_DATANODE: + case HADOOP_NAMENODE: + return new HBaseClusterManager.HadoopShellCommandProvider(getConf()); + case ZOOKEEPER_SERVER: + return new HBaseClusterManager.ZookeeperShellCommandProvider(getConf()); + default: + return new HBaseClusterManager.HBaseShellCommandProvider(getConf()); + } + } + + public void signal(ServiceType service, String signal, String hostname) throws IOException { + createZNode(hostname, CmdType.exec.toString() + + getCommandProvider(service).signalCommand(service, signal)); + } + + private void createOpCommand(String hostname, ServiceType service, + HBaseClusterManager.CommandProvider.Operation op) throws IOException{ + createZNode(hostname, CmdType.exec.toString() + + getCommandProvider(service).getCommand(service, op)); + } + + @Override + public void start(ServiceType service, String hostname, int port) throws IOException { + createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.START); + } + + @Override + public void stop(ServiceType service, String hostname, int port) throws IOException { + createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.STOP); + } + + @Override + public void restart(ServiceType service, String hostname, int port) throws IOException { + createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.RESTART); + } + + @Override + public void kill(ServiceType service, String hostname, int port) throws IOException { + signal(service, SIGKILL, hostname); + } 
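+  // Note: start/stop/restart and the signal based operations all funnel through
+  // createZNode(hostname, cmd): the command is submitted as a task ZNode under
+  // /hbase/chaosAgentTaskStatus/<hostname>/ via ChaosZKClient, and the ChaosAgent running on
+  // that host executes it locally and writes the resulting status back.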
+ + @Override + public void suspend(ServiceType service, String hostname, int port) throws IOException { + signal(service, SIGSTOP, hostname); + } + + @Override + public void resume(ServiceType service, String hostname, int port) throws IOException { + signal(service, SIGCONT, hostname); + } + + @Override + public boolean isRunning(ServiceType service, String hostname, int port) throws IOException { + return Boolean.parseBoolean(createZNode(hostname, CmdType.bool.toString() + + getCommandProvider(service).isRunningCommand(service))); + } + + enum CmdType { + exec, + bool + } +} diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index c80d61c4ea66..28b4ae467dda 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -29,6 +30,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; +import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -66,6 +68,8 @@ public class IntegrationTestImportTsv extends Configured implements Tool { private static final String NAME = IntegrationTestImportTsv.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestImportTsv.class); + private static final String GENERATED_HFILE_FOLDER_PARAM_KEY = + "IntegrationTestImportTsv.generatedHFileFolder"; protected static final String simple_tsv = "row1\t1\tc1\tc2\n" + @@ -190,8 +194,8 @@ public void testGenerateAndLoad() throws Exception { void generateAndLoad(final TableName table) throws Exception { LOG.info("Running test testGenerateAndLoad."); String cf = "d"; - Path hfiles = new Path( - util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); + Path hfiles = initGeneratedHFilePath(table); + LOG.info("The folder where the HFiles will be generated: {}", hfiles.toString()); Map args = new HashMap<>(); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -220,6 +224,12 @@ public int run(String[] args) throws Exception { System.err.println(format("%s [genericOptions]", NAME)); System.err.println(" Runs ImportTsv integration tests against a distributed cluster."); System.err.println(); + System.err.println(" Use '-D" + GENERATED_HFILE_FOLDER_PARAM_KEY + "=' to define a"); + System.err.println(" base folder for the generated HFiles. 
If HDFS Transparent Encryption"); + System.err.println(" is configured, then make sure to set this parameter to a folder in"); + System.err.println(" the same encryption zone in HDFS as the HBase root directory,"); + System.err.println(" otherwise the bulkload will fail."); + System.err.println(); ToolRunner.printGenericCommandUsage(System.err); return 1; } @@ -237,6 +247,28 @@ public int run(String[] args) throws Exception { return 0; } + private Path initGeneratedHFilePath(final TableName table) throws IOException { + String folderParam = getConf().getTrimmed(GENERATED_HFILE_FOLDER_PARAM_KEY); + if (folderParam == null || folderParam.isEmpty()) { + // by default, fall back to the test data dir + return new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); + } + + Path hfiles = new Path(folderParam, UUID.randomUUID().toString()); + FileSystem fs = util.getTestFileSystem(); + String shouldPreserve = System.getProperty("hbase.testing.preserve.testdir", "false"); + if (!Boolean.parseBoolean(shouldPreserve)) { + if (fs.getUri().getScheme().equals(FileSystem.getLocal(getConf()).getUri().getScheme())) { + File localFoler = new File(hfiles.toString()); + localFoler.deleteOnExit(); + } else { + fs.deleteOnExit(hfiles); + } + } + return hfiles; + } + + public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); IntegrationTestingUtility.setUseDistributedCluster(conf); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index d15a9d650526..d9d8cbad39af 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -233,8 +233,7 @@ private static void setupTables() throws IOException { } // Create the table. If this fails then fail everything. - TableDescriptor tableDescriptor = util.getAdmin().getDescriptor(tableName); - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); // Make the max file size huge so that splits don't happen during the test. builder.setMaxFileSize(Long.MAX_VALUE); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index da907cbff774..0484fbbf239a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -23,8 +23,6 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.util.ProgramDriver; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Driver for hbase mapreduce jobs. Select which to run by passing name of job * to this main. 
@@ -35,7 +33,7 @@ public class Driver { private static ProgramDriver pgd = new ProgramDriver(); - @VisibleForTesting + @InterfaceAudience.Private static void setProgramDriver(ProgramDriver pgd0) { pgd = pgd0; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index d52a31067f42..ed31c8422e7e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -34,6 +34,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { + private Driver() {} public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java index a4f092b71797..568c47fd6e53 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.filter.RegexStringComparator; import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.security.visibility.Authorizations; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; @@ -106,7 +105,6 @@ public static Triple getArgumentsFromCommandLine( return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), new Path(args[1])); } - @VisibleForTesting static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOException { Scan s = new Scan(); // Optional arguments. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 08752c192b60..ee6d5331f3f6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -90,8 +91,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Writes HFiles. Passed Cells must arrive in order. * Writes current time as the sequence id for the file. Sets the major compacted @@ -371,8 +370,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { @@ -665,7 +664,7 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes * @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ - @VisibleForTesting + @InterfaceAudience.Private static Map createFamilyCompressionMap(Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, @@ -685,7 +684,7 @@ static Map createFamilyCompressionMap(Configuration * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter type */ - @VisibleForTesting + @InterfaceAudience.Private static Map createFamilyBloomTypeMap(Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); @@ -704,12 +703,11 @@ static Map createFamilyBloomTypeMap(Configuration conf) { * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter param */ - @VisibleForTesting + @InterfaceAudience.Private static Map createFamilyBloomParamMap(Configuration conf) { return createFamilyConfValueMap(conf, BLOOM_PARAM_FAMILIES_CONF_KEY); } - /** * Runs inside the task to deserialize column family to block size * map from the configuration. @@ -717,7 +715,7 @@ static Map createFamilyBloomParamMap(Configuration conf) { * @param conf to read the serialized values from * @return a map from column family to the configured block size */ - @VisibleForTesting + @InterfaceAudience.Private static Map createFamilyBlockSizeMap(Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); @@ -737,7 +735,7 @@ static Map createFamilyBlockSizeMap(Configuration conf) { * @return a map from column family to HFileDataBlockEncoder for the * configured data block type for the family */ - @VisibleForTesting + @InterfaceAudience.Private static Map createFamilyDataBlockEncodingMap( Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, @@ -749,7 +747,6 @@ static Map createFamilyDataBlockEncodingMap( return encoderMap; } - /** * Run inside the task to deserialize column family to given conf value map. 
* @@ -802,7 +799,7 @@ static void configurePartitioner(Job job, List splitPoin @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") - @VisibleForTesting + @InterfaceAudience.Private static String serializeColumnFamilyAttribute(Function fn, List allTables) throws UnsupportedEncodingException { @@ -833,7 +830,7 @@ static String serializeColumnFamilyAttribute(Function compressionDetails = familyDescriptor -> familyDescriptor.getCompressionType().getName(); @@ -841,7 +838,7 @@ static String serializeColumnFamilyAttribute(Function blockSizeDetails = familyDescriptor -> String .valueOf(familyDescriptor.getBlocksize()); @@ -849,7 +846,7 @@ static String serializeColumnFamilyAttribute(Function bloomTypeDetails = familyDescriptor -> { String bloomType = familyDescriptor.getBloomFilterType().toString(); if (bloomType == null) { @@ -862,7 +859,7 @@ static String serializeColumnFamilyAttribute(Function bloomParamDetails = familyDescriptor -> { BloomType bloomType = familyDescriptor.getBloomFilterType(); String bloomParam = ""; @@ -876,7 +873,7 @@ static String serializeColumnFamilyAttribute(Function dataBlockEncodingDetails = familyDescriptor -> { DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding(); if (encoding == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? 
null: tags); } } return kv; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java index cdc2f94c7409..6410bf8726c6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.nio.charset.Charset; import java.util.List; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Create 3 level tree directory, first level is using table name as parent @@ -45,7 +44,6 @@ * -columnFamilyName2 */ @InterfaceAudience.Public -@VisibleForTesting public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { private static final Logger LOG = LoggerFactory.getLogger(MultiTableHFileOutputFormat.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index dac1d425d806..9fdaa7b78f75 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -128,7 +128,7 @@ public void close() throws IOException { @Override public Result deserialize(Result mutation) throws IOException { ClientProtos.Result proto = ClientProtos.Result.parseDelimitedFrom(in); - return ProtobufUtil.toResult(proto); + return ProtobufUtil.toResult(proto, true); } @Override @@ -152,7 +152,7 @@ public void open(OutputStream out) throws IOException { @Override public void serialize(Result result) throws IOException { - ProtobufUtil.toResult(result).writeDelimitedTo(out); + ProtobufUtil.toResult(result, true).writeDelimitedTo(out); } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index e7c5bf4fb2d7..667901042234 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -26,10 +26,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -52,7 +48,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.net.DNS; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A base for {@link TableInputFormat}s. 
Receives a {@link Connection}, a {@link TableName}, @@ -287,7 +285,7 @@ public List getSplits(JobContext context) throws IOException { * Create one InputSplit per region * * @return The list of InputSplit for all the regions - * @throws IOException + * @throws IOException throws IOException */ private List oneInputSplitPerRegion() throws IOException { RegionSizeCalculator sizeCalculator = @@ -305,7 +303,10 @@ private List oneInputSplitPerRegion() throws IOException { } List splits = new ArrayList<>(1); long regionSize = sizeCalculator.getRegionSize(regLoc.getRegion().getRegionName()); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); @@ -345,7 +346,10 @@ private List oneInputSplitPerRegion() throws IOException { byte[] regionName = location.getRegion().getRegionName(); String encodedRegionName = location.getRegion().getEncodedName(); long regionSize = sizeCalculator.getRegionSize(regionName); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { @@ -362,7 +366,7 @@ private List oneInputSplitPerRegion() throws IOException { * @param n Number of ranges after splitting. Pass 1 means no split for the range * Pass 2 if you want to split the range in two; * @return A list of TableSplit, the size of the list is n - * @throws IllegalArgumentIOException + * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ protected List createNInputSplitsUniform(InputSplit split, int n) throws IllegalArgumentIOException { @@ -409,9 +413,12 @@ protected List createNInputSplitsUniform(InputSplit split, int n) // Split Region into n chunks evenly byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); for (int i = 0; i < splitKeys.length - 1; i++) { + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 //notice that the regionSize parameter may be not very accurate TableSplit tsplit = - new TableSplit(tableName, scan, splitKeys[i], splitKeys[i + 1], regionLocation, + new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } @@ -488,7 +495,10 @@ public List calculateAutoBalancedSplits(List splits, lon } } i = j - 1; - TableSplit t = new TableSplit(tableName, scan, splitStartKey, splitEndKey, regionLocation, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. 
See HBASE-25212 + TableSplit t = new TableSplit(tableName, null, splitStartKey, splitEndKey, regionLocation, encodedRegionName, totalSize); resultList.add(t); } @@ -508,7 +518,9 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { // reverse DNS using jndi doesn't work well with ipv6 addresses. ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName(); } - if (ipAddressString == null) throw new UnknownHostException("No host found for " + ipAddress); + if (ipAddressString == null) { + throw new UnknownHostException("No host found for " + ipAddress); + } hostName = Strings.domainNamePointerToHostName(ipAddressString); this.reverseDNSCacheMap.put(ipAddress, hostName); } @@ -587,7 +599,7 @@ protected void initializeTable(Connection connection, TableName tableName) throw this.connection = connection; } - @VisibleForTesting + @InterfaceAudience.Private protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin) throws IOException { return new RegionSizeCalculator(locator, admin); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index c84b25b0004f..81f9a7ceb43f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -37,7 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) @@ -51,8 +50,9 @@ public class TableRecordReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); // HBASE_COUNTER_GROUP_NAME is the name of mapreduce counter group for HBase - @VisibleForTesting + @InterfaceAudience.Private static final String HBASE_COUNTER_GROUP_NAME = "HBaseCounters"; + private ResultScanner scanner = null; private Scan scan = null; private Scan currentScan = null; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index 3ca6c0323688..d9c9b871a596 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -40,8 +40,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. 
The job * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits, @@ -131,7 +129,7 @@ TableSnapshotInputFormatImpl.InputSplit getDelegate() { } } - @VisibleForTesting + @InterfaceAudience.Private static class TableSnapshotRegionRecordReader extends RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate = diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index de42c31678ef..acce55e82ce8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -22,17 +22,16 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A table split corresponds to a key range (low, high) and an optional scanner. @@ -40,7 +39,7 @@ */ @InterfaceAudience.Public public class TableSplit extends InputSplit -implements Writable, Comparable { + implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -84,6 +83,16 @@ static Version fromCode(int code) { private byte [] endRow; private String regionLocation; private String encodedRegionName = ""; + + /** + * The scan object may be null but the serialized form of scan is never null + * or empty since we serialize the scan object with default values then. + * Having no scanner in TableSplit doesn't necessarily mean there is no scanner + * for mapreduce job, it just means that we do not need to set it for each split. + * For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the + * job conf and scanner is supposed to be same for all the splits of table. + */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes @@ -182,12 +191,23 @@ public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, * Returns a Scan object from the stored string representation. * * @return Returns a Scan object based on the stored scanner. - * @throws IOException + * @throws IOException throws IOException if deserialization fails */ public Scan getScan() throws IOException { return TableMapReduceUtil.convertStringToScan(this.scan); } + /** + * Returns a scan string + * @return scan as string. Should be noted that this is not same as getScan().toString() + * because Scan object will have the default values when empty scan string is + * deserialized. Thus, getScan().toString() can never be empty + */ + @InterfaceAudience.Private + public String getScanAsString() { + return this.scan; + } + /** * Returns the table name converted to a byte array. 
* @see #getTable() diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 1815412721f4..14bfec72efe8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,24 +22,21 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; +import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; @@ -49,6 +46,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} files. @@ -77,10 +77,6 @@ public WALSplit() {} * Represent an WALSplit, i.e. a single WAL file. * Start- and EndTime are managed by the split, so that WAL files can be * filtered before WALEdits are passed to the mapper(s). 
- * @param logFileName - * @param fileSize - * @param startTime - * @param endTime */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -154,14 +150,13 @@ public void initialize(InputSplit split, TaskAttemptContext context) WALSplit hsplit = (WALSplit)split; logFile = new Path(hsplit.getLogFileName()); conf = context.getConfiguration(); - LOG.info("Opening reader for "+split); + LOG.info("Opening {} for {}", logFile, split); openReader(logFile); this.startTime = hsplit.getStartTime(); this.endTime = hsplit.getEndTime(); } - private void openReader(Path path) throws IOException - { + private void openReader(Path path) throws IOException { closeReader(); reader = AbstractFSWALProvider.openReader(path, conf); seek(); @@ -187,7 +182,9 @@ private void seek() throws IOException { @Override public boolean nextKeyValue() throws IOException, InterruptedException { - if (reader == null) return false; + if (reader == null) { + return false; + } this.currentPos = reader.getPosition(); Entry temp; long i = -1; @@ -205,7 +202,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } while (temp != null && temp.getKey().getWriteTime() < startTime); if (temp == null) { - if (i > 0) LOG.info("Skipped " + i + " entries."); + if (i > 0) { + LOG.info("Skipped " + i + " entries."); + } LOG.info("Reached end of file."); return false; } else if (i > 0) { @@ -243,7 +242,9 @@ public float getProgress() throws IOException, InterruptedException { @Override public void close() throws IOException { LOG.info("Closing reader"); - if (reader != null) this.reader.close(); + if (reader != null) { + this.reader.close(); + } } } @@ -302,40 +303,56 @@ private Path[] getInputPaths(Configuration conf) { inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } + /** + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer + * or equal to this value else we will filter out the file. If name does not + * seem to have a timestamp, we will just return it w/o filtering. + * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal + * to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. + */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); - RemoteIterator iter = fs.listLocatedStatus(dir); - if (!iter.hasNext()) return Collections.emptyList(); + if (!iter.hasNext()) { + return Collections.emptyList(); + } while (iter.hasNext()) { LocatedFileStatus file = iter.next(); if (file.isDirectory()) { - // recurse into sub directories + // Recurse into sub directories result.addAll(getFiles(fs, file.getPath(), startTime, endTime)); } else { - String name = file.getPath().toString(); - int idx = name.lastIndexOf('.'); - if (idx > 0) { - try { - long fileStartTime = Long.parseLong(name.substring(idx+1)); - if (fileStartTime <= endTime) { - LOG.info("Found: " + file); - result.add(file); - } - } catch (NumberFormatException x) { - idx = 0; - } - } - if (idx == 0) { - LOG.warn("File " + name + " does not appear to be an WAL file. Skipping..."); - } + addFile(result, file, startTime, endTime); } } + // TODO: These results should be sorted? 
Results could be content of recovered.edits directory + // -- null padded increasing numeric -- or a WAL file w/ timestamp suffix or timestamp and + // then meta suffix. See AbstractFSWALProvider#WALStartTimeComparator return result; } + static void addFile(List result, LocatedFileStatus lfs, long startTime, + long endTime) { + long timestamp = AbstractFSWALProvider.getTimestamp(lfs.getPath().getName()); + if (timestamp > 0) { + // Looks like a valid timestamp. + if (timestamp <= endTime && timestamp >= startTime) { + LOG.info("Found {}", lfs.getPath()); + result.add(lfs); + } else { + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), + startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + } + } else { + // If no timestamp, add it regardless. + LOG.info("Found (no-timestamp!) {}", lfs); + result.add(lfs); + } + } + @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index bbaa7549fa9a..a47a12fffb5a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -58,6 +57,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; + + /** * A tool to replay WAL files as a M/R job. * The WAL can be replayed for a set of tables or all tables, @@ -140,7 +141,22 @@ public void setup(Context context) throws IOException { } /** - * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. + * Enum for map metrics. Keep it out here rather than inside in the Map + * inner-class so we can find associated properties. + */ + protected static enum Counter { + /** Number of aggregated writes */ + PUTS, + /** Number of aggregated deletes */ + DELETES, + CELLS_READ, + CELLS_WRITTEN, + WALEDITS + } + + /** + * A mapper that writes out {@link Mutation} to be directly applied to + * a running HBase instance. */ protected static class WALMapper extends Mapper { @@ -148,6 +164,7 @@ protected static class WALMapper @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { + context.getCounter(Counter.WALEDITS).increment(1); try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { TableName targetTable = @@ -157,6 +174,7 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { Delete del = null; Cell lastCell = null; for (Cell cell : value.getCells()) { + context.getCounter(Counter.CELLS_READ).increment(1); // Filtering WAL meta marker entries. if (WALEdit.isMetaEditFamily(cell)) { continue; @@ -172,9 +190,11 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { // row or type changed, write out aggregate KVs. 
if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { context.write(tableOut, del); + context.getCounter(Counter.DELETES).increment(1); } if (CellUtil.isDelete(cell)) { del = new Delete(CellUtil.cloneRow(cell)); @@ -187,14 +207,17 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { } else { put.add(cell); } + context.getCounter(Counter.CELLS_WRITTEN).increment(1); } lastCell = cell; } // write residual KVs if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { + context.getCounter(Counter.DELETES).increment(1); context.write(tableOut, del); } } @@ -215,6 +238,7 @@ protected boolean filter(Context context, final Cell cell) { super.cleanup(context); } + @SuppressWarnings("checkstyle:EmptyBlock") @Override public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); @@ -270,7 +294,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args[1].split(","); + String[] tables = args.length == 1? new String [] {}: args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); @@ -278,7 +302,7 @@ public Job createSubmittableJob(String[] args) throws IOException { throw new IOException("The same number of tables and mapping must be provided."); } } else { - // if not mapping is specified map each table to itself + // if no mapping is specified, map each table to itself tableMap = tables; } conf.setStrings(TABLES_KEY, tables); @@ -349,27 +373,31 @@ private void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: " + NAME + " [options] []"); - System.err.println("Replay all WAL files into HBase."); - System.err.println(" is a comma separated list of tables."); - System.err.println("If no tables (\"\") are specified, all tables are imported."); - System.err.println("(Be careful, hbase:meta entries will be imported in this case.)\n"); - System.err.println("WAL entries can be mapped to new set of tables via ."); - System.err.println(" is a comma separated list of target tables."); - System.err.println("If specified, each table in must have a mapping.\n"); - System.err.println("By default " + NAME + " will load data directly into HBase."); - System.err.println("To generate HFiles for a bulk data load instead, pass the following option:"); - System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); - System.err.println(" (Only one table can be specified, and no mapping is allowed!)"); - System.err.println("Time range options:"); - System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); - System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); - System.err.println(" (The start and the end date of timerange. The dates can be expressed"); - System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format."); - System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12)"); + System.err.println("Usage: " + NAME + " [options] [ ]"); + System.err.println(" directory of WALs to replay."); + System.err.println(" comma separated list of tables. 
If no tables specified,"); + System.err.println(" all are imported (even hbase:meta if present)."); + System.err.println(" WAL entries can be mapped to a new set of tables by " + + "passing"); + System.err.println(" , a comma separated list of target " + + "tables."); + System.err.println(" If specified, each table in must have a " + + "mapping."); + System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); + System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); + System.err.println(" Only one table can be specified, and no mapping allowed!"); + System.err.println("To specify a time range, pass:"); + System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); + System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); + System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); + System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + + "format."); + System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); - System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); - System.err.println(" Use the specified mapreduce job name for the wal player"); + System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); + System.err.println(" Use the specified mapreduce job name for the wal player"); + System.err.println(" -Dwal.input.separator=' '"); + System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); @@ -387,7 +415,7 @@ public static void main(String[] args) throws Exception { @Override public int run(String[] args) throws Exception { - if (args.length < 2) { + if (args.length < 1) { usage("Wrong number of arguments: " + args.length); System.exit(-1); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 7b940d724ba9..baa0d1d02460 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -69,8 +69,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This map-only job compares the data from a local table with a remote one. 
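(Editorial aside on the WALPlayer usage text above.) As a rough illustration of the replay options it documents, here is a minimal, hypothetical driver that runs WALPlayer through ToolRunner with an inclusive time range. The input directory, table name and timestamp values are made up for illustration, and it assumes WALInputFormat.START_TIME_KEY / END_TIME_KEY are accessible to the calling code; if they are not, the equivalent -D properties shown in the usage text can be passed on the command line instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALInputFormat;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public class WALPlayerDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Inclusive time range, in milliseconds since the epoch (hypothetical values).
    conf.setLong(WALInputFormat.START_TIME_KEY, 1234567890120L);
    conf.setLong(WALInputFormat.END_TIME_KEY, 1234567990120L);
    // Replay WALs found under /hbase/oldWALs into table 'usertable'.
    // With only the input directory given, all tables present in the WALs are replayed.
    int exitCode = ToolRunner.run(new WALPlayer(conf),
        new String[] { "/hbase/oldWALs", "usertable" });
    System.exit(exitCode);
  }
}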
* Every cell is compared and must have exactly the same keys (even timestamp) @@ -517,7 +515,6 @@ private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] scan.withStopRow(stopRow); } - @VisibleForTesting public boolean doCommandLine(final String[] args) { if (args.length < 2) { printUsage(null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 7ea858976aed..bea879a63ef7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -31,6 +31,7 @@ import java.util.Arrays; import java.util.Date; import java.util.LinkedList; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; @@ -62,6 +63,9 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RowMutations; @@ -160,17 +164,18 @@ public class PerformanceEvaluation extends Configured implements Tool { static { addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead", - "Run async random read test"); + "Run async random read test"); addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite", - "Run async random write test"); + "Run async random write test"); addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead", - "Run async sequential read test"); + "Run async sequential read test"); addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite", - "Run async sequential write test"); + "Run async sequential write test"); addCommandDescriptor(AsyncScanTest.class, "asyncScan", - "Run async scan test (read every row)"); - addCommandDescriptor(RandomReadTest.class, RANDOM_READ, - "Run random read test"); + "Run async scan test (read every row)"); + addCommandDescriptor(RandomReadTest.class, RANDOM_READ, "Run random read test"); + addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", + "Run getRegionLocation test"); addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN, "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", @@ -187,11 +192,12 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run sequential read test"); addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); - addCommandDescriptor(ScanTest.class, "scan", - "Run scan test (read every row)"); + addCommandDescriptor(MetaWriteTest.class, "metaWrite", + "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); + addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", "Run scan test using a filter to find a specific row based on it's value " + - "(make sure to use --rows=20)"); + "(make sure to use --rows=20)"); addCommandDescriptor(IncrementTest.class, "increment", "Increment on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(AppendTest.class, 
"append", @@ -202,6 +208,8 @@ public class PerformanceEvaluation extends Configured implements Tool { "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete", "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations"); + addCommandDescriptor(CleanMetaTest.class, "cleanMeta", + "Remove fake region entries on meta table inserted by metaWrite; used with 1 thread"); } /** @@ -1481,6 +1489,31 @@ void onTakedown() throws IOException { } } + /* + * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest + */ + static abstract class MetaTest extends TableTest { + protected int keyLength; + + MetaTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + keyLength = Integer.toString(opts.perClientRunRows).length(); + } + + @Override + void onTakedown() throws IOException { + // No clean up + } + + /* + Generates Lexicographically ascending strings + */ + protected byte[] getSplitKey(final int i) { + return Bytes.toBytes(String.format("%0" + keyLength + "d", i)); + } + + } + static abstract class AsyncTableTest extends AsyncTest { protected AsyncTable table; @@ -1999,6 +2032,46 @@ protected void testTakedown() throws IOException { } } + /* + * Send random reads against fake regions inserted by MetaWriteTest + */ + static class MetaRandomReadTest extends MetaTest { + private RegionLocator regionLocator; + + MetaRandomReadTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + LOG.info("call getRegionLocation"); + } + + @Override + void onStartup() throws IOException { + super.onStartup(); + this.regionLocator = connection.getRegionLocator(table.getName()); + } + + @Override + boolean testRow(final int i, final long startTime) throws IOException, InterruptedException { + if (opts.randomSleep > 0) { + Thread.sleep(rand.nextInt(opts.randomSleep)); + } + HRegionLocation hRegionLocation = regionLocator.getRegionLocation( + getSplitKey(rand.nextInt(opts.perClientRunRows)), true); + LOG.debug("get location for region: " + hRegionLocation); + return true; + } + + @Override + protected int getReportingPeriod() { + int period = opts.perClientRunRows / 10; + return period == 0 ? opts.perClientRunRows : period; + } + + @Override + protected void testTakedown() throws IOException { + super.testTakedown(); + } + } + static class RandomWriteTest extends SequentialWriteTest { RandomWriteTest(Connection con, TestOptions options, Status status) { super(con, options, status); @@ -2188,6 +2261,34 @@ boolean testRow(final int i, final long startTime) throws IOException { } } + /* + * Delete all fake regions inserted to meta table by MetaWriteTest. 
+ */ + static class CleanMetaTest extends MetaTest { + CleanMetaTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + @Override + boolean testRow(final int i, final long startTime) throws IOException { + try { + RegionInfo regionInfo = connection.getRegionLocator(table.getName()) + .getRegionLocation(getSplitKey(i), false).getRegion(); + LOG.debug("deleting region from meta: " + regionInfo); + + Delete delete = MetaTableAccessor + .makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); + try (Table t = MetaTableAccessor.getMetaHTable(connection)) { + t.delete(delete); + } + } catch (IOException ie) { + // Log and continue + LOG.error("cannot find region with start key: " + i); + } + return true; + } + } + static class SequentialReadTest extends TableTest { SequentialReadTest(Connection con, TestOptions options, Status status) { super(con, options, status); @@ -2277,6 +2378,32 @@ boolean testRow(final int i, final long startTime) throws IOException { } } + /* + * Insert fake regions into meta table with contiguous split keys. + */ + static class MetaWriteTest extends MetaTest { + + MetaWriteTest(Connection con, TestOptions options, Status status) { + super(con, options, status); + } + + @Override + boolean testRow(final int i, final long startTime) throws IOException { + List regionInfos = new ArrayList(); + RegionInfo regionInfo = (RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE_NAME)) + .setStartKey(getSplitKey(i)) + .setEndKey(getSplitKey(i + 1)) + .build()); + regionInfos.add(regionInfo); + MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1); + + // write the serverName columns + MetaTableAccessor.updateRegionLocation(connection, + regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, + System.currentTimeMillis()); + return true; + } + } static class FilteredScanTest extends TableTest { protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 12060a742a2b..7b38c59c9387 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; +import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -34,10 +36,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -46,10 +51,14 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import 
org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -58,11 +67,18 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -117,6 +133,9 @@ public class TestImportExport { private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final String TEST_ATTR = "source_op"; + public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -801,4 +820,207 @@ public boolean isWALVisited() { return isVisited; } } + + /** + * Add cell tags to delete mutations, run export and import tool and + * verify that tags are present in import table also. + * @throws Throwable throws Throwable. + */ + @Test + public void testTagsAddition() throws Throwable { + final TableName exportTable = TableName.valueOf(name.getMethodName()); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(desc); + + Table exportT = UTIL.getConnection().getTable(exportTable); + + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + exportT.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + exportT.delete(d); + + // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool + // will use KeyValueCodecWithTags. + String[] args = new String[] { + "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. 
+ "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), + FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); + // Assert tag exists in exportTable + checkWhetherTagExists(exportTable, true); + + // Create an import table with MetadataController. + final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); + TableDescriptor importTableDesc = TableDescriptorBuilder + .newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(importTableDesc); + + // Run import tool. + args = new String[] { + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), + FQ_OUTPUT_DIR + }; + assertTrue(runImport(args)); + // Make sure that tags exists in imported table. + checkWhetherTagExists(importTable, true); + } + + private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException { + List values = new ArrayList<>(); + for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { + Scan scan = new Scan(); + // Make sure to set rawScan to true so that we will get Delete Markers. + scan.setRaw(true); + scan.readAllVersions(); + scan.withStartRow(ROW1); + // Need to use RegionScanner instead of table#getScanner since the latter will + // not return tags since it will go through rpc layer and remove tags intentionally. + RegionScanner scanner = region.getScanner(scan); + scanner.next(values); + if (!values.isEmpty()) { + break; + } + } + boolean deleteFound = false; + for (Cell cell: values) { + if (PrivateCellUtil.isDelete(cell.getType().getCode())) { + deleteFound = true; + List tags = PrivateCellUtil.getTags(cell); + // If tagExists flag is true then validate whether tag contents are as expected. + if (tagExists) { + Assert.assertEquals(1, tags.size()); + for (Tag tag : tags) { + Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); + } + } else { + // If tagExists flag is disabled then check for 0 size tags. + assertEquals(0, tags.size()); + } + } + } + Assert.assertTrue(deleteFound); + } + + /* + This co-proc will add a cell tag to delete mutation. 
+ */ + public static class MetadataController implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) + throws IOException { + if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { + return; + } + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + continue; + } + byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + Cell cell = cellScanner.current(); + List tags = PrivateCellUtil.getTags(cell); + tags.add(sourceOpTag); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } + } + } + } + + /** + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string + * This means it will use no Codec. Make sure that we don't return Tags in response. + * @throws Exception Exception + */ + @Test + public void testTagsWithEmptyCodec() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor tableDesc = TableDescriptorBuilder + .newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(tableDesc); + Configuration conf = new Configuration(UTIL.getConfiguration()); + conf.set(RPC_CODEC_CONF_KEY, ""); + conf.set(DEFAULT_CODEC_CLASS, ""); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + table.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + table.delete(d); + + // Since RPC_CODEC_CONF_KEY and DEFAULT_CODEC_CLASS is set to empty, it will use + // empty Codec and it shouldn't encode/decode tags. 
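(Editorial aside on the codec comment above.) Cell tags only cross the RPC boundary when a tag-aware codec is configured on the client, which is why this test pins both codec keys to the empty string and expects no tags back. The opposite configuration, the one the Export/Import runs earlier pass as -Dhbase.client.rpc.codec, would look roughly like the sketch below; it reuses the RPC_CODEC_CONF_KEY constant this test class already imports and is only meant to show the shape of the setting, not an addition to the patch.

Configuration tagAwareConf = new Configuration(UTIL.getConfiguration());
// KeyValueCodecWithTags encodes and decodes tags on the client/server RPC
// channel, unlike the empty codec configured just above.
tagAwareConf.set(RPC_CODEC_CONF_KEY,
    "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");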
+ Scan scan = new Scan().withStartRow(ROW1).setRaw(true); + ResultScanner scanner = table.getScanner(scan); + int count = 0; + Result result; + while ((result = scanner.next()) != null) { + List cells = result.listCells(); + assertEquals(2, cells.size()); + Cell cell = cells.get(0); + assertTrue(CellUtil.isDelete(cell)); + List tags = PrivateCellUtil.getTags(cell); + assertEquals(0, tags.size()); + count++; + } + assertEquals(1, count); + } finally { + UTIL.deleteTable(tableName); + } + } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 4b18624f9241..7855747b1664 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -86,7 +85,7 @@ public static void tearDownAfterClass() throws Exception { * Pass the key and value to reduce. */ public static class ScanMapper - extends TableMapper { + extends TableMapper { /** * Pass the key and value to reduce. @@ -99,7 +98,7 @@ public static class ScanMapper @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } @@ -123,7 +122,7 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. */ public static class ScanReducer - extends Reducer { private String first = null; @@ -131,7 +130,7 @@ public static class ScanReducer protected void reduce(ImmutableBytesWritable key, Iterable values, Context context) - throws IOException ,InterruptedException { + throws IOException ,InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); @@ -144,7 +143,7 @@ protected void reduce(ImmutableBytesWritable key, } protected void cleanup(Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -249,6 +248,12 @@ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) tif.setConf(job.getConfiguration()); Assert.assertEquals(TABLE_NAME, table.getName()); List splits = tif.getSplits(job); + for (InputSplit split : splits) { + TableSplit tableSplit = (TableSplit) split; + // In table input format, we do no store the scanner at the split level + // because we use the scan object from the map-reduce job conf itself. 
+ Assert.assertTrue(tableSplit.getScanAsString().isEmpty()); + } Assert.assertEquals(expectedNumOfSplits, splits.size()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java new file mode 100644 index 000000000000..8d21c394d554 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +@Category({ MapReduceTests.class, SmallTests.class}) +public class TestWALInputFormat { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALInputFormat.class); + + /** + * Test the primitive start/end time filtering. + */ + @Test + public void testAddFile() { + List lfss = new ArrayList<>(); + LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class); + long now = System.currentTimeMillis(); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now)); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 1, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now); + assertEquals(2, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, now); + assertEquals(3, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(4, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now, now + 2); + assertEquals(5, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now + 1, now + 2); + assertEquals(5, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(6, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name.123")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(7, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." 
+ now + ".meta")); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(8, lfss.size()); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 4880ab64e669..d60a3d9b712c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.PrintStream; import java.util.ArrayList; import org.apache.hadoop.conf.Configuration; @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.WALPlayer.WALKeyValueMapper; +import org.apache.hadoop.hbase.regionserver.TestRecoveredEdits; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; @@ -73,7 +74,6 @@ */ @Category({MapReduceTests.class, LargeTests.class}) public class TestWALPlayer { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALPlayer.class); @@ -91,7 +91,7 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { - conf= TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); walRootDir = TEST_UTIL.createWALRootDir(); fs = CommonFSUtils.getRootDirFileSystem(conf); @@ -106,9 +106,32 @@ public static void afterClass() throws Exception { logFs.delete(walRootDir, true); } + /** + * Test that WALPlayer can replay recovered.edits files. + */ + @Test + public void testPlayingRecoveredEdit() throws Exception { + TableName tn = TableName.valueOf(TestRecoveredEdits.RECOVEREDEDITS_TABLENAME); + TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); + // Copy testing recovered.edits file that is over under hbase-server test resources + // up into a dir in our little hdfs cluster here. + String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + assertTrue(new File(hbaseServerTestResourcesEdits).exists()); + FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); + // Target dir. + Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); + assertTrue(dfs.mkdirs(targetDir)); + dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); + assertEquals(0, + ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + // I don't know how many edits are in this file for this table... so just check more than 1. 
+ assertTrue(TEST_UTIL.countRows(tn) > 0); + } + /** * Simple end-to-end test - * @throws Exception */ @Test public void testWALPlayer() throws Exception { @@ -223,8 +246,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " []")); + assertTrue(data.toString().contains("Usage: WALPlayer [options] " + + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 21a43bd6b47f..40e7f37147a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; @@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -123,8 +123,7 @@ public static void tearDownAfterClass() throws Exception { } /** - * Test partial reads from the log based on passed time range - * @throws Exception + * Test partial reads from the WALs based on passed time range. */ @Test public void testPartialRead() throws Exception { @@ -140,6 +139,7 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); log.sync(); + Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); log.rollWriter(); LOG.info("Past 1st WAL roll " + log.toString()); @@ -164,26 +164,29 @@ public void testPartialRead() throws Exception { jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts); - // only 1st file is considered, and only its 1st entry is used + // Only 1st file is considered, and only its 1st entry is in-range. List splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.START_TIME_KEY, ts+1); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both files need to be considered assertEquals(2, splits.size()); - // only the 2nd entry from the 1st file is used - testSplit(splits.get(0), Bytes.toBytes("2")); - // only the 1nd entry from the 2nd file is used + // Both entries from first file are in-range. + testSplit(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2")); + // Only the 1st entry from the 2nd file is in-range. 
testSplit(splits.get(1), Bytes.toBytes("3")); + + jobConf.setLong(WALInputFormat.START_TIME_KEY, ts + 1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); + splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); + assertEquals(1, splits.size()); + // Only the 1st entry from the 2nd file is in-range. + testSplit(splits.get(0), Bytes.toBytes("3")); } /** * Test basic functionality - * @throws Exception */ @Test public void testWALRecordReader() throws Exception { @@ -234,11 +237,7 @@ public void testWALRecordReader() throws Exception { jobConf.setLong(WALInputFormat.END_TIME_KEY, Long.MAX_VALUE); jobConf.setLong(WALInputFormat.START_TIME_KEY, thirdTs); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both logs need to be considered - assertEquals(2, splits.size()); - // but both readers skip all edits - testSplit(splits.get(0)); - testSplit(splits.get(1)); + assertTrue(splits.isEmpty()); } /** @@ -346,4 +345,4 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) } reader.close(); } -} \ No newline at end of file +} diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java index e17a28a5828d..edc813d95b99 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java @@ -28,8 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - @InterfaceAudience.Private public final class MetricRegistriesLoader { private static final Logger LOG = LoggerFactory.getLogger(MetricRegistries.class); @@ -57,7 +55,6 @@ public static MetricRegistries load() { * implementation will be loaded. * @return A {@link MetricRegistries} implementation. */ - @VisibleForTesting static MetricRegistries load(List availableImplementations) { if (availableImplementations.size() == 1) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 6b4461402f20..53bfba62daf8 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -26,8 +26,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - @InterfaceAudience.Private public abstract class AbstractProcedureScheduler implements ProcedureScheduler { private static final Logger LOG = LoggerFactory.getLogger(AbstractProcedureScheduler.class); @@ -246,7 +244,6 @@ public long getNullPollCalls() { * Access should remain package-private. Use ProcedureEvent class to wake/suspend events. 
* @param events the list of events to wake */ - @VisibleForTesting public void wakeEvents(ProcedureEvent[] events) { schedLock(); try { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 46dd9e289b1a..579c60998765 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -33,8 +33,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** @@ -588,7 +586,6 @@ public boolean hasOwner() { /** * Called by the ProcedureExecutor to assign the ID to the newly created procedure. */ - @VisibleForTesting protected void setProcId(long procId) { this.procId = procId; this.submittedTime = EnvironmentEdgeManager.currentTime(); @@ -609,12 +606,10 @@ protected void setRootProcId(long rootProcId) { /** * Called by the ProcedureExecutor to set the value to the newly created procedure. */ - @VisibleForTesting protected void setNonceKey(NonceKey nonceKey) { this.nonceKey = nonceKey; } - @VisibleForTesting public void setOwner(String owner) { this.owner = StringUtils.isEmpty(owner) ? null : owner; } @@ -784,7 +779,6 @@ public synchronized boolean isWaiting() { return false; } - @VisibleForTesting protected synchronized void setState(final ProcedureState state) { this.state = state; updateTimestamp(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index 2cf30b2b89b2..614aeb0ca20a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -21,7 +21,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Basic ProcedureEvent that contains an "object", which can be a description or a reference to the @@ -107,7 +106,6 @@ public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEve * when waking up multiple events. * Access should remain package-private. */ - @VisibleForTesting public synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { if (ready && !suspendedProcedures.isEmpty()) { LOG.warn("Found procedures suspended in a ready event! Size=" + suspendedProcedures.size()); @@ -127,7 +125,6 @@ public synchronized void wakeInternal(AbstractProcedureScheduler procedureSchedu * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it * here for tests. 
*/ - @VisibleForTesting public ProcedureDeque getSuspendedProcedures() { return suspendedProcedures; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index f8857859131a..9111345fe8d5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -56,7 +56,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -639,7 +638,6 @@ public void stop() { workerMonitorExecutor.sendStopSignal(); } - @VisibleForTesting public void join() { assert !isRunning() : "expected not running"; @@ -981,7 +979,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur while (current != null) { LOG.debug("Bypassing {}", current); current.bypass(getEnvironment()); - store.update(procedure); + store.update(current); long parentID = current.getParentProcId(); current = getProcedure(parentID); } @@ -1332,12 +1330,10 @@ private long nextProcId() { return procId; } - @VisibleForTesting protected long getLastProcId() { return lastProcId.get(); } - @VisibleForTesting public Set getActiveProcIds() { return procedures.keySet(); } @@ -1932,17 +1928,14 @@ RootProcedureState getProcStack(long rootProcId) { return rollbackStack.get(rootProcId); } - @VisibleForTesting ProcedureScheduler getProcedureScheduler() { return scheduler; } - @VisibleForTesting int getCompletedSize() { return completed.size(); } - @VisibleForTesting public IdLock getProcExecutionLock() { return procExecutionLock; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index d787cc0979c1..72b2b284ca19 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -22,8 +22,6 @@ import java.util.concurrent.TimeUnit; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Keep track of the runnable procedures */ @@ -125,7 +123,6 @@ public interface ProcedureScheduler { * Returns the number of elements in this queue. * @return the number of elements in this queue. */ - @VisibleForTesting int size(); /** @@ -133,6 +130,5 @@ public interface ProcedureScheduler { * Used for testing failure and recovery. To emulate server crash/restart, * {@link ProcedureExecutor} resets its own state and calls clear() on scheduler. 
*/ - @VisibleForTesting void clear(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 611fc86f9a3c..a060f14ccf9a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -106,6 +106,10 @@ public boolean start() { return true; } + protected void setTimeoutExecutorUncaughtExceptionHandler(UncaughtExceptionHandler eh) { + timeoutExecutor.setUncaughtExceptionHandler(eh); + } + public boolean stop() { if (!running.getAndSet(false)) { return false; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java index feab8be16c0e..2b043d472d0e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java @@ -23,8 +23,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Simple scheduler for procedures */ @@ -47,7 +45,6 @@ protected Procedure dequeue() { return runnables.poll(); } - @VisibleForTesting @Override public void clear() { schedLock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index 46c4c5e545ea..d1af4969141a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -22,14 +22,11 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.StateMachineProcedureData; /** @@ -73,7 +70,6 @@ protected final int getCycles() { */ private int previousState; - @VisibleForTesting public enum Flow { HAS_MORE_STATE, NO_MORE_STATE, @@ -282,7 +278,6 @@ protected TState getCurrentState() { * sequentially. Some procedures may skip steps/ states, some may add intermediate steps in * future. 
*/ - @VisibleForTesting public int getCurrentStateId() { return getStateId(getCurrentState()); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index b0301c676069..6aed228af5af 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -59,7 +59,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.collections4.queue.CircularFifoQueue; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALHeader; @@ -240,7 +239,6 @@ public WALProcedureStore(Configuration conf, LeaseRecovery leaseRecovery) throws leaseRecovery); } - @VisibleForTesting public WALProcedureStore(final Configuration conf, final Path walDir, final Path walArchiveDir, final LeaseRecovery leaseRecovery) throws IOException { this.conf = conf; @@ -984,7 +982,6 @@ public long getMillisFromLastRoll() { return (System.currentTimeMillis() - lastRollTs.get()); } - @VisibleForTesting void periodicRollForTesting() throws IOException { lock.lock(); try { @@ -994,7 +991,6 @@ void periodicRollForTesting() throws IOException { } } - @VisibleForTesting public boolean rollWriterForTesting() throws IOException { lock.lock(); try { @@ -1004,7 +1000,6 @@ public boolean rollWriterForTesting() throws IOException { } } - @VisibleForTesting void removeInactiveLogsForTesting() throws Exception { lock.lock(); try { @@ -1058,7 +1053,6 @@ private boolean rollWriter() throws IOException { return true; } - @VisibleForTesting boolean rollWriter(long logId) throws IOException { assert logId > flushLogId : "logId=" + logId + " flushLogId=" + flushLogId; assert lock.isHeldByCurrentThread() : "expected to be the lock owner. 
" + lock.isLocked(); @@ -1257,7 +1251,6 @@ public Path getWALDir() { return this.walDir; } - @VisibleForTesting Path getWalArchiveDir() { return this.walArchiveDir; } diff --git a/hbase-protocol-shaded/src/main/protobuf/client/Encryption.proto b/hbase-protocol-shaded/src/main/protobuf/client/Encryption.proto index d0b445c0aab1..6b98e3a1e240 100644 --- a/hbase-protocol-shaded/src/main/protobuf/client/Encryption.proto +++ b/hbase-protocol-shaded/src/main/protobuf/client/Encryption.proto @@ -31,4 +31,5 @@ message WrappedKey { required bytes data = 3; optional bytes iv = 4; optional bytes hash = 5; + optional string hash_algorithm = 6 [default = "MD5"]; } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 0c8e89d185d8..dc875daf7976 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -167,6 +167,16 @@ message RegionLoad { /** The current blocks total weight for region in the regionserver */ optional uint64 blocks_total_weight = 26; + + /** The compaction state for region */ + optional CompactionState compaction_state = 27; + + enum CompactionState { + NONE = 0; + MINOR = 1; + MAJOR = 2; + MAJOR_AND_MINOR = 3; + } } message UserLoad { @@ -197,8 +207,11 @@ message ClientMetrics { message ReplicationLoadSink { required uint64 ageOfLastAppliedOp = 1; required uint64 timeStampsOfLastAppliedOp = 2; - required uint64 timestampStarted = 3; - required uint64 totalOpsProcessed = 4; + // The below two were added after hbase-2.0.0 went out. They have to be added as 'optional' else + // we break upgrades; old RegionServers reporting in w/ old forms of this message will fail to + // deserialize on the new Master. See HBASE-25234 + optional uint64 timestampStarted = 3; + optional uint64 totalOpsProcessed = 4; } message ReplicationLoadSource { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index b18de27a0c9a..76b085d43c8e 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -72,6 +72,8 @@ enum ModifyTableState { MODIFY_TABLE_DELETE_FS_LAYOUT = 5; MODIFY_TABLE_POST_OPERATION = 6; MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; + MODIFY_TABLE_CLOSE_EXCESS_REPLICAS = 8; + MODIFY_TABLE_ASSIGN_NEW_REPLICAS = 9; } message ModifyTableStateData { @@ -487,6 +489,7 @@ enum ReopenTableRegionsState { message ReopenTableRegionsStateData { required TableName table_name = 1; repeated RegionLocation region = 2; + repeated bytes region_names = 3; } enum InitMetaState { @@ -570,7 +573,9 @@ enum RegionRemoteProcedureBaseState { message RegionRemoteProcedureBaseStateData { required RegionInfo region = 1; required ServerName target_server = 2; - required RegionRemoteProcedureBaseState state = 3; + // state is actually 'required' but we can't set it as 'required' here else it breaks old + // Messages; see HBASE-22074. 
+ optional RegionRemoteProcedureBaseState state = 3; optional RegionStateTransition.TransitionCode transition_code = 4; optional int64 seq_id = 5; } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index ca0c93209ec9..0667292917ae 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -393,12 +393,6 @@ service AdminService { rpc ExecuteProcedures(ExecuteProceduresRequest) returns(ExecuteProceduresResponse); - rpc GetSlowLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - - rpc GetLargeLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - rpc ClearSlowLogsResponses(ClearSlowLogResponseRequest) returns(ClearSlowLogResponses); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 22026e5284ce..08799856b754 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig SyncReplicationState newSyncReplicationState) { this.conf = conf; this.id = id; - this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED; + setPeerState(peerState); this.peerConfig = peerConfig; this.syncReplicationStateBits = syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index ba6da7af3bef..ebe99da3541b 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -27,8 +27,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This provides an class for maintaining a set of peer clusters. These peers are remote slave * clusters that data is replicated to. @@ -59,7 +57,6 @@ public void init() throws ReplicationException { } } - @VisibleForTesting public ReplicationPeerStorage getPeerStorage() { return this.peerStorage; } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java index 93a32630d559..a33e23dc96b8 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java @@ -20,6 +20,7 @@ import java.util.List; +import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -37,13 +38,13 @@ public interface ReplicationTracker { * Register a replication listener to receive replication events. 
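
The ClusterStatus.proto and MasterProcedure.proto hunks above both relax previously required fields to optional so that messages serialized by older servers still deserialize after an upgrade (see HBASE-25234 and HBASE-22074). A minimal reader-side sketch of guarding on field presence, assuming the generated shaded class ClusterStatusProtos.ReplicationLoadSink; the -1 fallback value is illustrative only:

  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink;

  public final class ReplicationSinkReportReader {
    // Old RegionServers never set the optional field, so check presence before reading it.
    static long startedTimestamp(ReplicationLoadSink sink) {
      return sink.hasTimestampStarted() ? sink.getTimestampStarted() : -1L;
    }
  }
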
* @param listener the listener to register */ - public void registerListener(ReplicationListener listener); + void registerListener(ReplicationListener listener); - public void removeListener(ReplicationListener listener); + void removeListener(ReplicationListener listener); /** * Returns a list of other live region servers in the cluster. * @return List of region servers. */ - public List getListOfRegionServers(); + List getListOfRegionServers(); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index 54c9c2cdc0af..6fc3c452723d 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -20,7 +20,10 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; + import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -49,7 +52,7 @@ public class ReplicationTrackerZKImpl implements ReplicationTracker { // listeners to be notified private final List listeners = new CopyOnWriteArrayList<>(); // List of all the other region servers in this cluster - private final ArrayList otherRegionServers = new ArrayList<>(); + private final List otherRegionServers = new ArrayList<>(); public ReplicationTrackerZKImpl(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) { this.zookeeper = zookeeper; @@ -74,10 +77,10 @@ public void removeListener(ReplicationListener listener) { * Return a snapshot of the current region servers. */ @Override - public List getListOfRegionServers() { + public List getListOfRegionServers() { refreshOtherRegionServersList(false); - List list = null; + List list = null; synchronized (otherRegionServers) { list = new ArrayList<>(otherRegionServers); } @@ -162,7 +165,7 @@ private String getZNodeName(String fullPath) { * if it was empty), false if the data was missing in ZK */ private boolean refreshOtherRegionServersList(boolean watch) { - List newRsList = getRegisteredRegionServers(watch); + List newRsList = getRegisteredRegionServers(watch); if (newRsList == null) { return false; } else { @@ -178,7 +181,7 @@ private boolean refreshOtherRegionServersList(boolean watch) { * Get a list of all the other region servers in this cluster and set a watch * @return a list of server nanes */ - private List getRegisteredRegionServers(boolean watch) { + private List getRegisteredRegionServers(boolean watch) { List result = null; try { if (watch) { @@ -190,6 +193,7 @@ private List getRegisteredRegionServers(boolean watch) { } catch (KeeperException e) { this.abortable.abort("Get list of registered region servers", e); } - return result; + return result == null ? 
null : + result.stream().map(ServerName::parseServerName).collect(Collectors.toList()); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java index 7a943c4035ce..09aeee55cca8 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java @@ -31,8 +31,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; /** @@ -78,22 +76,18 @@ public ZKReplicationPeerStorage(ZKWatcher zookeeper, Configuration conf) { this.peersZNode = ZNodePaths.joinZNode(replicationZNode, peersZNodeName); } - @VisibleForTesting public String getPeerStateNode(String peerId) { return ZNodePaths.joinZNode(getPeerNode(peerId), peerStateNodeName); } - @VisibleForTesting public String getPeerNode(String peerId) { return ZNodePaths.joinZNode(peersZNode, peerId); } - @VisibleForTesting public String getSyncReplicationStateNode(String peerId) { return ZNodePaths.joinZNode(getPeerNode(peerId), SYNC_REPLICATION_STATE_ZNODE); } - @VisibleForTesting public String getNewSyncReplicationStateNode(String peerId) { return ZNodePaths.joinZNode(getPeerNode(peerId), NEW_SYNC_REPLICATION_STATE_ZNODE); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index 6f1f5a3d6a42..5c480bacdd82 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -51,7 +51,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** @@ -103,7 +102,6 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase */ private final String hfileRefsZNode; - @VisibleForTesting final String regionsZNode; public ZKReplicationQueueStorage(ZKWatcher zookeeper, Configuration conf) { @@ -158,7 +156,6 @@ private String getFileNode(ServerName serverName, String queueId, String fileNam * @return ZNode path to persist the max sequence id that we've pushed for the given region and * peer. */ - @VisibleForTesting String getSerialReplicationRegionPeerNode(String encodedRegionName, String peerId) { if (encodedRegionName == null || encodedRegionName.length() != RegionInfo.MD5_HEX_LENGTH) { throw new IllegalArgumentException( @@ -264,7 +261,6 @@ public void setWALPosition(ServerName serverName, String queueId, String fileNam * Return the {lastPushedSequenceId, ZNodeDataVersion} pair. if ZNodeDataVersion is -1, it means * that the ZNode does not exist. 
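
The ReplicationTracker and ReplicationTrackerZKImpl hunks above replace raw lists of znode-name strings with typed ServerName values, parsed via ServerName.parseServerName. A minimal sketch of a caller written against the updated signature; the tracker argument is assumed to be any ReplicationTracker implementation:

  import java.util.List;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.replication.ReplicationTracker;

  public final class LiveServerLogger {
    // The list now carries parsed ServerName instances rather than plain znode names.
    static void logOtherLiveServers(ReplicationTracker tracker) {
      List<ServerName> others = tracker.getListOfRegionServers();
      for (ServerName sn : others) {
        System.out.println(sn.getHostname() + ":" + sn.getPort());
      }
    }
  }
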
*/ - @VisibleForTesting protected Pair getLastSequenceIdWithVersion(String encodedRegionName, String peerId) throws KeeperException { Stat stat = new Stat(); @@ -503,7 +499,6 @@ public List getAllQueues(ServerName serverName) throws ReplicationExcept } // will be overridden in UTs - @VisibleForTesting protected int getQueuesZNodeCversion() throws KeeperException { Stat stat = new Stat(); ZKUtil.getDataNoWatch(this.zookeeper, this.queuesZNode, stat); @@ -641,7 +636,6 @@ public List getReplicableHFiles(String peerId) throws ReplicationExcepti } // will be overridden in UTs - @VisibleForTesting protected int getHFileRefsZNodeCversion() throws ReplicationException { Stat stat = new Stat(); try { diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 348271905fcd..4bb1021b7a42 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index e7ee1e7c4835..18b0c121e67e 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -26,7 +26,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import java.io.IOException; import java.util.HashMap; import java.util.Iterator; @@ -35,7 +34,6 @@ import java.util.Random; import java.util.Set; import java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; @@ -45,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -73,6 +72,11 @@ public static void tearDown() throws IOException { UTIL.shutdownMiniZKCluster(); } + @After + public void cleanCustomConfigurations() { + UTIL.getConfiguration().unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + private Set randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) .collect(toSet()); @@ -220,8 +224,7 @@ public void testNoSyncReplicationState() STORAGE.getNewSyncReplicationStateNode(peerId))); } - @Test - public void 
testBaseReplicationPeerConfig() { + @Test public void testBaseReplicationPeerConfig() throws ReplicationException{ String customPeerConfigKey = "hbase.xxx.custom_config"; String customPeerConfigValue = "test"; String customPeerConfigUpdatedValue = "testUpdated"; @@ -241,7 +244,7 @@ public void testBaseReplicationPeerConfig() { concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,existingReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). @@ -249,17 +252,63 @@ public void testBaseReplicationPeerConfig() { assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigSecondKey)); - // validates base configs does not override value if config already present + // validates base configs get updated values even if config already present + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,updatedReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigSecondKey)); } + + @Test public void testBaseReplicationRemovePeerConfig() throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + String customPeerConfigValue = "test"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat(customPeerConfigValue)); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + + // validates base configs are present in replicationPeerConfig + assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). + get(customPeerConfigKey)); + + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. 
+ updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + + assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); + } + + @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() + throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + } } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index 74a24ac1eb62..4f1fd3908687 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -28,7 +28,6 @@ import java.util.List; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 4cf8a93ed5b0..704eac78db5a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -51,6 +51,10 @@ public interface Constants { String REST_SSL_ENABLED = "hbase.rest.ssl.enabled"; String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store"; String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password"; + String REST_SSL_KEYSTORE_TYPE = "hbase.rest.ssl.keystore.type"; + String REST_SSL_TRUSTSTORE_STORE = "hbase.rest.ssl.truststore.store"; + String REST_SSL_TRUSTSTORE_PASSWORD = "hbase.rest.ssl.truststore.password"; + String REST_SSL_TRUSTSTORE_TYPE = "hbase.rest.ssl.truststore.type"; String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword"; String REST_SSL_EXCLUDE_CIPHER_SUITES = "hbase.rest.ssl.exclude.cipher.suites"; String REST_SSL_INCLUDE_CIPHER_SUITES = "hbase.rest.ssl.include.cipher.suites"; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 2ad57e1b742c..4e6adfb6d7c3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -27,6 +27,7 @@ import java.util.concurrent.ArrayBlockingQueue; import javax.servlet.DispatcherType; import 
org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -305,14 +306,32 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); + String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); + if(StringUtils.isNotBlank(keystoreType)) { + sslCtxFactory.setKeyStoreType(keystoreType); + } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); + String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); + if(StringUtils.isNotBlank(trustStore)) { + sslCtxFactory.setTrustStorePath(trustStore); + } + String trustStorePassword = + HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); + if(StringUtils.isNotBlank(trustStorePassword)) { + sslCtxFactory.setTrustStorePassword(trustStorePassword); + } + String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); + if(StringUtils.isNotBlank(trustStoreType)) { + sslCtxFactory.setTrustStoreType(trustStoreType); + } + String[] excludeCiphers = servlet.getConfiguration().getStrings( REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { @@ -382,13 +401,8 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - try { - // start server - server.start(); - } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); - throw e; - } + // start server + server.start(); } public synchronized void join() throws Exception { @@ -442,6 +456,7 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 1cae45c0b3f7..28cf4cba9fa7 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -22,6 +22,7 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -30,6 +31,7 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; /** * REST servlet container. 
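
The Constants and RESTServer hunks above add truststore settings next to the existing keystore ones, so the REST server's SSL context can be built entirely from configuration. A minimal sketch of populating those keys before starting the server; paths, passwords, and the jks type are placeholders:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.rest.Constants;

  public final class RestSslConf {
    static Configuration sslEnabledConf() {
      Configuration conf = HBaseConfiguration.create();
      conf.set(Constants.REST_SSL_ENABLED, "true");
      conf.set(Constants.REST_SSL_KEYSTORE_STORE, "/path/to/serverKS.jks");
      conf.set(Constants.REST_SSL_KEYSTORE_PASSWORD, "keystore-password");
      conf.set(Constants.REST_SSL_KEYSTORE_TYPE, "jks");
      conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, "/path/to/serverTS.jks");
      conf.set(Constants.REST_SSL_TRUSTSTORE_PASSWORD, "truststore-password");
      conf.set(Constants.REST_SSL_TRUSTSTORE_TYPE, "jks");
      return conf;
    }
  }

The truststore keys are not mandatory: when they are blank, the new settings are simply skipped, matching the StringUtils.isNotBlank guards above.
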
It is used to get the remote request user @@ -51,7 +53,8 @@ public RESTServletContainer(ResourceConfig config) { @Override public void service(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { - final String doAsUserFromQuery = request.getParameter("doAs"); + final HttpServletRequest lowerCaseRequest = toLowerCase(request); + final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas"); RESTServlet servlet = RESTServlet.getInstance(); if (doAsUserFromQuery != null) { Configuration conf = servlet.getConfiguration(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index c3eed6a2eecd..6d6293fb1647 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -75,6 +75,9 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); try { TableName tableName = TableName.valueOf(tableResource.getName()); + if (!tableResource.exists()) { + throw new TableNotFoundException(tableName); + } TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); List locs; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 9e6661bd2aac..47700aa9e4fe 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -21,15 +21,23 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; import java.util.Collections; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; - +import javax.net.ssl.SSLContext; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; @@ -37,6 +45,7 @@ import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpHead; @@ -44,9 +53,10 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.entity.InputStreamEntity; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; import org.apache.http.message.BasicHeader; -import org.apache.http.params.CoreConnectionPNames; +import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -81,14 +91,35 @@ public Client() { this(null); } - private void 
initialize(Cluster cluster, boolean sslEnabled) { + private void initialize(Cluster cluster, boolean sslEnabled, Optional trustStore) { this.cluster = cluster; this.sslEnabled = sslEnabled; extraHeaders = new ConcurrentHashMap<>(); String clspath = System.getProperty("java.class.path"); LOG.debug("classpath " + clspath); - this.httpClient = new DefaultHttpClient(); - this.httpClient.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 2000); + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + + RequestConfig requestConfig = RequestConfig.custom(). + setConnectTimeout(2000).build(); + httpClientBuilder.setDefaultRequestConfig(requestConfig); + + // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore. + // The new http client would decompress the gzip content automatically. + // In order to keep the original behaviour of this public class, we disable + // automatic content compression. + httpClientBuilder.disableContentCompression(); + + if(sslEnabled && trustStore.isPresent()) { + try { + SSLContext sslcontext = + SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); + httpClientBuilder.setSSLContext(sslcontext); + } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) { + throw new ClientTrustStoreInitializationException("Error while processing truststore", e); + } + } + + this.httpClient = httpClientBuilder.build(); } /** @@ -96,7 +127,7 @@ private void initialize(Cluster cluster, boolean sslEnabled) { * @param cluster the cluster definition */ public Client(Cluster cluster) { - initialize(cluster, false); + this(cluster, false); } /** @@ -105,7 +136,38 @@ public Client(Cluster cluster) { * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { - initialize(cluster, sslEnabled); + initialize(cluster, sslEnabled, Optional.empty()); + } + + /** + * Constructor, allowing to define custom trust store (only for SSL connections) + * + * @param cluster the cluster definition + * @param trustStorePath custom trust store to use for SSL connections + * @param trustStorePassword password to use for custom trust store + * @param trustStoreType type of custom trust store + * + * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded + */ + public Client(Cluster cluster, String trustStorePath, + Optional trustStorePassword, Optional trustStoreType) { + + char[] password = trustStorePassword.map(String::toCharArray).orElse(null); + String type = trustStoreType.orElse(KeyStore.getDefaultType()); + + KeyStore trustStore; + try(FileInputStream inputStream = new FileInputStream(new File(trustStorePath))) { + trustStore = KeyStore.getInstance(type); + trustStore.load(inputStream, password); + } catch (KeyStoreException e) { + throw new ClientTrustStoreInitializationException( + "Invalid trust store type: " + type, e); + } catch (CertificateException | NoSuchAlgorithmException | IOException e) { + throw new ClientTrustStoreInitializationException( + "Trust store load error: " + trustStorePath, e); + } + + initialize(cluster, true, Optional.of(trustStore)); } /** @@ -724,4 +786,12 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE method.releaseConnection(); } } + + + public static class ClientTrustStoreInitializationException extends RuntimeException { + + public ClientTrustStoreInitializationException(String message, Throwable cause) { + super(message, cause); + } + } } diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java new file mode 100644 index 000000000000..a1fe2f010fdb --- /dev/null +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Optional; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RestTests.class, MediumTests.class}) +public class TestRESTServerSSL { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRESTServerSSL.class); + + private static final String KEY_STORE_PASSWORD = "myKSPassword"; + private static final String TRUST_STORE_PASSWORD = "myTSPassword"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); + private static Client sslClient; + private static File keyDir; + private Configuration conf; + + @BeforeClass + public static void beforeClass() throws Exception { + keyDir = initKeystoreDir(); + KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); + X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( + "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + + generateTrustStore("jks", serverCertificate); + generateTrustStore("jceks", serverCertificate); + generateTrustStore("pkcs12", serverCertificate); + + generateKeyStore("jks", keyPair, serverCertificate); + generateKeyStore("jceks", keyPair, serverCertificate); + generateKeyStore("pkcs12", keyPair, serverCertificate); + + TEST_UTIL.startMiniCluster(); + } + + @AfterClass + public static void afterClass() throws Exception { + // this will also delete the generated test keystore / teststore files, + // as we were 
placing them under the dataTestDir used by the minicluster + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeEachTest() { + conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(Constants.REST_SSL_ENABLED, "true"); + conf.set(Constants.REST_SSL_KEYSTORE_KEYPASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_KEYSTORE_PASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_TRUSTSTORE_PASSWORD, TRUST_STORE_PASSWORD); + } + + @After + public void tearDownAfterTest() { + REST_TEST_UTIL.shutdownServletContainer(); + } + + @Test + public void testSslConnection() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test(expected = org.apache.http.client.ClientProtocolException.class) + public void testNonSslClientDenied() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + Client nonSslClient = new Client(localCluster, false); + + nonSslClient.get("/version"); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJKS() throws Exception { + startRESTServer("jks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJCEKS() throws Exception { + startRESTServer("jceks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatPKCS12() throws Exception { + startRESTServer("pkcs12"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + + + private static File initKeystoreDir() { + String dataTestDir = TEST_UTIL.getDataTestDir().toString(); + File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); + keystoreDir.mkdirs(); + return keystoreDir; + } + + private static void generateKeyStore(String keyStoreType, KeyPair keyPair, + X509Certificate serverCertificate) throws Exception { + String keyStorePath = getKeystoreFilePath(keyStoreType); + KeyStoreTestUtil.createKeyStore(keyStorePath, KEY_STORE_PASSWORD, KEY_STORE_PASSWORD, + "serverKS", keyPair.getPrivate(), serverCertificate, keyStoreType); + } + + private static void generateTrustStore(String trustStoreType, X509Certificate serverCertificate) + throws Exception { + String trustStorePath = getTruststoreFilePath(trustStoreType); + KeyStoreTestUtil.createTrustStore(trustStorePath, TRUST_STORE_PASSWORD, "serverTS", + serverCertificate, trustStoreType); + } + + private static String getKeystoreFilePath(String keyStoreType) { + return String.format("%s/serverKS.%s", keyDir.getAbsolutePath(), keyStoreType); + } + + private static String getTruststoreFilePath(String trustStoreType) { + return String.format("%s/serverTS.%s", keyDir.getAbsolutePath(), trustStoreType); + } + + private void startRESTServerWithDefaultKeystoreType() throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath("jks")); + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath("jks")); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath("jks"), + 
Optional.of(TRUST_STORE_PASSWORD), Optional.empty()); + } + + private void startRESTServer(String storeType) throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_TYPE, storeType); + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath(storeType)); + + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath(storeType)); + conf.set(Constants.REST_SSL_TRUSTSTORE_TYPE, storeType); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath(storeType), + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + } + +} diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java index 01d81c2226f7..47ef0539271a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.rest.RESTServlet.HBASE_REST_SUPPORT_PROXYUSER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -24,6 +25,7 @@ import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import java.io.File; +import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; import java.security.Principal; @@ -115,6 +117,7 @@ public class TestSecureRESTServer { private static final String HOSTNAME = "localhost"; private static final String CLIENT_PRINCIPAL = "client"; + private static final String WHEEL_PRINCIPAL = "wheel"; // The principal for accepting SPNEGO authn'ed requests (*must* be HTTP/fqdn) private static final String SPNEGO_SERVICE_PRINCIPAL = "HTTP/" + HOSTNAME; // The principal we use to connect to HBase @@ -126,6 +129,7 @@ public class TestSecureRESTServer { private static RESTServer server; private static File restServerKeytab; private static File clientKeytab; + private static File wheelKeytab; private static File serviceKeytab; @BeforeClass @@ -148,6 +152,8 @@ public static void setupServer() throws Exception { restServerKeytab = new File(keytabDir, "spnego.keytab"); // Keytab for the client clientKeytab = new File(keytabDir, CLIENT_PRINCIPAL + ".keytab"); + // Keytab for wheel + wheelKeytab = new File(keytabDir, WHEEL_PRINCIPAL + ".keytab"); /* * Update UGI @@ -159,6 +165,7 @@ public static void setupServer() throws Exception { */ KDC = TEST_UTIL.setupMiniKdc(serviceKeytab); KDC.createPrincipal(clientKeytab, CLIENT_PRINCIPAL); + KDC.createPrincipal(wheelKeytab, WHEEL_PRINCIPAL); KDC.createPrincipal(serviceKeytab, SERVICE_PRINCIPAL); // REST server's keytab contains keys for both principals REST uses KDC.createPrincipal(restServerKeytab, SPNEGO_SERVICE_PRINCIPAL, REST_SERVER_PRINCIPAL); @@ -168,7 +175,7 @@ public static void setupServer() throws Exception { HBaseKerberosUtils.setKeytabFileForTesting(serviceKeytab.getAbsolutePath()); // Why doesn't `setKeytabFileForTesting` do this? 
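
The rest client hunk above (Client.java) adds a constructor that loads a caller-supplied truststore and enables SSL, which the new TestRESTServerSSL exercises for jks, jceks, and pkcs12 stores. A minimal usage sketch; host, port, path, and password are placeholders:

  import java.util.Optional;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Cluster;
  import org.apache.hadoop.hbase.rest.client.Response;

  public final class SslRestClientExample {
    public static void main(String[] args) throws Exception {
      Cluster cluster = new Cluster().add("rest.example.com", 8443);
      // The truststore type is optional; an empty Optional falls back to KeyStore.getDefaultType().
      Client client = new Client(cluster, "/path/to/clientTS.jks",
          Optional.of("truststore-password"), Optional.of("jks"));
      Response response = client.get("/version", "text/plain");
      System.out.println(response.getCode());
    }
  }
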
conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath()); - conf.set("hbase.regionserver.hostname", "localhost"); + conf.set("hbase.unsafe.regionserver.hostname", "localhost"); conf.set("hbase.master.hostname", "localhost"); HBaseKerberosUtils.setSecuredConfiguration(conf, SERVICE_PRINCIPAL+ "@" + KDC.getRealm(), SPNEGO_SERVICE_PRINCIPAL+ "@" + KDC.getRealm()); @@ -184,6 +191,8 @@ public static void setupServer() throws Exception { conf.set("hbase.superuser", "hbase"); conf.set("hadoop.proxyuser.rest.hosts", "*"); conf.set("hadoop.proxyuser.rest.users", "*"); + conf.set("hadoop.proxyuser.wheel.hosts", "*"); + conf.set("hadoop.proxyuser.wheel.users", "*"); UserGroupInformation.setConfiguration(conf); updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, @@ -230,6 +239,7 @@ public Void run() throws Exception { return null; } }); + instertData(); } @AfterClass @@ -299,21 +309,21 @@ private static void updateKerberosConfiguration(Configuration conf, // Keytab for both principals above conf.set(RESTServer.REST_KEYTAB_FILE, serverKeytab.getAbsolutePath()); conf.set("hbase.rest.authentication.kerberos.keytab", serverKeytab.getAbsolutePath()); + conf.set(HBASE_REST_SUPPORT_PROXYUSER, "true"); } - @Test - public void testPositiveAuthorization() throws Exception { + private static void instertData() throws IOException, InterruptedException { // Create a table, write a row to it, grant read perms to the client UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { TableDescriptor desc = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) + .build(); conn.getAdmin().createTable(desc); try (Table t = conn.getTable(table)) { Put p = new Put(Bytes.toBytes("a")); @@ -331,6 +341,12 @@ public Void run() throws Exception { return null; } }); + } + + public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) throws Exception{ + UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + final TableName table = TableName.valueOf("publicTable"); // Read that row as the client Pair pair = getClient(); @@ -338,32 +354,54 @@ public Void run() throws Exception { HttpClientContext context = pair.getSecond(); HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI() - + "/" + table + "/a"); + + "/" + table + "/a" + extraArgs); get.addHeader("Accept", "application/json"); UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + PRINCIPAL, keytab.getAbsolutePath()); String jsonResponse = user.doAs(new PrivilegedExceptionAction() { @Override public String run() throws Exception { try (CloseableHttpResponse response = client.execute(get, context)) { final int statusCode = response.getStatusLine().getStatusCode(); - assertEquals(response.getStatusLine().toString(), HttpURLConnection.HTTP_OK, statusCode); + assertEquals(response.getStatusLine().toString(), 
responseCode, statusCode); HttpEntity entity = response.getEntity(); return EntityUtils.toString(entity); } } }); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); - CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); - assertEquals(1, model.getRows().size()); - RowModel row = model.getRows().get(0); - assertEquals("a", Bytes.toString(row.getKey())); - assertEquals(1, row.getCells().size()); - CellModel cell = row.getCells().get(0); - assertEquals("1", Bytes.toString(cell.getValue())); + if(responseCode == HttpURLConnection.HTTP_OK) { + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); + assertEquals(1, model.getRows().size()); + RowModel row = model.getRows().get(0); + assertEquals("a", Bytes.toString(row.getKey())); + assertEquals(1, row.getCells().size()); + CellModel cell = row.getCells().get(0); + assertEquals("1", Bytes.toString(cell.getValue())); + } } + @Test + public void testPositiveAuthorization() throws Exception { + testProxy("", CLIENT_PRINCIPAL, clientKeytab, HttpURLConnection.HTTP_OK); + } + + @Test + public void testDoAs() throws Exception { + testProxy("?doAs="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + } + + @Test + public void testDoas() throws Exception { + testProxy("?doas="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + } + + @Test + public void testWithoutDoAs() throws Exception { + testProxy("", WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_FORBIDDEN); + } + + @Test public void testNegativeAuthorization() throws Exception { Pair pair = getClient(); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index bf0c69502d52..02611dfaf905 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hbase.rest; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertThat; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.rest.client.Client; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 0bece66df380..0c83a7fdeabc 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -261,5 +261,14 @@ public void testTableInfoPB() throws IOException, JAXBException { checkTableInfo(model); } + @Test + public void testTableNotFound() throws IOException { + String notExistTable = "notexist"; + Response response1 = client.get("/" + notExistTable + "/schema", Constants.MIMETYPE_JSON); + assertEquals(404, response1.getCode()); + Response response2 = client.get("/" + notExistTable + "/regions", Constants.MIMETYPE_XML); + assertEquals(404, response2.getCode()); + } + } diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 0df64eaa0b81..4ae6d243752b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -808,7 +808,7 @@ public void coprocessorService(Class service, byte[] s } @Override - public void mutateRow(RowMutations rm) throws IOException { + public Result mutateRow(RowMutations rm) throws IOException { throw new IOException("atomicMutation not supported"); } diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon index f700d3994732..8d090276a807 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon @@ -105,6 +105,7 @@ String parent = ""; Description State Status + Completion Time <%for MonitoredTask task : tasks %> @@ -116,6 +117,7 @@ String parent = ""; <% task.getStatus() %> (since <% StringUtils.formatTimeDiff(now, task.getStatusTime()) %> ago) + <% task.getCompletionTimestamp() < 0 ? task.getState() : new Date(task.getCompletionTimestamp()) %> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon index 7ec6ef46af34..ee899a7340dc 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon @@ -120,6 +120,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); +
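
The RemoteHTable hunk above follows the Table.mutateRow signature change: it now returns a Result rather than void (the remote implementation itself still just throws, as before). A minimal sketch of the updated call shape against a regular Table; family and qualifier names are placeholders:

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.RowMutations;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class MutateRowExample {
    // Applies a put and a delete atomically to one row; mutateRow now returns a Result.
    static Result swapColumns(Table table, byte[] row) throws IOException {
      RowMutations mutations = new RowMutations(row);
      mutations.add(new Put(row).addColumn(Bytes.toBytes("f"), Bytes.toBytes("new"), Bytes.toBytes("v")));
      mutations.add(new Delete(row).addColumns(Bytes.toBytes("f"), Bytes.toBytes("old")));
      return table.mutateRow(mutations);
    }
  }
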

diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index d95a35904dc7..14e82e8b970c 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -270,7 +270,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
- <& userSnapshots &>
@@ -499,11 +498,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); if (!ritTotalNum || ritTotalNum < 1) { return; } + var ritPerPage = parseInt($("#rit_per_page").val()); $("#rit_pagination").sPage({ page:1, total:ritTotalNum, - pageSize:5, + pageSize:ritPerPage, noData: false, showPN:true, prevPage:"prev", @@ -656,40 +656,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); -<%def userSnapshots> -<%java> - List snapshots = master.isInitialized() ? - master.getSnapshotManager().getCompletedSnapshots() : null; - -<%if (snapshots != null && snapshots.size() > 0)%> - - - - - - - - - <%for SnapshotDescription snapshotDesc : snapshots%> - <%java> - TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); - - - - - - - - - - -

<% snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

-
Snapshot NameTableCreation TimeOwnerTTL
<% snapshotDesc.getName() %> <% snapshotTable.getNameAsString() %> - <% new Date(snapshotDesc.getCreationTime()) %><% snapshotDesc.getOwner() %> - <% snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> -
- - <%def deadRegionServers> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon index a3067eeaf8ad..58783611180e 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon @@ -121,7 +121,7 @@ Arrays.sort(serverNames); <& serverNameLink; serverName=serverName; &> - <% new Date(startcode) %> + <% java.time.Instant.ofEpochMilli(startcode) %> <% TraditionalBinaryPrefix.long2String(lastContact, "s", 1) %> <% version %> <% String.format("%,.0f", requestsPerSecond) %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java index da03eba0351a..4b4aef30bbc5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java @@ -29,7 +29,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The Class ExecutorStatusChore for collect Executor status info periodically @@ -77,7 +76,6 @@ protected void chore() { } } - @VisibleForTesting public Pair getExecutorStatus(String poolName) { MutableGaugeLong running = metricsRegistry.getGauge(poolName + "_running", 0L); MutableGaugeLong queued = metricsRegistry.getGauge(poolName + "_queued", 0L); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java similarity index 90% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 0c44b9a2cc42..6ba719a4acb1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -24,9 +24,9 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This exception is thrown by the master when a region server reports and is - * already being processed as dead. This can happen when a region server loses - * its session but didn't figure it yet. + * This exception is thrown by the master when a region server reports and is already being + * processed as dead. This can happen when a region server loses its session but didn't figure it + * yet. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index 697706507bf0..1cde2fa24844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -146,9 +146,11 @@ public static String parseMasterServerName(String rsZnodePath) { } /** - * * @return true if cluster is configured with master-rs collocation + * @deprecated since 2.4.0, will be removed in 3.0.0. 
+ * @see HBASE-15549 */ + @Deprecated private static boolean tablesOnMaster(Configuration conf) { boolean tablesOnMaster = true; String confValue = conf.get(BaseLoadBalancer.TABLES_ON_MASTER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java index 91d950265db8..0146c8b94d2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -104,7 +104,7 @@ private void call(HRegionLocation loc) { err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } - Pair p = ReplicationProtbufUtil + Pair p = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null); resetCallTimeout(); controller.setCellScanner(p.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java index e49fe8c8c931..e27574a0f924 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java @@ -35,8 +35,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Lock for HBase Entity either a Table, a Namespace, or Regions. 
* @@ -145,12 +143,12 @@ public String toString() { return sb.toString(); } - @VisibleForTesting + @InterfaceAudience.Private void setTestingSleepTime(long timeInMillis) { testingSleepTime = timeInMillis; } - @VisibleForTesting + @InterfaceAudience.Private LockHeartbeatWorker getWorker() { return worker; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java index 4229a7aa6fc3..24f2835af8b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client.locking; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -30,7 +29,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; @@ -89,7 +87,7 @@ public EntityLock regionLock(List regionInfos, String description, A return new EntityLock(conf, stub, lockRequest, abort); } - @VisibleForTesting + @InterfaceAudience.Private public static LockRequest buildLockRequest(final LockType type, final String namespace, final TableName tableName, final List regionInfos, final String description, final long nonceGroup, final long nonce) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index 4b266e2bda7a..ddbbb5fc8bdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -74,7 +74,7 @@ static class MessageDecoder extends BaseDecoder { @Override protected Cell parseCell() throws IOException { - return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in)); + return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in), false); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index 6aa5d977b678..b0a04c5044ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -22,20 +22,19 @@ import java.util.List; import java.util.Optional; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.yetus.audience.InterfaceAudience; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. @@ -98,11 +97,4 @@ public void prePut(ObserverContext e, Put put, } // if we made it here, then the Put is valid } - - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. - return hasMore; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 33d8f2ca779d..9508321a625a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -29,8 +29,6 @@ import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Coordination for SplitLogManager. It creates and works with tasks for split log operations
* Manager prepares task by calling {@link #prepareTask} and submit it by @@ -147,6 +145,5 @@ public ServerName getServerName() { * Support method to init constants such as timeout. Mostly required for UTs. * @throws IOException */ - @VisibleForTesting void init() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index a9fae4640d5a..5452578a2c26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Coordinated operations for {@link SplitLogWorker} and @@ -94,7 +93,6 @@ void init(RegionServerServices server, Configuration conf, * Used by unit tests to check how many tasks were processed * @return number of tasks */ - @VisibleForTesting int getTaskReadySeq(); /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index 8fc351fb7751..dee94be9fad3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -58,7 +58,6 @@ import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * ZooKeeper based implementation of @@ -742,7 +741,6 @@ public SplitLogManagerDetails getDetails() { /** * Temporary function that is used by unit tests only */ - @VisibleForTesting public void setIgnoreDeleteForTesting(boolean b) { ignoreZKDeleteForTesting = b; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 7e0f2ae2b862..442507630056 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.security.User; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedList; @@ -253,7 +252,6 @@ public E load(Path path, String className, int priority, } } - @VisibleForTesting public void load(Class implClass, int priority, Configuration conf) throws IOException { E env = checkAndLoadInstance(implClass, priority, conf); @@ -325,7 +323,6 @@ public C findCoprocessor(String className) { return null; } - @VisibleForTesting public T findCoprocessor(Class cls) { for (E env: coprocEnvironments) { if (cls.isAssignableFrom(env.getInstance().getClass())) { @@ -360,7 +357,6 @@ public List findCoprocessors(Class cls) { * @param className the class name 
* @return the coprocessor, or null if not found */ - @VisibleForTesting public E findCoprocessorEnvironment(String className) { for (E env: coprocEnvironments) { if (env.getInstance().getClass().getName().equals(className) || diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java index 8c1f9e3c30ee..a77a0fe31f0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility class for tracking metrics for various types of coprocessors. Each coprocessor instance * creates its own MetricRegistry which is exported as an individual MetricSource. MetricRegistries @@ -75,7 +73,6 @@ private static String suffix(String metricName, String cpName) { .toString(); } - @VisibleForTesting static MetricRegistryInfo createRegistryInfoForMasterCoprocessor(String clazz) { return new MetricRegistryInfo( suffix(MASTER_COPROC_METRICS_NAME, clazz), @@ -88,7 +85,6 @@ public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { return MetricRegistries.global().create(createRegistryInfoForMasterCoprocessor(clazz)); } - @VisibleForTesting static MetricRegistryInfo createRegistryInfoForRSCoprocessor(String clazz) { return new MetricRegistryInfo( suffix(RS_COPROC_METRICS_NAME, clazz), @@ -101,7 +97,6 @@ public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { return MetricRegistries.global().create(createRegistryInfoForRSCoprocessor(clazz)); } - @VisibleForTesting public static MetricRegistryInfo createRegistryInfoForRegionCoprocessor(String clazz) { return new MetricRegistryInfo( suffix(REGION_COPROC_METRICS_NAME, clazz), @@ -114,7 +109,6 @@ public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { return MetricRegistries.global().create(createRegistryInfoForRegionCoprocessor(clazz)); } - @VisibleForTesting public static MetricRegistryInfo createRegistryInfoForWALCoprocessor(String clazz) { return new MetricRegistryInfo( suffix(WAL_COPROC_METRICS_NAME, clazz), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java index fdeca130600d..9a23ffaa4a87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This is the only implementation of {@link ObserverContext}, which serves as the interface for * third-party Coprocessor developers. 
@@ -98,7 +96,6 @@ public Optional getCaller() { * @return An instance of ObserverContext with the environment set */ @Deprecated - @VisibleForTesting // TODO: Remove this method, ObserverContext should not depend on RpcServer public static ObserverContext createAndPrepare(E env) { ObserverContextImpl ctx = new ObserverContextImpl<>(RpcServer.getRequestUser().orElse(null)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 3f1c6dc7fcee..9eac46f5ea37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -919,7 +919,7 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( /** * Called after checkAndMutate *

- * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index a67447940b9d..600c96cc0267 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -152,7 +152,7 @@ public enum EventType { * C_M_MERGE_REGION
* Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
* C_M_DELETE_TABLE
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index c12c30aad67f..8d2d7e2afb64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -32,13 +32,11 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.hbase.monitoring.ThreadMonitoring; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture; @@ -85,7 +83,6 @@ public ExecutorService(final String servername) { * started with the same name, this throws a RuntimeException. * @param name Name of the service to start. */ - @VisibleForTesting public void startExecutorService(String name, int maxThreads) { Executor hbes = this.executorMap.compute(name, (key, value) -> { if (value != null) { @@ -126,7 +123,6 @@ Executor getExecutor(String name) { return executor; } - @VisibleForTesting public ThreadPoolExecutor getExecutorThreadPool(final ExecutorType type) { return getExecutor(type).getThreadPoolExecutor(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index d06bd54484d7..36958c518a68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -35,6 +35,7 @@ public enum ExecutorType { MASTER_META_SERVER_OPERATIONS (6), M_LOG_REPLAY_OPS (7), MASTER_SNAPSHOT_OPERATIONS (8), + MASTER_MERGE_OPERATIONS (9), // RegionServer executor services RS_OPEN_REGION (20), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index 8cde76e07c60..60a2c6cae13f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -22,12 +22,14 @@ import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.SECONDARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.TERTIARY; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -161,6 +163,7 @@ public List balanceTable(TableName tableName, } @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { Map> assignmentMap; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java index 
a34e3a3183ad..16f0934a2913 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.PRIMARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.SECONDARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.TERTIARY; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -42,7 +43,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @@ -95,7 +96,6 @@ public synchronized void initialize(SnapshotOfRegionAssignmentFromMeta snapshot) datanodeDataTransferPort= getDataNodePort(); } - @VisibleForTesting public int getDataNodePort() { HdfsConfiguration.init(); @@ -282,7 +282,6 @@ public synchronized void deleteFavoredNodesForRegions(Collection reg } } - @VisibleForTesting public synchronized Set getRegionsOfFavoredNode(ServerName serverName) { Set regionInfos = Sets.newHashSet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index 2be48b4aba20..17b04ab1a0bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -82,11 +82,15 @@ public HFileSystem(Configuration conf, boolean useHBaseChecksum) // Create the default filesystem with checksum verification switched on. // By default, any operation to this FilterFileSystem occurs on // the underlying filesystem that has checksums switched on. - this.fs = FileSystem.get(conf); + // This FS#get(URI, conf) clearly indicates in the javadoc that if the FS is + // not created it will initialize the FS and return that created FS. If it is + // already created it will just return the FS that was already created. + // We take pains to funnel all of our FileSystem instantiation through this call to ensure + // we never need to call FS.initialize ourself so that we do not have to track any state to + // avoid calling initialize more than once. 
+ this.fs = FileSystem.get(getDefaultUri(conf), conf); this.useHBaseChecksum = useHBaseChecksum; - fs.initialize(getDefaultUri(conf), conf); - // disable checksum verification for local fileSystem, see HBASE-11218 if (fs instanceof LocalFileSystem) { fs.setWriteChecksum(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index c1f9a7ddaccf..5bbc525b8459 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -21,20 +21,17 @@ import java.io.IOException; import java.io.InputStream; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, @@ -173,13 +170,11 @@ public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOExcepti } /** For use in tests. */ - @VisibleForTesting public FSDataInputStreamWrapper(FSDataInputStream fsdis) { this(fsdis, fsdis); } /** For use in tests. */ - @VisibleForTesting public FSDataInputStreamWrapper(FSDataInputStream fsdis, FSDataInputStream noChecksum) { doCloseStreams = false; stream = fsdis; @@ -294,11 +289,11 @@ public void close() { } updateInputStreamStatistics(this.streamNoFsChecksum); // we do not care about the close exception as it is for reading, no data loss issue. 
- IOUtils.closeQuietly(streamNoFsChecksum); + Closeables.closeQuietly(streamNoFsChecksum); updateInputStreamStatistics(stream); - IOUtils.closeQuietly(stream); + Closeables.closeQuietly(stream); } public HFileSystem getHfs() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index 3634ccb595ff..72da73e1e920 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -22,8 +22,6 @@ import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - @InterfaceAudience.Private public class MetricsIO { @@ -40,12 +38,10 @@ public MetricsIO(MetricsIOWrapper wrapper) { this.wrapper = wrapper; } - @VisibleForTesting public MetricsIOSource getMetricsSource() { return source; } - @VisibleForTesting public MetricsIOWrapper getWrapper() { return wrapper; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java new file mode 100644 index 000000000000..329d80b67a95 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -0,0 +1,1446 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import static java.util.Objects.requireNonNull; + +import java.lang.ref.WeakReference; +import java.util.EnumMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.base.Objects; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This implementation improves performance of the classical LRU + * cache by up to 3 times by reducing garbage collection work. + *

+ * The classical block cache implementation that is memory-aware using {@link HeapSize}, + * memory-bound using an LRU eviction algorithm, and concurrent: backed by + * a {@link ConcurrentHashMap} and with a non-blocking eviction thread giving + * constant-time {@link #cacheBlock} and {@link #getBlock} operations. + *

+ * Contains three levels of block priority to allow for scan-resistance and in-memory families + * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An + * in-memory column family is a column family that should be served from memory if possible): + * single-access, multiple-accesses, and in-memory priority. A block is added with an in-memory + * priority flag if {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptor#isInMemory()}, + * otherwise a block becomes a single access priority the first time it is read into this block + * cache. If a block is accessed again while in cache, it is marked as a multiple access priority + * block. This delineation of blocks is used to prevent scans from thrashing the cache adding a + * least-frequently-used element to the eviction algorithm. + *
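The in-memory priority mentioned above is driven by the column family definition. A minimal, hedged illustration using the public client API referenced in the paragraph (the table and family names are invented for the example and are not part of this patch):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryFamilyExample {
  public static void main(String[] args) {
    // Blocks of this family are cached with the in-memory priority described above.
    ColumnFamilyDescriptor hotFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("hot"))               // hypothetical family name
        .setInMemory(true)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table")) // hypothetical table name
        .setColumnFamily(hotFamily)
        .build();

    System.out.println(table);
  }
}
```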

+ * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each + * priority will retain close to its maximum size, however, if any priority is not using its entire + * chunk the others are able to grow beyond their chunk size. + *

+ * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The + * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It + * is only used for pre-allocating data structures and in initial heap estimation of the map. + *

+ * The detailed constructor defines the sizes for the three priorities (they should total to the + * maximum size defined). It also sets the levels that trigger and control the eviction + * thread. + *

+ * The acceptable size is the cache size level which triggers the eviction process to + * start. It evicts enough blocks to get the size below the minimum size specified. + *
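To make the acceptable and minimum levels concrete, here is a small standalone sketch. The cache and block sizes are illustrative only; the 0.99 and 0.95 factors are the defaults declared later in this class:

```java
public final class EvictionThresholdExample {
  public static void main(String[] args) {
    long maxSize = 1024L * 1024 * 1024;     // 1 GiB cache, illustrative value
    float acceptableFactor = 0.99f;         // eviction starts once the cache grows past this
    float minFactor = 0.95f;                // eviction frees blocks down to this level

    long acceptableSize = (long) (maxSize * acceptableFactor);
    long minSize = (long) (maxSize * minFactor);
    long currentSize = 1070L * 1024 * 1024; // pretend the cache grew past the acceptable size

    if (currentSize > acceptableSize) {
      long bytesToFree = currentSize - minSize;
      System.out.println("an eviction run would free about " + (bytesToFree >> 20) + " MB");
    }
  }
}
```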

+ * Eviction happens in a separate thread and involves a single full-scan of the map. It determines + * how many bytes must be freed to reach the minimum size, and then while scanning determines the + * fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times + * bytes to free). It then uses the priority chunk sizes to evict fairly according to the relative + * sizes and usage. + *
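The fairness step can be sketched in a few lines of plain Java. The bucket sizes below are invented for illustration; the sketch mirrors the bucket-overflow division used by the evict() method later in this class, and leaves out the separate in-memory force mode:

```java
import java.util.Arrays;
import java.util.Comparator;

public final class EvictionFairnessExample {
  public static void main(String[] args) {
    // Illustrative numbers only: {bytes currently in the bucket, the bucket's configured chunk}.
    long[][] buckets = {
        {300L << 20, 225L << 20},  // single-access bucket
        {500L << 20, 450L << 20},  // multi-access bucket
        {100L << 20, 225L << 20},  // in-memory bucket
    };
    long bytesToFree = 120L << 20;

    // Like the PriorityQueue in evict(): visit buckets from least to most overflowing,
    // freeing at most an equal share of what is still left, capped by each bucket's overflow.
    Arrays.sort(buckets, Comparator.<long[]>comparingLong(b -> b[0] - b[1]));
    long bytesFreed = 0;
    int remainingBuckets = buckets.length;
    for (long[] bucket : buckets) {
      long overflow = bucket[0] - bucket[1];
      if (overflow > 0) {
        long share = (bytesToFree - bytesFreed) / remainingBuckets;
        bytesFreed += Math.min(overflow, share);
      }
      remainingBuckets--;
    }
    System.out.println("freed ~" + (bytesFreed >> 20) + " MB of " + (bytesToFree >> 20) + " MB");
  }
}
```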

+ * The adaptive LRU cache speeds things up when we read much more data than fits into the + * BlockCache, which causes a high eviction rate and, in turn, heavy garbage collection work: + * many blocks are put into the BlockCache but never read, while a lot of CPU is spent cleaning + * them up again. We can avoid this situation via the following parameters: + *

+ * hbase.lru.cache.heavy.eviction.count.limit - sets how many eviction runs have to happen + * before we start skipping the caching of data blocks. By default it is 0, which means the + * feature starts working right away. But if the workload mixes short reads of the same data with + * long-term scans, this parameter lets us separate them. For example, if we know that short reads + * usually last about 1 minute, we can set the parameter to about 10 so the feature is enabled + * only for long, massive reads (after ~100 seconds). Short reads then keep all of their blocks + * in the cache (except for ordinary evictions, of course), while long-term heavy reads enable + * the feature after a while and get better performance. + *
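As a small illustration of the tuning described above, the key this class reads could be set programmatically on a Hadoop Configuration (in a real cluster it would normally live in hbase-site.xml); the value 10 is the one from the example in the paragraph:

```java
import org.apache.hadoop.conf.Configuration;

public final class HeavyEvictionTuningExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Skip the feature for roughly the first 10 eviction runs (~100 seconds in the example
    // above) so that short read bursts still get all of their blocks cached.
    conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 10);
    System.out.println(conf.getInt("hbase.lru.cache.heavy.eviction.count.limit", 0));
  }
}
```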

+ * hbase.lru.cache.heavy.eviction.mb.size.limit - sets how many bytes we would like to put into + * the BlockCache (and evict from it) per ~10 seconds. The feature will try to reach this value + * and maintain it. Don't set it too small, because that leads to premature exit from this mode. + * For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB, for an average + * system (~10 cores) 200-300 MB, and some weak systems (2-5 cores) may be fine with 50-100 MB. + * How it works: we set the limit and after each ~10 seconds calculate how many bytes were freed: + * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100; + * For example, if we set the limit = 500 and 2000 MB were evicted, the overhead is: + * 2000 * 100 / 500 - 100 = 300% + * The feature will then reduce the percentage of cached data blocks to bring the evicted bytes + * closer to 100% (500 MB) - a kind of auto-scaling. + * If fewer bytes were freed than the limit, the overhead is negative. + * For example, if 200 MB were freed: + * 200 * 100 / 500 - 100 = -60% + * The feature will then increase the percentage of cached blocks, + * which again brings the evicted bytes closer to 100% (500 MB). + * The current situation can be seen in the RegionServer log: + * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching + * DataBlock (%): 100 < no eviction, 100% of blocks are cached + * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching + * DataBlock (%): 97 < eviction began, caching of blocks reduced by 3% + * This helps to tune the system and find out which value fits best. Don't try to reach 0% + * overhead, it is impossible; an overhead of 50-100% is quite good and + * prevents premature exit from this mode. + *
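The overhead formula above can be checked with a couple of lines of plain Java; the numbers are the ones used in the paragraph:

```java
public final class EvictionOverheadExample {
  public static void main(String[] args) {
    long limitMb = 500;   // hbase.lru.cache.heavy.eviction.mb.size.limit
    long freedMb = 2000;  // MB evicted during the last ~10 second window

    long overhead = freedMb * 100 / limitMb - 100;
    System.out.println(overhead + "%");  // 300% -> eviction is much heavier than the target

    freedMb = 200;
    overhead = freedMb * 100 / limitMb - 100;
    System.out.println(overhead + "%");  // -60% -> below target, caching percent can grow back
  }
}
```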

+ * hbase.lru.cache.heavy.eviction.overhead.coefficient - sets how fast we want to get the + * result. If we know that our reads will be heavy for a long time, we don't want to wait and can + * increase the coefficient to get good performance sooner. But if we aren't sure, we can ramp up + * slowly, which helps prevent a premature exit from this mode. So a higher coefficient gives + * better performance when heavy reading is stable, while a lower coefficient lets the cache + * adapt when the read pattern keeps changing. + * For example, say we set the coefficient = 0.01. The overhead (see above) will be + * multiplied by 0.01 and the result is the percentage by which the caching of blocks is reduced: + * if the overhead = 300% and the coefficient = 0.01, + * then the percent of cached blocks will be reduced by 3%. + * Similar logic applies when the overhead is negative (overshooting). Maybe it is just a + * short-term fluctuation, so we try to stay in this mode; this helps avoid a premature exit + * during short-term fluctuations. The backpressure has simple logic: the more overshooting, + * the more blocks are cached. + *
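Tying the coefficient back to the overhead, here is a simplified, non-authoritative sketch of one auto-scaling step; the real adjustment lives in this class's EvictionThread and also applies back-pressure when the overhead is negative:

```java
public final class CachingPercentAdjustmentExample {
  public static void main(String[] args) {
    int cacheDataBlockPercent = 100;  // start out caching every data block
    float coefficient = 0.01f;        // hbase.lru.cache.heavy.eviction.overhead.coefficient
    long overheadPercent = 300;       // taken from the previous example

    // Positive overhead: we evicted more than the limit, so cache fewer data blocks.
    int change = (int) (overheadPercent * coefficient);   // 300 * 0.01 = 3
    cacheDataBlockPercent = Math.max(1, cacheDataBlockPercent - change);

    System.out.println(cacheDataBlockPercent);            // 97, matching the log example above
  }
}
```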

+ * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 + * + */ +@InterfaceAudience.Private +public class AdaptiveLruBlockCache implements FirstLevelBlockCache { + + private static final Logger LOG = LoggerFactory.getLogger(AdaptiveLruBlockCache.class); + + /** + * Percentage of total size that eviction will evict until; e.g. if set to .8, then we will keep + * evicting during an eviction run till the cache size is down to 80% of the total. + */ + private static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor"; + + /** + * Acceptable size of cache (no evictions if size < acceptable) + */ + private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.acceptable.factor"; + + /** + * Hard capacity limit of cache, will reject any put if size > this * acceptable + */ + static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.hard.capacity.limit.factor"; + private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.single.percentage"; + private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.multi.percentage"; + private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.memory.percentage"; + + /** + * Configuration key to force data-block always (except in-memory are too much) + * cached in memory for in-memory hfile, unlike inMemory, which is a column-family + * configuration, inMemoryForceMode is a cluster-wide configuration + */ + private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = + "hbase.lru.rs.inmemoryforcemode"; + + /* Default Configuration Parameters*/ + + /* Backing Concurrent Map Configuration */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /* Eviction thresholds */ + private static final float DEFAULT_MIN_FACTOR = 0.95f; + static final float DEFAULT_ACCEPTABLE_FACTOR = 0.99f; + + /* Priority buckets */ + private static final float DEFAULT_SINGLE_FACTOR = 0.25f; + private static final float DEFAULT_MULTI_FACTOR = 0.50f; + private static final float DEFAULT_MEMORY_FACTOR = 0.25f; + + private static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f; + + private static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false; + + /* Statistics thread */ + private static final int STAT_THREAD_PERIOD = 60 * 5; + private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; + private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + = "hbase.lru.cache.heavy.eviction.count.limit"; + // Default value actually equal to disable feature of increasing performance. + // Because 2147483647 is about ~680 years (after that it will start to work) + // We can set it to 0-10 and get the profit right now. + // (see details https://issues.apache.org/jira/browse/HBASE-23887). 
+ private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; + + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; + + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; + + /** + * Defined the cache map as {@link ConcurrentHashMap} here, because in + * {@link AdaptiveLruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent + * (key, func). Besides, the func method must execute exactly once only when the key is present + * and under the lock context, otherwise the reference count will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + */ + private transient final ConcurrentHashMap map; + + /** Eviction lock (locked when eviction in process) */ + private transient final ReentrantLock evictionLock = new ReentrantLock(true); + + private final long maxBlockSize; + + /** Volatile boolean to track if we are in an eviction process or not */ + private volatile boolean evictionInProgress = false; + + /** Eviction thread */ + private transient final EvictionThread evictionThread; + + /** Statistics thread schedule pool (for heavy debugging, could remove) */ + private transient final ScheduledExecutorService scheduleThreadPool = + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("AdaptiveLruBlockCacheStatsExecutor").setDaemon(true).build()); + + /** Current size of cache */ + private final AtomicLong size; + + /** Current size of data blocks */ + private final LongAdder dataBlockSize; + + /** Current number of cached elements */ + private final AtomicLong elements; + + /** Current number of cached data block elements */ + private final LongAdder dataBlockElements; + + /** Cache access count (sequential ID) */ + private final AtomicLong count; + + /** hard capacity limit */ + private float hardCapacityLimitFactor; + + /** Cache statistics */ + private final CacheStats stats; + + /** Maximum allowable size of cache (block put if size > max, evict) */ + private long maxSize; + + /** Approximate block size */ + private long blockSize; + + /** Acceptable size of cache (no evictions if size < acceptable) */ + private float acceptableFactor; + + /** Minimum threshold of cache (when evicting, evict until size < min) */ + private float minFactor; + + /** Single access bucket size */ + private float singleFactor; + + /** Multiple access bucket size */ + private float multiFactor; + + /** In-memory bucket size */ + private float memoryFactor; + + /** Overhead of the structure itself */ + private long overhead; + + /** Whether in-memory hfile's data block has higher priority when evicting */ + private boolean forceInMemory; + + /** + * Where to send victims (blocks evicted/missing from the cache). + * This is used only when we use an external cache as L2. 
+ * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + */ + private transient BlockCache victimHandler = null; + + /** Percent of data blocks that are currently cached */ + private volatile int cacheDataBlockPercent; + + /** Number of eviction runs after which we start skipping the caching of blocks */ + private final int heavyEvictionCountLimit; + + /** Eviction volume (MB per period) above which we start skipping the caching of blocks */ + private final long heavyEvictionMbSizeLimit; + + /** Coefficient that adjusts the auto-scaling based on the overhead of the eviction rate */ + private final float heavyEvictionOverheadCoefficient; + + /** + * Default constructor. Specify maximum size and expected average block + * size (approximation is fine). + * + *

All other factors will be calculated based on defaults specified in + * this class. + * + * @param maxSize maximum size of cache, in bytes + * @param blockSize approximate size of each block, in bytes + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize) { + this(maxSize, blockSize,true); + } + + /** + * Constructor used for testing. Allows disabling of the eviction thread. + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, + DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, + false, + DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, + boolean evictionThread, Configuration conf) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, + DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, Configuration conf) { + this(maxSize, blockSize, true, conf); + } + + /** + * Configurable constructor. Use this constructor if not using defaults. 
+ * + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * @param mapConcurrencyLevel initial concurrency factor for backing CHM + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, + int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, + float minFactor, float acceptableFactor, float singleFactor, + float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize, + int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, + float heavyEvictionOverheadCoefficient) { + this.maxBlockSize = maxBlockSize; + if(singleFactor + multiFactor + memoryFactor != 1 || + singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { + throw new IllegalArgumentException("Single, multi, and memory factors " + + " should be non-negative and total 1.0"); + } + if (minFactor >= acceptableFactor) { + throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); + } + if (minFactor >= 1.0f || acceptableFactor >= 1.0f) { + throw new IllegalArgumentException("all factors must be < 1"); + } + this.maxSize = maxSize; + this.blockSize = blockSize; + this.forceInMemory = forceInMemory; + map = new ConcurrentHashMap<>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel); + this.minFactor = minFactor; + this.acceptableFactor = acceptableFactor; + this.singleFactor = singleFactor; + this.multiFactor = multiFactor; + this.memoryFactor = memoryFactor; + this.stats = new CacheStats(this.getClass().getSimpleName()); + this.count = new AtomicLong(0); + this.elements = new AtomicLong(0); + this.dataBlockElements = new LongAdder(); + this.dataBlockSize = new LongAdder(); + this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel); + this.size = new AtomicLong(this.overhead); + this.hardCapacityLimitFactor = hardLimitFactor; + if (evictionThread) { + this.evictionThread = new EvictionThread(this); + this.evictionThread.start(); // FindBugs SC_START_IN_CTOR + } else { + this.evictionThread = null; + } + + // check the bounds + this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; + this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; + this.cacheDataBlockPercent = 100; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 0.1f + ? 1f : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f + ? 0.001f : heavyEvictionOverheadCoefficient; + this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; + + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // every five minutes. 
+ this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, + STAT_THREAD_PERIOD, TimeUnit.SECONDS); + } + + @Override + public void setVictimCache(BlockCache victimCache) { + if (victimHandler != null) { + throw new IllegalArgumentException("The victim cache has already been set"); + } + victimHandler = requireNonNull(victimCache); + } + + @Override + public void setMaxSize(long maxSize) { + this.maxSize = maxSize; + if (this.size.get() > acceptableSize() && !evictionInProgress) { + runEviction(); + } + } + + public int getCacheDataBlockPercent() { + return cacheDataBlockPercent; + } + + /** + * The block cached in AdaptiveLruBlockCache will always be a heap block: on the one hand, + * heap access is faster than off-heap access, so the small index or meta blocks + * cached in CombinedBlockCache benefit a lot. On the other hand, the AdaptiveLruBlockCache size + * is always calculated based on the total heap size, so caching an off-heap block in + * AdaptiveLruBlockCache would mess up the heap accounting. Here we clone the block into a + * heap block if it is an off-heap block, otherwise we just use the original block. The key point + * is to maintain the refCnt of the block (HBASE-22127):
+ * 1. if we cache the cloned heap block, its refCnt is a totally new one, so it is easy to handle;
+ * 2. if we cache the original heap block, we are sure that it won't be tracked in ByteBuffAllocator's + * reservoir; once both the RPC path and the AdaptiveLruBlockCache release the block, it can be garbage + * collected by the JVM, so it needs a retain here. + * @param buf the original block + * @return a block with a heap memory backend. + */ + private Cacheable asReferencedHeapBlock(Cacheable buf) { + if (buf instanceof HFileBlock) { + HFileBlock blk = ((HFileBlock) buf); + if (blk.isSharedMem()) { + return HFileBlock.deepCloneOnHeap(blk); + } + } + // The block will be referenced by this AdaptiveLruBlockCache, + // so we should increase its refCnt here. + return buf.retain(); + } + + // BlockCache implementation + + /** + * Cache the block with the specified name and buffer. + *

+ * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) + * this can happen, for which we compare the buffer contents. + * + * @param cacheKey block's cache key + * @param buf block buffer + * @param inMemory if block is in-memory + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { + + // Some data blocks will not put into BlockCache when eviction rate too much. + // It is good for performance + // (see details: https://issues.apache.org/jira/browse/HBASE-23887) + // How to calculate it can find inside EvictionThread class. + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // It works like filter - blocks which two last digits of offset + // more than we calculate in Eviction Thread will not put into BlockCache + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } + } + + if (buf.heapSize() > maxBlockSize) { + // If there are a lot of blocks that are too + // big this can make the logs way too noisy. + // So we log 2% + if (stats.failInsert() % 50 == 0) { + LOG.warn("Trying to cache too large a block " + + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + + " is " + buf.heapSize() + + " which is larger than " + maxBlockSize); + } + return; + } + + LruCachedBlock cb = map.get(cacheKey); + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, + buf)) { + return; + } + long currentSize = size.get(); + long currentAcceptableSize = acceptableSize(); + long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize); + if (currentSize >= hardLimitSize) { + stats.failInsert(); + if (LOG.isTraceEnabled()) { + LOG.trace("AdaptiveLruBlockCache current size " + StringUtils.byteDesc(currentSize) + + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." + + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) + + ", failed to put cacheKey:" + cacheKey + " into AdaptiveLruBlockCache."); + } + if (!evictionInProgress) { + runEviction(); + } + return; + } + // Ensure that the block is an heap one. + buf = asReferencedHeapBlock(buf); + cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory); + long newSize = updateSizeMetrics(cb, false); + map.put(cacheKey, cb); + long val = elements.incrementAndGet(); + if (buf.getBlockType().isData()) { + dataBlockElements.increment(); + } + if (LOG.isTraceEnabled()) { + long size = map.size(); + assertCounterSanity(size, val); + } + if (newSize > currentAcceptableSize && !evictionInProgress) { + runEviction(); + } + } + + /** + * Sanity-checking for parity between actual block cache content and metrics. + * Intended only for use with TRACE level logging and -ea JVM. + */ + private static void assertCounterSanity(long mapSize, long counterVal) { + if (counterVal < 0) { + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); + return; + } + if (mapSize < Integer.MAX_VALUE) { + double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); + if (pct_diff > 0.05) { + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); + } + } + } + + /** + * Cache the block with the specified name and buffer. + *

+ * TODO after HBASE-22005, we may cache an block which allocated from off-heap, but our LRU cache + * sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce an + * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, + * otherwise the caching size is based on off-heap. + * @param cacheKey block's cache key + * @param buf block buffer + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { + cacheBlock(cacheKey, buf, false); + } + + /** + * Helper function that updates the local size counter and also updates any + * per-cf or per-blocktype metrics it can discern from given + * {@link LruCachedBlock} + */ + private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { + long heapsize = cb.heapSize(); + BlockType bt = cb.getBuffer().getBlockType(); + if (evict) { + heapsize *= -1; + } + if (bt != null && bt.isData()) { + dataBlockSize.add(heapsize); + } + return size.addAndGet(heapsize); + } + + /** + * Get the buffer of the block with the specified name. + * + * @param cacheKey block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block + * (used to avoid double counting cache misses when doing double-check + * locking) + * @param updateCacheMetrics Whether to update cache metrics or not + * + * @return buffer of specified cache key, or null if not in cache + */ + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { + LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { + // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside + // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove + // the block and release, then we're retaining a block with refCnt=0 which is disallowed. + // see HBASE-22422. + val.getBuffer().retain(); + return val; + }); + if (cb == null) { + if (!repeat && updateCacheMetrics) { + stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + // If there is another block cache then try and read there. + // However if this is a retry ( second time in double checked locking ) + // And it's already a miss then the l2 will also be a miss. + if (victimHandler != null && !repeat) { + // The handler will increase result's refCnt for RPC, so need no extra retain. + Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + // Promote this to L1. + if (result != null) { + if (caching) { + cacheBlock(cacheKey, result, /* inMemory = */ false); + } + } + return result; + } + return null; + } + if (updateCacheMetrics) { + stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + cb.access(count.incrementAndGet()); + return cb.getBuffer(); + } + + /** + * Whether the cache contains block with specified cacheKey + * + * @return true if contains the block + */ + @Override + public boolean containsBlock(BlockCacheKey cacheKey) { + return map.containsKey(cacheKey); + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + LruCachedBlock cb = map.get(cacheKey); + return cb != null && evictBlock(cb, false) > 0; + } + + /** + * Evicts all blocks for a specific HFile. This is an + * expensive operation implemented as a linear-time search through all blocks + * in the cache. Ideally this should be a search in a log-access-time map. + * + *

+ * This is used for evict-on-close to remove all blocks of a specific HFile. + * + * @return the number of blocks evicted + */ + @Override + public int evictBlocksByHfileName(String hfileName) { + int numEvicted = 0; + for (BlockCacheKey key : map.keySet()) { + if (key.getHfileName().equals(hfileName)) { + if (evictBlock(key)) { + ++numEvicted; + } + } + } + if (victimHandler != null) { + numEvicted += victimHandler.evictBlocksByHfileName(hfileName); + } + return numEvicted; + } + + /** + * Evict the block, and it will be cached by the victim handler if exists && + * block may be read again later + * + * @param evictedByEvictionProcess true if the given block is evicted by + * EvictionThread + * @return the heap size of evicted block + */ + protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { + LruCachedBlock previous = map.remove(block.getCacheKey()); + if (previous == null) { + return 0; + } + updateSizeMetrics(block, true); + long val = elements.decrementAndGet(); + if (LOG.isTraceEnabled()) { + long size = map.size(); + assertCounterSanity(size, val); + } + if (block.getBuffer().getBlockType().isData()) { + dataBlockElements.decrement(); + } + if (evictedByEvictionProcess) { + // When the eviction of the block happened because of invalidation of HFiles, no need to + // update the stats counter. + stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary()); + if (victimHandler != null) { + victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); + } + } + // Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO + // NOT move this up because if do that then the victimHandler may access the buffer with + // refCnt = 0 which is disallowed. + previous.getBuffer().release(); + return block.heapSize(); + } + + /** + * Multi-threaded call to run the eviction process. + */ + private void runEviction() { + if (evictionThread == null) { + evict(); + } else { + evictionThread.evict(); + } + } + + boolean isEvictionInProgress() { + return evictionInProgress; + } + + long getOverhead() { + return overhead; + } + + /** + * Eviction method. + * + * Evict items in order of use, allowing delete items + * which haven't been used for the longest amount of time. 
+ * + * @return how many bytes were freed + */ + long evict() { + + // Ensure only one eviction at a time + if (!evictionLock.tryLock()) { + return 0; + } + + long bytesToFree = 0L; + + try { + evictionInProgress = true; + long currentSize = this.size.get(); + bytesToFree = currentSize - minSize(); + + if (LOG.isTraceEnabled()) { + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + + StringUtils.byteDesc(currentSize)); + } + + if (bytesToFree <= 0) { + return 0; + } + + // Instantiate priority buckets + BlockBucket bucketSingle + = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti + = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory + = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + + // Scan entire map putting into appropriate buckets + for (LruCachedBlock cachedBlock : map.values()) { + switch (cachedBlock.getPriority()) { + case SINGLE: { + bucketSingle.add(cachedBlock); + break; + } + case MULTI: { + bucketMulti.add(cachedBlock); + break; + } + case MEMORY: { + bucketMemory.add(cachedBlock); + break; + } + } + } + + long bytesFreed = 0; + if (forceInMemory || memoryFactor > 0.999f) { + long s = bucketSingle.totalSize(); + long m = bucketMulti.totalSize(); + if (bytesToFree > (s + m)) { + // this means we need to evict blocks in memory bucket to make room, + // so the single and multi buckets will be emptied + bytesFreed = bucketSingle.free(s); + bytesFreed += bucketMulti.free(m); + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + + " from single and multi buckets"); + } + bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + + " total from all three buckets "); + } + } else { + // this means no need to evict block in memory bucket, + // and we try best to make the ratio between single-bucket and + // multi-bucket is 1:2 + long bytesRemain = s + m - bytesToFree; + if (3 * s <= bytesRemain) { + // single-bucket is small enough that no eviction happens for it + // hence all eviction goes from multi-bucket + bytesFreed = bucketMulti.free(bytesToFree); + } else if (3 * m <= 2 * bytesRemain) { + // multi-bucket is small enough that no eviction happens for it + // hence all eviction goes from single-bucket + bytesFreed = bucketSingle.free(bytesToFree); + } else { + // both buckets need to evict some blocks + bytesFreed = bucketSingle.free(s - bytesRemain / 3); + if (bytesFreed < bytesToFree) { + bytesFreed += bucketMulti.free(bytesToFree - bytesFreed); + } + } + } + } else { + PriorityQueue bucketQueue = new PriorityQueue<>(3); + + bucketQueue.add(bucketSingle); + bucketQueue.add(bucketMulti); + bucketQueue.add(bucketMemory); + + int remainingBuckets = bucketQueue.size(); + + BlockBucket bucket; + while ((bucket = bucketQueue.poll()) != null) { + long overflow = bucket.overflow(); + if (overflow > 0) { + long bucketBytesToFree = + Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + bytesFreed += bucket.free(bucketBytesToFree); + } + remainingBuckets--; + } + } + if (LOG.isTraceEnabled()) { + long single = bucketSingle.totalSize(); + long multi = bucketMulti.totalSize(); + long memory = bucketMemory.totalSize(); + LOG.trace("Block cache LRU eviction completed; " + + "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + + 
"single=" + StringUtils.byteDesc(single) + ", " + + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); + } + } finally { + stats.evict(); + evictionInProgress = false; + evictionLock.unlock(); + return bytesToFree; + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("blockCount", getBlockCount()) + .add("currentSize", StringUtils.byteDesc(getCurrentSize())) + .add("freeSize", StringUtils.byteDesc(getFreeSize())) + .add("maxSize", StringUtils.byteDesc(getMaxSize())) + .add("heapSize", StringUtils.byteDesc(heapSize())) + .add("minSize", StringUtils.byteDesc(minSize())) + .add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())) + .add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())) + .add("singleFactor", singleFactor) + .toString(); + } + + /** + * Used to group blocks into priority buckets. There will be a BlockBucket + * for each priority (single, multi, memory). Once bucketed, the eviction + * algorithm takes the appropriate number of elements out of each according + * to configuration parameters and their relatives sizes. + */ + private class BlockBucket implements Comparable { + + private final String name; + private LruCachedBlockQueue queue; + private long totalSize = 0; + private long bucketSize; + + public BlockBucket(String name, long bytesToFree, long blockSize, long bucketSize) { + this.name = name; + this.bucketSize = bucketSize; + queue = new LruCachedBlockQueue(bytesToFree, blockSize); + totalSize = 0; + } + + public void add(LruCachedBlock block) { + totalSize += block.heapSize(); + queue.add(block); + } + + public long free(long toFree) { + if (LOG.isTraceEnabled()) { + LOG.trace("freeing " + StringUtils.byteDesc(toFree) + " from " + this); + } + LruCachedBlock cb; + long freedBytes = 0; + while ((cb = queue.pollLast()) != null) { + freedBytes += evictBlock(cb, true); + if (freedBytes >= toFree) { + return freedBytes; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(freedBytes) + " from " + this); + } + return freedBytes; + } + + public long overflow() { + return totalSize - bucketSize; + } + + public long totalSize() { + return totalSize; + } + + @Override + public int compareTo(BlockBucket that) { + return Long.compare(this.overflow(), that.overflow()); + } + + @Override + public boolean equals(Object that) { + if (that == null || !(that instanceof BlockBucket)) { + return false; + } + return compareTo((BlockBucket)that) == 0; + } + + @Override + public int hashCode() { + return Objects.hashCode(name, bucketSize, queue, totalSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("totalSize", StringUtils.byteDesc(totalSize)) + .add("bucketSize", StringUtils.byteDesc(bucketSize)) + .toString(); + } + } + + /** + * Get the maximum size of this cache. 
+ * + * @return max size in bytes + */ + + @Override + public long getMaxSize() { + return this.maxSize; + } + + @Override + public long getCurrentSize() { + return this.size.get(); + } + + @Override + public long getCurrentDataSize() { + return this.dataBlockSize.sum(); + } + + @Override + public long getFreeSize() { + return getMaxSize() - getCurrentSize(); + } + + @Override + public long size() { + return getMaxSize(); + } + + @Override + public long getBlockCount() { + return this.elements.get(); + } + + @Override + public long getDataBlockCount() { + return this.dataBlockElements.sum(); + } + + EvictionThread getEvictionThread() { + return this.evictionThread; + } + + /* + * Eviction thread. Sits in waiting state until an eviction is triggered + * when the cache size grows above the acceptable level.
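A minimal sketch of the proportioning arithmetic evict() above uses between the "single" and "multi" buckets when forceInMemory is set (or memoryFactor is close to 1): the bytes that remain cached are split so that single:multi stays near 1:2. The standalone class below is hypothetical and only mirrors the arithmetic in this patch; it ignores that BlockBucket.free() works at block granularity and may free slightly more than requested.

public final class SingleMultiSplitSketch {
  // Returns {bytesFromSingle, bytesFromMulti} for a requested bytesToFree,
  // given the current bucket totals s (single) and m (multi), all in bytes.
  static long[] split(long s, long m, long bytesToFree) {
    long bytesRemain = s + m - bytesToFree;   // what may stay cached overall
    if (3 * s <= bytesRemain) {
      // single already holds no more than 1/3 of the remainder: free only from multi
      return new long[] { 0, bytesToFree };
    } else if (3 * m <= 2 * bytesRemain) {
      // multi already holds no more than 2/3 of the remainder: free only from single
      return new long[] { bytesToFree, 0 };
    }
    // otherwise trim single down to 1/3 of the remainder and take the rest from multi
    long fromSingle = s - bytesRemain / 3;
    return new long[] { fromSingle, bytesToFree - fromSingle };
  }

  public static void main(String[] args) {
    long mb = 1024L * 1024L;
    long[] freed = split(400 * mb, 500 * mb, 300 * mb);
    // frees 200 MB from single and 100 MB from multi, leaving 200 MB : 400 MB = 1:2
    System.out.println((freed[0] / mb) + " MB from single, " + (freed[1] / mb) + " MB from multi");
  }
}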

+   *
+   * The thread is triggered into action by {@link AdaptiveLruBlockCache#runEviction()}
+   */
+  static class EvictionThread extends Thread {
+
+    private WeakReference<AdaptiveLruBlockCache> cache;
+    private volatile boolean go = true;
+    // flag set after entering the run method, used for tests
+    private boolean enteringRun = false;
+
+    public EvictionThread(AdaptiveLruBlockCache cache) {
+      super(Thread.currentThread().getName() + ".AdaptiveLruBlockCache.EvictionThread");
+      setDaemon(true);
+      this.cache = new WeakReference<>(cache);
+    }
+
+    @Override
+    public void run() {
+      enteringRun = true;
+      long freedSumMb = 0;
+      int heavyEvictionCount = 0;
+      int freedDataOverheadPercent = 0;
+      long startTime = System.currentTimeMillis();
+      while (this.go) {
+        synchronized (this) {
+          try {
+            this.wait(1000 * 10/*Don't wait forever*/);
+          } catch (InterruptedException e) {
+            LOG.warn("Interrupted eviction thread ", e);
+            Thread.currentThread().interrupt();
+          }
+        }
+        AdaptiveLruBlockCache cache = this.cache.get();
+        if (cache == null) {
+          break;
+        }
+        freedSumMb += cache.evict()/1024/1024;
+        /*
+         * Sometimes we read more data than can fit into the BlockCache, which causes a high
+         * rate of evictions. This in turn leads to heavy garbage collection work: a lot of
+         * blocks are put into the BlockCache but never read, wasting CPU.
+         * Here we analyze how many bytes were freed and decide whether the time has come to
+         * reduce the share of data blocks we cache. This helps avoid putting too many blocks
+         * into the BlockCache when evict() is very active, and saves CPU for other jobs.
+         * More details: https://issues.apache.org/jira/browse/HBASE-23887
+         */
+
+        // First of all we check how much time has passed since the previous evict() was
+        // launched. It should be almost the same interval (+/- 10s) so that we get
+        // comparable volumes of freed bytes each time.
+        // 10s because this is the default period between evict() runs (see this.wait above).
+        long stopTime = System.currentTimeMillis();
+        if ((stopTime - startTime) > 1000 * 10 - 1) {
+          // Work out what situation we are in. We have the limit
+          // "hbase.lru.cache.heavy.eviction.bytes.size.limit" and can calculate the overhead
+          // relative to it. We use this information to decide how to change the percentage
+          // of cached data blocks.
+          freedDataOverheadPercent =
+            (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100;
+          if (freedSumMb > cache.heavyEvictionMbSizeLimit) {
+            // We are above the limit, but maybe we can ignore it because it will end quite soon.
+            heavyEvictionCount++;
+            if (heavyEvictionCount > cache.heavyEvictionCountLimit) {
+              // It has been going on for a long time, so we have to reduce caching now.
+              // Here we calculate how many blocks we want to skip. It depends on:
+              // 1. Overhead - if the overhead is big we can be more aggressive about
+              // reducing the amount of cached blocks.
+              // 2. How fast we want the result. If we know that heavy reading will last a
+              // long time, we don't want to wait and can increase the coefficient to get
+              // good performance quite soon. But if we are not sure, we can do it slowly,
+              // which helps prevent a premature exit from this mode. So a higher coefficient
+              // gives better performance when heavy reading is stable, while a lower
+              // coefficient adapts better when the read pattern keeps changing.
+              int change =
+                (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient);
+              // But practice shows that reducing by 15% at a time is quite enough.
+              // We are not greedy (greed could lead to a premature exit).
+              change = Math.min(15, change);
+              change = Math.max(0, change); // should never go negative, but check to be sure
+              // This is the key point: here we reduce the % of data blocks we cache
+              cache.cacheDataBlockPercent -= change;
+              // If we go down too deep we have to stop here; at least 1% should always remain.
+              cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent);
+            }
+          } else {
+            // Well, we have got overshooting (we freed less than the limit).
+            // Maybe it is just a short-term fluctuation and we can stay in this mode;
+            // this helps avoid a premature exit during short-term fluctuations.
+            // If the overshoot is less than 90%, we will try to increase the percentage of
+            // cached blocks and hope it is enough.
+            if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) {
+              // Simple logic: the more overshooting, the more blocks we cache (backpressure)
+              int change = (int) (-freedDataOverheadPercent * 0.1 + 1);
+              cache.cacheDataBlockPercent += change;
+              // But it can't be more than 100%, so cap it.
+              cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent);
+            } else {
+              // Looks like heavy reading is over.
+              // Just exit from this mode.
+              heavyEvictionCount = 0;
+              cache.cacheDataBlockPercent = 100;
+            }
+          }
+          LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " +
+            "heavy eviction counter: {}, " +
+            "current caching DataBlock (%): {}",
+            freedSumMb, freedDataOverheadPercent,
+            heavyEvictionCount, cache.cacheDataBlockPercent);
+
+          freedSumMb = 0;
+          startTime = stopTime;
+        }
+      }
+    }
+
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY",
+      justification="This is what we want")
+    public void evict() {
+      synchronized (this) {
+        this.notifyAll();
+      }
+    }
+
+    synchronized void shutdown() {
+      this.go = false;
+      this.notifyAll();
+    }
+
+    /**
+     * Used for tests.
+     */
+    boolean isEnteringRun() {
+      return this.enteringRun;
+    }
+  }
+
+  /*
+   * Statistics thread. Periodically prints the cache statistics to the log.
+   */
+  static class StatisticsThread extends Thread {
+
+    private final AdaptiveLruBlockCache lru;
+
+    public StatisticsThread(AdaptiveLruBlockCache lru) {
+      super("AdaptiveLruBlockCacheStats");
+      setDaemon(true);
+      this.lru = lru;
+    }
+
+    @Override
+    public void run() {
+      lru.logStats();
+    }
+  }
+
+  public void logStats() {
+    // Log size
+    long totalSize = heapSize();
+    long freeSize = maxSize - totalSize;
+    AdaptiveLruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " +
+      "freeSize=" + StringUtils.byteDesc(freeSize) + ", " +
+      "max=" + StringUtils.byteDesc(this.maxSize) + ", " +
+      "blockCount=" + getBlockCount() + ", " +
+      "accesses=" + stats.getRequestCount() + ", " +
+      "hits=" + stats.getHitCount() + ", " +
+      "hitRatio=" + (stats.getHitCount() == 0 ?
+      "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " +
+      "cachingAccesses=" + stats.getRequestCachingCount() + ", " +
+      "cachingHits=" + stats.getHitCachingCount() + ", " +
+      "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ?
+      "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) +
+      "evictions=" + stats.getEvictionCount() + ", " +
+      "evicted=" + stats.getEvictedCount() + ", " +
+      "evictedPerRun=" + stats.evictedPerEviction());
+  }
+
+  /**
+   * Get counter statistics for this cache.
+   *
+   *
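A minimal sketch of the percent-adjustment arithmetic implemented in run() above, with the heavyEvictionCountLimit gating and logging omitted. The class and method names below are hypothetical; only the constants and the formulae are taken from this patch.

public final class CachePercentFeedbackSketch {
  static int next(long freedSumMb, long limitMb, float coefficient, int currentPercent) {
    int overheadPercent = (int) (freedSumMb * 100 / limitMb) - 100;
    if (freedSumMb > limitMb) {
      // heavy eviction: cache fewer data blocks, by at most 15 points per period
      int change = Math.max(0, Math.min(15, (int) (overheadPercent * coefficient)));
      return Math.max(1, currentPercent - change);
    } else if (freedSumMb >= limitMb * 0.1) {
      // still evicting a noticeable amount: cautiously cache more again
      int change = (int) (-overheadPercent * 0.1 + 1);
      return Math.min(100, currentPercent + change);
    }
    return 100; // eviction pressure is gone: cache every data block again
  }

  public static void main(String[] args) {
    // e.g. 500 MB freed against a 200 MB limit with coefficient 0.01 -> overhead 150%
    System.out.println(next(500, 200, 0.01f, 100)); // prints 99 (reduced by min(15, 1))
  }
}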

Includes: total accesses, hits, misses, evicted blocks, and runs + * of the eviction processes. + */ + @Override + public CacheStats getStats() { + return this.stats; + } + + public final static long CACHE_FIXED_OVERHEAD = + ClassSize.estimateBase(AdaptiveLruBlockCache.class, false); + + @Override + public long heapSize() { + return getCurrentSize(); + } + + private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { + // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG + return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + } + + @Override + public Iterator iterator() { + final Iterator iterator = map.values().iterator(); + + return new Iterator() { + private final long now = System.nanoTime(); + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public CachedBlock next() { + final LruCachedBlock b = iterator.next(); + return new CachedBlock() { + @Override + public String toString() { + return BlockCacheUtil.toString(this, now); + } + + @Override + public BlockPriority getBlockPriority() { + return b.getPriority(); + } + + @Override + public BlockType getBlockType() { + return b.getBuffer().getBlockType(); + } + + @Override + public long getOffset() { + return b.getCacheKey().getOffset(); + } + + @Override + public long getSize() { + return b.getBuffer().heapSize(); + } + + @Override + public long getCachedTime() { + return b.getCachedTime(); + } + + @Override + public String getFilename() { + return b.getCacheKey().getHfileName(); + } + + @Override + public int compareTo(CachedBlock other) { + int diff = this.getFilename().compareTo(other.getFilename()); + if (diff != 0) { + return diff; + } + diff = Long.compare(this.getOffset(), other.getOffset()); + if (diff != 0) { + return diff; + } + if (other.getCachedTime() < 0 || this.getCachedTime() < 0) { + throw new IllegalStateException(this.getCachedTime() + ", " + other.getCachedTime()); + } + return Long.compare(other.getCachedTime(), this.getCachedTime()); + } + + @Override + public int hashCode() { + return b.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof CachedBlock) { + CachedBlock cb = (CachedBlock)obj; + return compareTo(cb) == 0; + } else { + return false; + } + } + }; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + // Simple calculators of sizes given factors and maxSize + + long acceptableSize() { + return (long)Math.floor(this.maxSize * this.acceptableFactor); + } + private long minSize() { + return (long)Math.floor(this.maxSize * this.minFactor); + } + private long singleSize() { + return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + } + private long multiSize() { + return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + } + private long memorySize() { + return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); + } + + @Override + public void shutdown() { + if (victimHandler != null) { + victimHandler.shutdown(); + } + this.scheduleThreadPool.shutdown(); + for (int i = 0; i < 10; i++) { + if (!this.scheduleThreadPool.isShutdown()) { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + LOG.warn("Interrupted while sleeping"); + Thread.currentThread().interrupt(); + break; + } + } + } + + if (!this.scheduleThreadPool.isShutdown()) { + 
List runnables = this.scheduleThreadPool.shutdownNow(); + LOG.debug("Still running " + runnables); + } + this.evictionThread.shutdown(); + } + + /** Clears the cache. Used in tests. */ + public void clearCache() { + this.map.clear(); + this.elements.set(0); + } + + /** + * Used in testing. May be very inefficient. + * + * @return the set of cached file names + */ + SortedSet getCachedFileNamesForTest() { + SortedSet fileNames = new TreeSet<>(); + for (BlockCacheKey cacheKey : map.keySet()) { + fileNames.add(cacheKey.getHfileName()); + } + return fileNames; + } + + public Map getEncodingCountsForTest() { + Map counts = new EnumMap<>(DataBlockEncoding.class); + for (LruCachedBlock block : map.values()) { + DataBlockEncoding encoding = ((HFileBlock) block.getBuffer()).getDataBlockEncoding(); + Integer count = counts.get(encoding); + counts.put(encoding, (count == null ? 0 : count) + 1); + } + return counts; + } + + Map getMapForTests() { + return map; + } + + @Override + public BlockCache[] getBlockCaches() { + if (victimHandler != null) { + return new BlockCache[] { this, this.victimHandler }; + } + return null; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java index 2b9732092ce9..19725489a975 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -145,6 +145,8 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) return new LruBlockCache(cacheSize, blockSize, true, c); } else if (policy.equalsIgnoreCase("TinyLFU")) { return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c); + } else if (policy.equalsIgnoreCase("adaptiveLRU")) { + return new AdaptiveLruBlockCache(cacheSize, blockSize, true, c); } else { throw new IllegalArgumentException("Unknown policy: " + policy); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index efc83af6a1be..dcbb71582f44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -27,8 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Stores all of the cache objects and configuration for a single HFile. 
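The BlockCacheFactory hunk above registers the new cache under the policy name "adaptiveLRU". As a rough sketch of how it would be selected, assuming the policy string is read from the hfile.block.cache.policy key (the key name is not shown in this hunk, so treat it as an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class AdaptiveLruPolicyConfigSketch {
  public static Configuration adaptiveLruConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key name; the hunk only shows the "adaptiveLRU" policy string being matched.
    conf.set("hfile.block.cache.policy", "adaptiveLRU");
    return conf;
  }
}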
*/ @@ -283,7 +281,6 @@ public boolean shouldCacheDataOnWrite() { * @param cacheDataOnWrite whether data blocks should be written to the cache * when an HFile is written */ - @VisibleForTesting public void setCacheDataOnWrite(boolean cacheDataOnWrite) { this.cacheDataOnWrite = cacheDataOnWrite; } @@ -329,7 +326,6 @@ public boolean shouldEvictOnClose() { * @param evictOnClose whether blocks should be evicted from the cache when an * HFile reader is closed */ - @VisibleForTesting public void setEvictOnClose(boolean evictOnClose) { this.evictOnClose = evictOnClose; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 5544ecef3741..1fb87133f201 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -25,9 +25,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - - /** * CombinedBlockCache is an abstraction layer that combines * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller lruCache is used @@ -397,7 +394,6 @@ public void setMaxSize(long size) { this.l1Cache.setMaxSize(size); } - @VisibleForTesting public int getRpcRefCount(BlockCacheKey cacheKey) { return (this.l2Cache instanceof BucketCache) ? ((BucketCache) this.l2Cache).getRpcRefCount(cacheKey) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index cae284a1d408..6a2dcf926a4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -204,7 +204,6 @@ void serialize(DataOutputStream outputStream) throws IOException { baos.writeTo(outputStream); } - @org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting HFileProtos.FileTrailerProto toProtobuf() { HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder() .setFileInfoOffset(fileInfoOffset) @@ -613,6 +612,8 @@ private static Class getComparatorClass(String compara comparatorKlass = CellComparatorImpl.class; } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { comparatorKlass = MetaCellComparator.class; } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 46cec4af1f8e..ed0e84deace6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -51,7 +51,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -459,10 +459,8 @@ public interface Reader extends Closeable, CachingBlockReader { DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); - @VisibleForTesting HFileBlock.FSReader getUncachedBlockReader(); - @VisibleForTesting boolean prefetchComplete(); /** @@ -507,7 +505,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion()); } } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index f4fdb9b27326..a02ad7d4e100 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -55,7 +55,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -311,7 +311,6 @@ public int getDeserializerIdentifier() { * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} * @param fileContext HFile meta data */ - @VisibleForTesting public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, @@ -524,7 +523,6 @@ public ByteBuffAllocator getByteBuffAllocator() { return this.allocator; } - @VisibleForTesting private void sanityCheckAssertion(long valueFromBuf, long valueFromField, String fieldName) throws IOException { if (valueFromBuf != valueFromField) { @@ -533,7 +531,6 @@ private void sanityCheckAssertion(long valueFromBuf, long valueFromField, } } - @VisibleForTesting private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField) throws IOException { if (valueFromBuf != valueFromField) { @@ -550,7 +547,6 @@ private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromFie * thread-safe, because it alters the internal buffer pointer. * Used by tests only. 
*/ - @VisibleForTesting void sanityCheck() throws IOException { // Duplicate so no side-effects ByteBuff dup = this.buf.duplicate().rewind(); @@ -839,7 +835,6 @@ EncodingState getEncodingState() { /** * @param dataBlockEncoder data block encoding algorithm to use */ - @VisibleForTesting public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) { this(dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP); } @@ -1402,7 +1397,6 @@ static class FSReaderImpl implements FSReader { private long fileSize; /** The size of the header */ - @VisibleForTesting protected final int hdrSize; /** The filesystem used to access data */ @@ -1693,7 +1687,6 @@ private ByteBuff allocate(int size, boolean intoHeap) { * @param intoHeap allocate the ByteBuff of block from heap or off-heap. * @return the HFileBlock or null if there is a HBase checksum mismatch */ - @VisibleForTesting protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, boolean intoHeap) throws IOException { @@ -1851,7 +1844,6 @@ public String toString() { } /** An additional sanity-check in case no compression or encryption is being used. */ - @VisibleForTesting void sanityCheckUncompressed() throws IOException { if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) { @@ -1973,7 +1965,6 @@ DataBlockEncoding getDataBlockEncoding() { return DataBlockEncoding.NONE; } - @VisibleForTesting byte getChecksumType() { return this.fileContext.getChecksumType().getCode(); } @@ -1983,7 +1974,6 @@ int getBytesPerChecksum() { } /** @return the size of data on disk + header. Excludes checksum. */ - @VisibleForTesting int getOnDiskDataSizeWithHeader() { return this.onDiskDataSizeWithHeader; } @@ -2022,7 +2012,6 @@ public static int headerSize(boolean usesHBaseChecksum) { /** * Return the appropriate DUMMY_HEADER for the minor version */ - @VisibleForTesting // TODO: Why is this in here? byte[] getDummyHeaderForVersion() { return getDummyHeaderForVersion(this.fileContext.isUseHBaseChecksum()); @@ -2048,7 +2037,6 @@ HFileContext getHFileContext() { * This is mostly helpful for debugging. This assumes that the block * has minor version > 0. 
*/ - @VisibleForTesting static String toStringHeader(ByteBuff buf) throws IOException { byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)]; buf.get(magicBuf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 330ef6fed003..072e5b10628a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -44,7 +44,11 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; @@ -62,6 +66,9 @@ */ @InterfaceAudience.Private public class HFileInfo implements SortedMap { + + private static final Logger LOG = LoggerFactory.getLogger(HFileInfo.class); + static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); @@ -342,9 +349,10 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); - } catch (Throwable t) { context.getInputStreamWrapper().unbuffer(); - IOUtils.closeQuietly(context.getInputStreamWrapper()); + } catch (Throwable t) { + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } @@ -355,28 +363,37 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr */ public void initMetaAndIndex(HFile.Reader reader) throws IOException { ReaderContext context = reader.getContext(); - HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); - // Initialize an block iterator, and parse load-on-open blocks in the following. - blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); - // Data index. We also read statistics about the block index written after - // the root level. - this.dataIndexReader = new HFileBlockIndex - .CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), - trailer.getDataIndexCount()); - reader.setDataBlockIndexReader(dataIndexReader); - // Meta index. - this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); - metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + try { + HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); + // Initialize an block iterator, and parse load-on-open blocks in the following. + blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), + context.getFileSize() - trailer.getTrailerSize()); + // Data index. We also read statistics about the block index written after + // the root level. 
+ this.dataIndexReader = + new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader + .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + reader.setDataBlockIndexReader(dataIndexReader); + // Meta index. + this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); + metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getMetaIndexCount()); - reader.setMetaBlockIndexReader(metaIndexReader); - loadMetaInfo(blockIter, hfileContext); - reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); - // Load-On-Open info - HFileBlock b; - while ((b = blockIter.nextBlock()) != null) { - loadOnOpenBlocks.add(b); + reader.setMetaBlockIndexReader(metaIndexReader); + loadMetaInfo(blockIter, hfileContext); + reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); + // Load-On-Open info + HFileBlock b; + while ((b = blockIter.nextBlock()) != null) { + loadOnOpenBlocks.add(b); + } + // close the block reader + context.getInputStreamWrapper().unbuffer(); + } catch (Throwable t) { + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); + throw new CorruptHFileException( + "Problem reading data index and meta index from file " + context.getFilePath(), t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 93d85af677b8..02efa8e89863 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -322,16 +322,16 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { // scan over file and read key/value's and check if requested HFileScanner scanner = reader.getScanner(false, false, false); fileStats = new KeyValueStatsCollector(); - boolean shouldScanKeysValues = false; - if (this.isSeekToRow) { + boolean shouldScanKeysValues; + if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) { // seek to the first kv on this row - shouldScanKeysValues = - (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); + shouldScanKeysValues = (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); } else { shouldScanKeysValues = scanner.seekTo(); } - if (shouldScanKeysValues) + if (shouldScanKeysValues) { scanKeysValues(file, fileStats, scanner, row); + } } // print meta data diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index de0b15feebb8..1ed1bb5d19ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import 
org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -52,7 +52,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implementation that can handle all hfile versions of {@link HFile.Reader}. @@ -233,7 +232,6 @@ public CellComparator getComparator() { return this.hfileContext.getCellComparator(); } - @VisibleForTesting public Compression.Algorithm getCompressionAlgorithm() { return trailer.getCompressionCodec(); } @@ -322,6 +320,7 @@ protected static class HFileScannerImpl implements HFileScanner { private long currMemstoreTS; protected final HFile.Reader reader; private int currTagsLen; + private short rowLen; // buffer backed keyonlyKV private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue(); // A pair for reusing in blockSeek() so that we don't garbage lot of objects @@ -446,6 +445,7 @@ protected void readKeyValueLen() { this.currKeyLen = (int)(ll >> Integer.SIZE); this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); + this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value int p = (Bytes.SIZEOF_LONG + currKeyLen + currValueLen); if (reader.getFileContext().isIncludesTags()) { @@ -554,8 +554,9 @@ protected int blockSeek(Cell key, boolean seekBefore) { + " path=" + reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; + this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair); - bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen); + bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen); int comp = PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); offsetFromPos += klen + vlen; @@ -790,23 +791,28 @@ public Cell getCell() { // we can handle the 'no tags' case. if (currTagsLen > 0) { ret = new SizeCachedKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } else { ret = new SizeCachedNoTagsKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } } else { ByteBuffer buf = blockBuffer.asSubByteBuffer(cellBufSize); if (buf.isDirect()) { - ret = currTagsLen > 0 ? new ByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId); + ret = currTagsLen > 0 + ? 
new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen) + : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen); } else { if (currTagsLen > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } } } @@ -1060,7 +1066,7 @@ public String getValueString() { public int compareKey(CellComparator comparator, Cell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); - this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen); + this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv); } @@ -1619,7 +1625,6 @@ public HFileContext getFileContext() { * not completed, true otherwise */ @Override - @VisibleForTesting public boolean prefetchComplete() { return PrefetchExecutor.isCompleted(path); } @@ -1637,7 +1642,6 @@ public boolean prefetchComplete() { * @return Scanner on this file. */ @Override - @VisibleForTesting public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { return getScanner(cacheBlocks, pread, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 60aa65d4d74f..b1a98487bdc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -54,8 +54,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Common functionality needed by all versions of {@link HFile} writers. 
*/ @@ -784,7 +782,6 @@ public void beforeShipped() throws IOException { } } - @VisibleForTesting public Cell getLastCell() { return lastCell; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java index c58d5b8ce077..1f903cfbea64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A builder that helps in building up the ReaderContext @@ -82,7 +81,6 @@ public ReaderContextBuilder withReaderType(ReaderType type) { return this; } - @VisibleForTesting public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath) throws IOException { this.withFileSystem(fs) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java index a90c5a33db6d..a0dc30c52423 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; @@ -69,7 +68,6 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { private BlockCache victimCache; - @VisibleForTesting final Cache cache; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index a205d27f8728..a84d8128207d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -49,7 +49,6 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -79,7 +78,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -119,7 +117,6 @@ public class BucketCache implements BlockCache, HeapSize { private static final boolean STRONG_REF_DEFAULT = false; /** Priority buckets */ - @VisibleForTesting static final float DEFAULT_SINGLE_FACTOR = 0.25f; static final float DEFAULT_MULTI_FACTOR = 0.50f; static final float DEFAULT_MEMORY_FACTOR = 0.25f; @@ -141,10 +138,8 @@ public 
class BucketCache implements BlockCache, HeapSize { transient final IOEngine ioEngine; // Store the block in this map before writing it to cache - @VisibleForTesting transient final RAMCache ramCache; // In this map, store the block's meta data like offset, length - @VisibleForTesting transient ConcurrentHashMap backingMap; /** @@ -161,9 +156,7 @@ public class BucketCache implements BlockCache, HeapSize { * WriterThread when it runs takes whatever has been recently added and 'drains' the entries * to the BucketCache. It then updates the ramCache and backingMap accordingly. */ - @VisibleForTesting transient final ArrayList> writerQueues = new ArrayList<>(); - @VisibleForTesting transient final WriterThread[] writerThreads; /** Volatile boolean to track if free space is in process or not */ @@ -185,7 +178,6 @@ public class BucketCache implements BlockCache, HeapSize { * bucket cache will skip some blocks when caching. If the flag is true, we * will wait until blocks are flushed to IOEngine. */ - @VisibleForTesting boolean wait_when_cache = false; private final BucketCacheStats cacheStats = new BucketCacheStats(); @@ -209,7 +201,6 @@ public class BucketCache implements BlockCache, HeapSize { * The purpose of this is to avoid freeing the block which is being read. *

*/ - @VisibleForTesting transient final IdReadWriteLock offsetLock; private final NavigableSet blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> { @@ -352,14 +343,12 @@ private void sanityCheckConfigs() { * Called by the constructor to start the writer threads. Used by tests that need to override * starting the threads. */ - @VisibleForTesting protected void startWriterThreads() { for (WriterThread thread : writerThreads) { thread.start(); } } - @VisibleForTesting boolean isCacheEnabled() { return this.cacheEnabled; } @@ -556,7 +545,6 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, return null; } - @VisibleForTesting void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber) { bucketAllocator.freeBlock(bucketEntry.offset()); realCacheSize.add(-1 * bucketEntry.getLength()); @@ -681,7 +669,6 @@ public long acceptableSize() { return (long) Math.floor(bucketAllocator.getTotalSize() * acceptableFactor); } - @VisibleForTesting long getPartitionSize(float partitionFactor) { return (long) Math.floor(bucketAllocator.getTotalSize() * partitionFactor * minFactor); } @@ -876,7 +863,6 @@ private void freeSpace(final String why) { } // This handles flushing the RAM cache to IOEngine. - @VisibleForTesting class WriterThread extends Thread { private final BlockingQueue inputQueue; private volatile boolean writerEnabled = true; @@ -887,7 +873,6 @@ class WriterThread extends Thread { } // Used for test - @VisibleForTesting void disableWriter() { this.writerEnabled = false; } @@ -947,7 +932,6 @@ private void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { * interference expected. * @throws InterruptedException */ - @VisibleForTesting void doDrain(final List entries) throws InterruptedException { if (entries.isEmpty()) { return; @@ -1055,7 +1039,6 @@ void doDrain(final List entries) throws InterruptedException { * @param q The queue to take from. * @return {@code receptacle} laden with elements taken from the queue or empty if none found. */ - @VisibleForTesting static List getRAMQueueEntries(BlockingQueue q, List receptacle) throws InterruptedException { // Clear sets all entries to null and sets size to 0. We retain allocations. 
Presume it @@ -1349,7 +1332,6 @@ public long totalSize() { /** * Block Entry stored in the memory with key,data and so on */ - @VisibleForTesting static class RAMQueueEntry { private final BlockCacheKey key; private final Cacheable data; @@ -1531,7 +1513,6 @@ public BlockCache[] getBlockCaches() { return null; } - @VisibleForTesting public int getRpcRefCount(BlockCacheKey cacheKey) { BucketEntry bucketEntry = backingMap.get(cacheKey); if (bucketEntry != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index 2cdfc80a39c6..e4a2c0b1aeaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -27,7 +27,6 @@ import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -36,7 +35,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -154,7 +152,6 @@ public Cacheable read(BucketEntry be) throws IOException { return be.wrapAsCacheable(dstBuff); } - @VisibleForTesting void closeFileChannels() { for (FileChannel fileChannel: fileChannels) { try { @@ -283,12 +280,10 @@ private int getFileNum(long offset) { return fileNum; } - @VisibleForTesting FileChannel[] getFileChannels() { return fileChannels; } - @VisibleForTesting void refreshFileConnection(int accessFileNum, IOException ioe) throws IOException { ReentrantLock channelLock = channelLocks[accessFileNum]; channelLock.lock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index 471eb469b7e5..910498040e07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -234,7 +234,7 @@ public static long getOnHeapCacheSize(final Configuration conf) { } /** - * @param conf used to read config for bucket cache size. (< 1 is treated as % and > is treated as MiB) + * @param conf used to read config for bucket cache size. * @return the number of bytes to use for bucket cache, negative if disabled. 
*/ public static long getBucketCacheSize(final Configuration conf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 5ed3d2ef43f3..9444cd0dee99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; @@ -30,6 +29,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.handler.codec.CorruptedFrameException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -124,10 +124,8 @@ private void handleTooBigRequest(ByteBuf in) throws IOException { RPCProtos.RequestHeader header = getHeader(in, headerSize); // Notify the client about the offending request - NettyServerCall reqTooBig = - new NettyServerCall(header.getCallId(), connection.service, null, null, null, null, - connection, 0, connection.addr, System.currentTimeMillis(), 0, - connection.rpcServer.bbAllocator, connection.rpcServer.cellBlockBuilder, null); + NettyServerCall reqTooBig = connection.createCall(header.getCallId(), connection.service, null, + null, null, null, 0, connection.addr, 0, null); connection.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index eab2a0ec85c7..1d3981f78846 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -23,22 +23,16 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.HBasePolicyProvider; import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hbase.thirdparty.io.netty.bootstrap.ServerBootstrap; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelInitializer; @@ -124,7 +118,7 @@ protected void initChannel(Channel ch) throws Exception { this.scheduler.init(new RpcSchedulerContext(this)); } - 
@VisibleForTesting + @InterfaceAudience.Private protected NettyRpcServerPreambleHandler createNettyRpcServerPreambleHandler() { return new NettyRpcServerPreambleHandler(NettyRpcServer.this); } @@ -182,21 +176,4 @@ public int getNumOpenConnections() { // allChannels also contains the server channel, so exclude that from the count. return channelsCount > 0 ? channelsCount - 1 : channelsCount; } - - @Override - public Pair call(BlockingService service, - MethodDescriptor md, Message param, CellScanner cellScanner, - long receiveTime, MonitoredRPCHandler status) throws IOException { - return call(service, md, param, cellScanner, receiveTime, status, - System.currentTimeMillis(), 0); - } - - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { - NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null, - -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null); - return call(fakeCall, status); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java index bac19f1bb114..855cf2fda4d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java @@ -26,7 +26,6 @@ import java.nio.ByteBuffer; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Handle connection preamble. @@ -58,7 +57,6 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep p.remove("preambleDecoder"); } - @VisibleForTesting protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) { return new NettyServerRpcConnection(rpcServer, channel); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index cace5f0240f4..b0e8b7d3d5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -33,7 +33,6 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.LongAdder; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CallQueueTooBigException; @@ -68,7 +67,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -200,7 +198,7 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length"; protected static final String KEY_WORD_TRUNCATED = " "; - protected static final Gson GSON = GsonUtil.createGson().create(); + protected static final Gson GSON = GsonUtil.createGsonWithDisableHtmlEscaping().create(); protected final int maxRequestSize; protected final int warnResponseTime; @@ -551,7 +549,6 @@ void logResponse(Message param, 
String methodName, String call, boolean tooLarge * @param strParam stringifiedParam to be truncated * @return truncated trace log string */ - @VisibleForTesting String truncateTraceLog(String strParam) { if (LOG.isTraceEnabled()) { int traceLogMaxLength = getConf().getInt(TRACE_LOG_MAX_LENGTH, DEFAULT_TRACE_LOG_MAX_LENGTH); @@ -825,4 +822,7 @@ public void setNamedQueueRecorder(NamedQueueRecorder namedQueueRecorder) { this.namedQueueRecorder = namedQueueRecorder; } + protected boolean needAuthorization() { + return authorize; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index 99e01885b5bc..ee6e57a2a9f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -21,20 +21,16 @@ import java.io.IOException; import java.net.InetSocketAddress; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @InterfaceAudience.Private @@ -48,22 +44,6 @@ public interface RpcServerInterface { void setSocketSendBufSize(int size); InetSocketAddress getListenerAddress(); - /** - * @deprecated As of release 1.3, this will be removed in HBase 3.0 - */ - @Deprecated - Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException; - - /** - * @deprecated As of release 2.0, this will be removed in HBase 3.0 - */ - @Deprecated - Pair call(BlockingService service, MethodDescriptor md, Message param, - CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, - int timeout) throws IOException; - Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; @@ -86,7 +66,6 @@ Pair call(RpcCall call, MonitoredRPCHandler status) * Refresh authentication manager policy. 
* @param pp */ - @VisibleForTesting void refreshAuthManager(Configuration conf, PolicyProvider pp); RpcScheduler getScheduler(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index d20e28f8c786..a5c8a3920b17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -26,25 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.ByteBufferUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.util.StringUtils; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries @@ -217,10 +219,14 @@ public String toShortString() { } @Override - public synchronized void setResponse(Message m, final CellScanner cells, - Throwable t, String errorMsg) { - if (this.isError) return; - if (t != null) this.isError = true; + public synchronized void setResponse(Message m, final CellScanner cells, Throwable t, + String errorMsg) { + if (this.isError) { + return; + } + if (t != null) { + this.isError = true; + } BufferChain bc = null; try { ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder(); @@ -385,9 +391,10 @@ private static ByteBuffer createHeaderAndMessageBytes(Message result, Message he return pbBuf; } - protected BufferChain wrapWithSasl(BufferChain bc) - throws IOException { - if (!this.connection.useSasl) return bc; + protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { + if (!this.connection.useSasl) { + return bc; + } // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. 
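Editorial aside, not part of the patch: the "BIG UGLY COPY" comment retained above is forced by the javax.security.sasl API, which wraps byte arrays rather than ByteBuffers. A minimal sketch of the wrap step (the helper class and method names here are hypothetical):

import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;

final class SaslWrapSketch {
  private SaslWrapSketch() {
  }

  // SaslServer#wrap only accepts a byte[], so a response held as a chain of
  // ByteBuffers must first be flattened into one array before wrapping.
  static byte[] wrapResponse(SaslServer saslServer, byte[] plainResponse) throws SaslException {
    return saslServer.wrap(plainResponse, 0, plainResponse.length);
  }
}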
byte [] responseBytes = bc.getBytes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 0226de4792c9..422003e1a6a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -449,7 +449,7 @@ public void processOneRpc(ByteBuff buf) throws IOException, } else { processConnectionHeader(buf); this.connectionHeaderRead = true; - if (!authorizeConnection()) { + if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. throw new AccessDeniedException("Connection from " + this + " for service " + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index f3f78073dc5d..38c771277360 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -40,22 +40,15 @@ import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -307,7 +300,7 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfM // If the connectionManager can't take it, close the connection. if (c == null) { if (channel.isOpen()) { - IOUtils.cleanup(null, channel); + IOUtils.cleanupWithLogger(LOG, channel); } continue; } @@ -416,10 +409,12 @@ protected void closeConnection(SimpleServerRpcConnection connection) { @Override public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } - /** Starts the service. Must be called before any calls will be handled. */ + /** Starts the service. Must be called before any calls will be handled. */ @Override public synchronized void start() { - if (started) return; + if (started) { + return; + } authTokenSecretMgr = createSecretManager(); if (authTokenSecretMgr != null) { setSecretManager(authTokenSecretMgr); @@ -433,7 +428,7 @@ public synchronized void start() { started = true; } - /** Stops the service. No new calls will be handled after this is called. */ + /** Stops the service. No new calls will be handled after this is called. 
*/ @Override public synchronized void stop() { LOG.info("Stopping server on " + port); @@ -449,10 +444,9 @@ public synchronized void stop() { notifyAll(); } - /** Wait for the server to be stopped. - * Does not wait for all subthreads to finish. - * See {@link #stop()}. - * @throws InterruptedException e + /** + * Wait for the server to be stopped. Does not wait for all subthreads to finish. + * @see #stop() */ @Override public synchronized void join() throws InterruptedException { @@ -475,23 +469,6 @@ public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException { - return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(), - 0); - } - - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { - SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner, - null, -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null, null); - return call(fakeCall, status); - } - /** * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. @@ -503,13 +480,14 @@ public Pair call(BlockingService service, MethodDescriptor * @param channel writable byte channel to write to * @param bufferChain Chain of buffers to write * @return number of bytes written - * @throws java.io.IOException e * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) */ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain) - throws IOException { - long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); - if (count > 0) this.metrics.sentBytes(count); + throws IOException { + long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); + if (count > 0) { + this.metrics.sentBytes(count); + } return count; } @@ -523,22 +501,20 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ - public static void bind(ServerSocket socket, InetSocketAddress address, - int backlog) throws IOException { + public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) + throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = - new BindException("Problem binding to " + address + " : " + - e.getMessage()); + new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. 
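Editorial aside, not part of the patch: the new RpcServer#needAuthorization() accessor, combined with the guarded call in ServerRpcConnection#processOneRpc (if (rpcServer.needAuthorization() && !authorizeConnection())), means the per-connection ACL check is skipped entirely when authorization is disabled. A simplified, self-contained sketch of that pattern follows; the class is a stand-in, and the configuration key is an assumption about what backs the server's authorize flag.

import org.apache.hadoop.conf.Configuration;

class AuthorizationGuardSketch {
  private final boolean authorize;

  AuthorizationGuardSketch(Configuration conf) {
    // Assumed key: Hadoop's service-level authorization switch.
    this.authorize = conf.getBoolean("hadoop.security.authorization", false);
  }

  boolean needAuthorization() {
    return authorize;
  }

  void afterConnectionHeaderRead() {
    // Only pay for the per-connection ACL lookup when authorization is enabled,
    // mirroring the guarded call introduced in ServerRpcConnection.
    if (needAuthorization() && !authorizeConnection()) {
      throw new IllegalStateException("connection not authorized");
    }
  }

  private boolean authorizeConnection() {
    return true; // placeholder for the real ServiceAuthorizationManager check
  }
}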
if ("Unresolved address".equals(e.getMessage())) { - throw new UnknownHostException("Invalid hostname for server: " + - address.getHostName()); + throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } throw e; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java index 2e7baae27c39..2f75560dae8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java @@ -32,7 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -157,7 +156,6 @@ public String getFromCacheOrFetch() { return getClusterId(); } - @VisibleForTesting public int getCacheStats() { return cacheMisses.get(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java index 56a1f3378ded..0f7153ba8014 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.List; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServiceNotRunningException; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.NonceKey; @@ -82,8 +82,7 @@ public interface ClusterSchema { * @param nonceKey A unique identifier for this operation from the client or process. * @param latch A latch to block on for precondition validation * @return procedure id - * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException} - * as well as {@link IOException} + * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) throws IOException; @@ -93,8 +92,7 @@ long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, * @param nonceKey A unique identifier for this operation from the client or process. * @param latch A latch to block on for precondition validation * @return procedure id - * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException} - * as well as {@link IOException} + * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) throws IOException; @@ -105,8 +103,7 @@ long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, Procedur * @param nonceKey A unique identifier for this operation from the client or process. 
* @param latch A latch to block on for precondition validation * @return procedure id - * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException} - * as well as {@link IOException} + * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long deleteNamespace(String name, NonceKey nonceKey, ProcedurePrepareLatch latch) throws IOException; @@ -115,8 +112,7 @@ long deleteNamespace(String name, NonceKey nonceKey, ProcedurePrepareLatch latch * Get a Namespace * @param name Name of the Namespace * @return Namespace descriptor for name - * @throws IOException Throws {@link ClusterSchemaException} and {@link InterruptedIOException} - * as well as {@link IOException} + * @throws IOException if namespace does not exist */ // No Future here because presumption is that the request will go against cached metadata so // return immediately -- no need of running a Procedure. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index c527bc028263..0471fabe3489 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -33,9 +33,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - - /** * Class to hold dead servers list and utility querying dead server list. * Servers are added when they expire or when we find them in filesystem on startup. @@ -59,13 +56,6 @@ public class DeadServer { */ private final Map deadServers = new HashMap<>(); - /** - * Set of dead servers currently being processed by a SCP. - * Added to this list at the start of SCP and removed after it is done - * processing the crash. - */ - private final Set processingServers = new HashSet<>(); - /** * @param serverName server name. * @return true if this server is on the dead servers list false otherwise @@ -74,17 +64,6 @@ public synchronized boolean isDeadServer(final ServerName serverName) { return deadServers.containsKey(serverName); } - /** - * Checks if there are currently any dead servers being processed by the - * master. Returns true if at least one region server is currently being - * processed as dead. - * - * @return true if any RS are being processed as dead - */ - synchronized boolean areDeadServersInProgress() { - return !processingServers.isEmpty(); - } - public synchronized Set copyServerNames() { Set clone = new HashSet<>(deadServers.size()); clone.addAll(deadServers.keySet()); @@ -96,29 +75,6 @@ public synchronized Set copyServerNames() { */ synchronized void putIfAbsent(ServerName sn) { this.deadServers.putIfAbsent(sn, EnvironmentEdgeManager.currentTime()); - processing(sn); - } - - /** - * Add sn< to set of processing deadservers. - * @see #finish(ServerName) - */ - public synchronized void processing(ServerName sn) { - if (processingServers.add(sn)) { - // Only log on add. - LOG.debug("Processing {}; numProcessing={}", sn, processingServers.size()); - } - } - - /** - * Complete processing for this dead server. - * @param sn ServerName for the dead server. 
- * @see #processing(ServerName) - */ - public synchronized void finish(ServerName sn) { - if (processingServers.remove(sn)) { - LOG.debug("Removed {} from processing; numProcessing={}", sn, processingServers.size()); - } } public synchronized int size() { @@ -179,17 +135,12 @@ public synchronized String toString() { // Display unified set of servers from both maps Set servers = new HashSet<>(); servers.addAll(deadServers.keySet()); - servers.addAll(processingServers); StringBuilder sb = new StringBuilder(); for (ServerName sn : servers) { if (sb.length() > 0) { sb.append(", "); } sb.append(sn.toString()); - // Star entries that are being processed - if (processingServers.contains(sn)) { - sb.append("*"); - } } return sb.toString(); } @@ -228,9 +179,6 @@ public synchronized Date getTimeOfDeath(final ServerName deadServerName){ * @return true if this server was removed */ public synchronized boolean removeDeadServer(final ServerName deadServerName) { - Preconditions.checkState(!processingServers.contains(deadServerName), - "Asked to remove server still in processingServers set " + deadServerName + - " (numProcessing=" + processingServers.size() + ")"); return this.deadServers.remove(deadServerName) != null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c87f144fc876..9911f014d639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -29,7 +29,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; -import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -49,19 +48,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; import java.util.stream.Collectors; -import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -75,13 +68,16 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; @@ -91,9 +87,9 @@ 
import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -114,11 +110,8 @@ import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure; @@ -199,7 +192,6 @@ import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HFileArchiveUtil; @@ -224,93 +216,39 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Service; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * HMaster is the "master server" for HBase. An HBase cluster has one active - * master. If many masters are started, all compete. Whichever wins goes on to - * run the cluster. All others park themselves in their constructor until - * master or cluster shutdown or until the active master loses its lease in - * zookeeper. Thereafter, all running master jostle to take over master role. - * - *

The Master can be asked shutdown the cluster. See {@link #shutdown()}. In - * this case it will tell all regionservers to go down and then wait on them - * all reporting in that they are down. This master will then shut itself down. - * - *

You can also shutdown just this master. Call {@link #stopMaster()}. - * + * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many masters + * are started, all compete. Whichever wins goes on to run the cluster. All others park themselves + * in their constructor until master or cluster shutdown or until the active master loses its lease + * in zookeeper. Thereafter, all running master jostle to take over master role. + *

+ The Master can be asked to shut down the cluster. See {@link #shutdown()}. In this case it will tell + * all regionservers to go down and then wait on them all reporting in that they are down. This + * master will then shut itself down. + *

+ * You can also shutdown just this master. Call {@link #stopMaster()}. * @see org.apache.zookeeper.Watcher */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings("deprecation") public class HMaster extends HRegionServer implements MasterServices { - private static Logger LOG = LoggerFactory.getLogger(HMaster.class); - /** - * Protection against zombie master. Started once Master accepts active responsibility and - * starts taking over responsibilities. Allows a finite time window before giving up ownership. - */ - private static class InitializationMonitor extends Thread { - /** The amount of time in milliseconds to sleep before checking initialization status. */ - public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; - public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); - - /** - * When timeout expired and initialization has not complete, call {@link System#exit(int)} when - * true, do nothing otherwise. - */ - public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; - public static final boolean HALT_DEFAULT = false; - - private final HMaster master; - private final long timeout; - private final boolean haltOnTimeout; - - /** Creates a Thread that monitors the {@link #isInitialized()} state. */ - InitializationMonitor(HMaster master) { - super("MasterInitializationMonitor"); - this.master = master; - this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); - this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); - this.setDaemon(true); - } - - @Override - public void run() { - try { - while (!master.isStopped() && master.isActiveMaster()) { - Thread.sleep(timeout); - if (master.isInitialized()) { - LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); - } else { - LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" - + " consider submitting a bug report including a thread dump of this process."); - if (haltOnTimeout) { - LOG.error("Zombie Master exiting. Thread dump to stdout"); - Threads.printThreadInfo(System.out, "Zombie HMaster"); - System.exit(-1); - } - } - } - } catch (InterruptedException ie) { - LOG.trace("InitMonitor thread interrupted. Existing."); - } - } - } + private static final Logger LOG = LoggerFactory.getLogger(HMaster.class); // MASTER is name of the webapp and the attribute name used stuffing this //instance into web context. 
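Editorial aside, not part of the patch: the reworded class comment distinguishes shutting down the whole cluster (HMaster#shutdown()) from stopping only the active master (HMaster#stopMaster()). For readers, those two paths are reachable through the public client Admin API, roughly as in this sketch (the class name here is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ShutdownPathsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = connection.getAdmin()) {
      if (args.length > 0 && "cluster".equals(args[0])) {
        // Asks the active master to shut down the whole cluster (HMaster#shutdown()).
        admin.shutdown();
      } else {
        // Stops only the active master (HMaster#stopMaster()); a backup master can take over.
        admin.stopMaster();
      }
    }
  }
}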
@@ -325,8 +263,9 @@ public void run() { // Tracker for load balancer state LoadBalancerTracker loadBalancerTracker; // Tracker for meta location, if any client ZK quorum specified - MetaLocationSyncer metaLocationSyncer; + private MetaLocationSyncer metaLocationSyncer; // Tracker for active master location, if any client ZK quorum specified + @InterfaceAudience.Private MasterAddressSyncer masterAddressSyncer; // Tracker for auto snapshot cleanup state SnapshotCleanupTracker snapshotCleanupTracker; @@ -334,9 +273,6 @@ public void run() { // Tracker for split and merge state private SplitOrMergeTracker splitOrMergeTracker; - // Tracker for region normalizer state - private RegionNormalizerTracker regionNormalizerTracker; - private ClusterSchemaService clusterSchemaService; public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = @@ -403,11 +339,8 @@ public void run() { private final LockManager lockManager = new LockManager(this); private RSGroupBasedLoadBalancer balancer; - // a lock to prevent concurrent normalization actions. - private final ReentrantLock normalizationInProgressLock = new ReentrantLock(); - private RegionNormalizer normalizer; private BalancerChore balancerChore; - private RegionNormalizerChore normalizerChore; + private RegionNormalizerManager regionNormalizerManager; private ClusterStatusChore clusterStatusChore; private ClusterStatusPublisher clusterStatusPublisherChore = null; private SnapshotCleanerChore snapshotCleanerChore = null; @@ -461,9 +394,6 @@ public void run() { // handle table states private TableStateManager tableStateManager; - private long splitPlanCount; - private long mergePlanCount; - /** jetty server for master to redirect requests to regionserver infoServer */ private Server masterJettyServer; @@ -475,48 +405,6 @@ public void run() { // Cached clusterId on stand by masters to serve clusterID requests from clients. private final CachedClusterId cachedClusterId; - public static class RedirectServlet extends HttpServlet { - private static final long serialVersionUID = 2894774810058302473L; - private final int regionServerInfoPort; - private final String regionServerHostname; - - /** - * @param infoServer that we're trying to send all requests to - * @param hostname may be null. if given, will be used for redirects instead of host from client. - */ - public RedirectServlet(InfoServer infoServer, String hostname) { - regionServerInfoPort = infoServer.getPort(); - regionServerHostname = hostname; - } - - @Override - public void doGet(HttpServletRequest request, - HttpServletResponse response) throws ServletException, IOException { - String redirectHost = regionServerHostname; - if(redirectHost == null) { - redirectHost = request.getServerName(); - if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { - LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + - MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + - "your HBase deployment relies on client accessible names that the region server process " + - "can't resolve locally, then you should set the previously mentioned configuration variable " + - "to an appropriate hostname."); - // no sending client provided input back to the client, so the goal host is just in the logs. - response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " + - "this node. 
If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " + - "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname."); - return; - } - } - // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the - // host and port we're using, but it's buried way too deep to do that ATM. - String redirectUrl = request.getScheme() + "://" - + redirectHost + ":" + regionServerInfoPort - + request.getRequestURI(); - response.sendRedirect(redirectUrl); - } - } - /** * Initializes the HMaster. The steps are as follows: *

@@ -689,7 +577,7 @@ private int putUpJettyServer() throws IOException { final String redirectHostname = StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; - final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname); + final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -731,7 +619,7 @@ protected void waitForMasterActive(){ } } - @VisibleForTesting + @InterfaceAudience.Private public MasterRpcServices getMasterRpcServices() { return (MasterRpcServices)rpcServices; } @@ -785,26 +673,20 @@ public MetricsMaster getMasterMetrics() { } /** - *

* Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it * should have already been initialized along with {@link ServerManager}. - *

- *

- * Will be overridden in tests. - *

*/ - @VisibleForTesting - protected void initializeZKBasedSystemTrackers() - throws IOException, InterruptedException, KeeperException, ReplicationException { + private void initializeZKBasedSystemTrackers() + throws IOException, KeeperException, ReplicationException { this.balancer = new RSGroupBasedLoadBalancer(); this.balancer.setConf(conf); this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this); this.loadBalancerTracker.start(); - this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf); - this.normalizer.setMasterServices(this); - this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this); - this.regionNormalizerTracker.start(); + this.regionNormalizerManager = + RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); + this.configurationManager.registerObserver(regionNormalizerManager); + this.regionNormalizerManager.start(); this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); this.splitOrMergeTracker.start(); @@ -867,7 +749,7 @@ protected void initializeZKBasedSystemTrackers() } // Will be overriden in test to inject customized AssignmentManager - @VisibleForTesting + @InterfaceAudience.Private protected AssignmentManager createAssignmentManager(MasterServices master) { return new AssignmentManager(master); } @@ -897,10 +779,10 @@ protected AssignmentManager createAssignmentManager(MasterServices master) { * * *
  • If this is a new deploy, schedule a InitMetaProcedure to initialize meta
  • - *
  • Start necessary service threads - balancer, catalog janior, executor services, and also the - * procedure executor, etc. Notice that the balancer must be created first as assignment manager - * may use it when assigning regions.
  • - *
  • Wait for meta to be initialized if necesssary, start table state manager.
  • + *
  • Start necessary service threads - balancer, catalog janitor, executor services, and also + * the procedure executor, etc. Notice that the balancer must be created first as assignment + * manager may use it when assigning regions.
  • + *
  • Wait for meta to be initialized if necessary, start table state manager.
  • *
  • Wait for enough region servers to check-in
  • *
  • Let assignment manager load data from meta and construct region states
  • *
  • Start all other things such as chore services, etc
  • @@ -949,7 +831,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { - IOUtils.closeQuietly(result.getSecond()); + Closeables.close(result.getSecond(), true); } } } @@ -993,14 +875,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream() .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()), walManager.getLiveServersFromWALDir(), walManager.getSplittingServersFromWALDir()); - // This manager will be started AFTER hbase:meta is confirmed on line. - // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table - // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients. - this.tableStateManager = - this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true) - ? - new MirroringTableStateManager(this): - new TableStateManager(this); + // This manager must be accessed AFTER hbase:meta is confirmed on line.. + this.tableStateManager = new TableStateManager(this); status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -1015,7 +891,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.activeMaster = true; // Start the Zombie master detector after setting master as active, see HBASE-21535 - Thread zombieDetector = new Thread(new InitializationMonitor(this), + Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), "ActiveMasterInitializationMonitor-" + System.currentTimeMillis()); zombieDetector.setDaemon(true); zombieDetector.start(); @@ -1035,10 +911,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc status.setStatus("Initializing meta table if this is a new deploy"); InitMetaProcedure initMetaProc = null; // Print out state of hbase:meta on startup; helps debugging. - RegionState rs = this.assignmentManager.getRegionStates(). - getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO); - LOG.info("hbase:meta {}", rs); - if (rs != null && rs.isOffline()) { + if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { Optional optProc = procedureExecutor.getProcedures().stream() .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { @@ -1089,13 +962,42 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc } this.assignmentManager.joinCluster(); // The below depends on hbase:meta being online. - this.tableStateManager.start(); - // Below has to happen after tablestatemanager has started in the case where this hbase-2.x - // is being started over an hbase-1.x dataset. tablestatemanager runs a migration as part - // of its 'start' moving table state from zookeeper to hbase:meta. This migration needs to - // complete before we do this next step processing offline regions else it fails reading - // table states messing up master launch (namespace table, etc., are not assigned). this.assignmentManager.processOfflineRegions(); + // this must be called after the above processOfflineRegions to prevent race + this.assignmentManager.wakeMetaLoadedEvent(); + + // for migrating from a version without HBASE-25099, and also for honoring the configuration + // first. 
+ if (conf.get(HConstants.META_REPLICAS_NUM) != null) { + int replicasNumInConf = + conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); + TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); + if (metaDesc.getRegionReplication() != replicasNumInConf) { + // it is possible that we already have some replicas before upgrading, so we must set the + // region replication number in meta TableDescriptor directly first, without creating a + // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. + int existingReplicasCount = + assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + if (existingReplicasCount > metaDesc.getRegionReplication()) { + LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(existingReplicasCount).build(); + tableDescriptors.update(metaDesc); + } + // check again, and issue a ModifyTableProcedure if needed + if (metaDesc.getRegionReplication() != replicasNumInConf) { + LOG.info( + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + procedureExecutor.submitProcedure(new ModifyTableProcedure( + procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(replicasNumInConf).build(), + null, metaDesc, false)); + } + } + } // Initialize after meta is up as below scans meta if (getFavoredNodesManager() != null && !maintenanceMode) { SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment = @@ -1113,8 +1015,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc getChoreService().scheduleChore(clusterStatusChore); this.balancerChore = new BalancerChore(this); getChoreService().scheduleChore(balancerChore); - this.normalizerChore = new RegionNormalizerChore(this); - getChoreService().scheduleChore(normalizerChore); + if (regionNormalizerManager != null) { + getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); + } this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); this.hbckChore = new HbckChore(this); @@ -1154,13 +1057,6 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc } assignmentManager.checkIfShouldMoveSystemRegionAsync(); - status.setStatus("Assign meta replicas"); - MasterMetaBootstrap metaBootstrap = createMetaBootstrap(); - try { - metaBootstrap.assignMetaReplicas(); - } catch (IOException | KeeperException e){ - LOG.error("Assigning meta replica failed: ", e); - } status.setStatus("Starting quota manager"); initQuotaManager(); if (QuotaUtil.isQuotaEnabled(conf)) { @@ -1219,7 +1115,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online * and we will hold here until operator intervention. */ - @VisibleForTesting + @InterfaceAudience.Private public boolean waitForMetaOnline() { return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO); } @@ -1248,7 +1144,7 @@ private boolean isRegionOnline(RegionInfo ri) { ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. 
if (rc == null) { - rc = new RetryCounterFactory(1000).create(); + rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); } Threads.sleep(rc.getBackoffTimeAndIncrementAttempts()); } @@ -1290,7 +1186,7 @@ private boolean waitForNamespaceOnline() throws IOException { * Adds the {@code MasterQuotasObserver} to the list of configured Master observers to * automatically remove quotas for a table when that table is deleted. */ - @VisibleForTesting + @InterfaceAudience.Private public void updateConfigurationForQuotasObserver(Configuration conf) { // We're configured to not delete quotas on table deletion, so we don't need to add the obs. if (!conf.getBoolean( @@ -1315,21 +1211,6 @@ private void initMobCleaner() { getChoreService().scheduleChore(mobFileCompactionChore); } - /** - *

    - * Create a {@link MasterMetaBootstrap} instance. - *

    - *

    - * Will be overridden in tests. - *

    - */ - @VisibleForTesting - protected MasterMetaBootstrap createMetaBootstrap() { - // We put this out here in a method so can do a Mockito.spy and stub it out - // w/ a mocked up MasterMetaBootstrap. - return new MasterMetaBootstrap(this); - } - /** *

    * Create a {@link ServerManager} instance. @@ -1338,7 +1219,7 @@ protected MasterMetaBootstrap createMetaBootstrap() { * Will be overridden in tests. *

    */ - @VisibleForTesting + @InterfaceAudience.Private protected ServerManager createServerManager(final MasterServices master) throws IOException { // We put this out here in a method so can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. @@ -1352,7 +1233,7 @@ private void waitForRegionServers(final MonitoredTask status) } // Will be overridden in tests - @VisibleForTesting + @InterfaceAudience.Private protected void initClusterSchemaService() throws IOException, InterruptedException { this.clusterSchemaService = new ClusterSchemaServiceImpl(this); this.clusterSchemaService.startAsync(); @@ -1443,6 +1324,9 @@ private void startServiceThreads() throws IOException { HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT)); this.executorService.startExecutorService(ExecutorType.MASTER_SNAPSHOT_OPERATIONS, conf.getInt( SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT)); + this.executorService.startExecutorService(ExecutorType.MASTER_MERGE_OPERATIONS, conf.getInt( + HConstants.MASTER_MERGE_DISPATCH_THREADS, + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of @@ -1454,17 +1338,17 @@ private void startServiceThreads() throws IOException { // Create cleaner thread pool cleanerPool = new DirScanPool(conf); + Map params = new HashMap<>(); + params.put(MASTER, this); // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool); + getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Map params = new HashMap<>(); - params.put(MASTER, this); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params); getChoreService().scheduleChore(hfileCleaner); @@ -1530,6 +1414,9 @@ protected void stopServiceThreads() { // example. 
stopProcedureExecutor(); + if (regionNormalizerManager != null) { + regionNormalizerManager.stop(); + } if (this.quotaManager != null) { this.quotaManager.stop(); } @@ -1615,11 +1502,9 @@ private void switchSnapshotCleanup(final boolean on) { try { snapshotCleanupTracker.setSnapshotCleanupEnabled(on); if (on) { - if (!getChoreService().isChoreScheduled(this.snapshotCleanerChore)) { - getChoreService().scheduleChore(this.snapshotCleanerChore); - } + getChoreService().scheduleChore(this.snapshotCleanerChore); } else { - getChoreService().cancelChore(this.snapshotCleanerChore); + this.snapshotCleanerChore.cancel(); } } catch (KeeperException e) { LOG.error("Error updating snapshot cleanup mode to {}", on, e); @@ -1643,22 +1528,23 @@ private void stopProcedureExecutor() { } private void stopChores() { - ChoreService choreService = getChoreService(); - if (choreService != null) { - choreService.cancelChore(this.mobFileCleanerChore); - choreService.cancelChore(this.mobFileCompactionChore); - choreService.cancelChore(this.balancerChore); - choreService.cancelChore(this.normalizerChore); - choreService.cancelChore(this.clusterStatusChore); - choreService.cancelChore(this.catalogJanitorChore); - choreService.cancelChore(this.clusterStatusPublisherChore); - choreService.cancelChore(this.snapshotQuotaChore); - choreService.cancelChore(this.logCleaner); - choreService.cancelChore(this.hfileCleaner); - choreService.cancelChore(this.replicationBarrierCleaner); - choreService.cancelChore(this.snapshotCleanerChore); - choreService.cancelChore(this.hbckChore); - choreService.cancelChore(this.regionsRecoveryChore); + if (getChoreService() != null) { + shutdownChore(mobFileCleanerChore); + shutdownChore(mobFileCompactionChore); + shutdownChore(balancerChore); + if (regionNormalizerManager != null) { + shutdownChore(regionNormalizerManager.getRegionNormalizerChore()); + } + shutdownChore(clusterStatusChore); + shutdownChore(catalogJanitorChore); + shutdownChore(clusterStatusPublisherChore); + shutdownChore(snapshotQuotaChore); + shutdownChore(logCleaner); + shutdownChore(hfileCleaner); + shutdownChore(replicationBarrierCleaner); + shutdownChore(snapshotCleanerChore); + shutdownChore(hbckChore); + shutdownChore(regionsRecoveryChore); } } @@ -1748,7 +1634,9 @@ public boolean balance() throws IOException { * @param action the name of the action under consideration, for logging. * @return {@code true} when the caller should exit early, {@code false} otherwise. */ - private boolean skipRegionManagementAction(final String action) { + @Override + public boolean skipRegionManagementAction(final String action) { + // Note: this method could be `default` on MasterServices if but for logging. if (!isInitialized()) { LOG.debug("Master has not been initialized, don't run {}.", action); return true; @@ -1787,7 +1675,7 @@ public boolean balance(boolean force) throws IOException { toPrint = regionsInTransition.subList(0, max); truncated = true; } - LOG.info(prefix + "unning balancer because " + regionsInTransition.size() + + LOG.info(prefix + " not running balancer because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + (truncated? 
"(truncated list)": "")); if (!force || metaInTransition) return false; } @@ -1893,24 +1781,16 @@ public List executeRegionPlansWithThrottling(List plans) } @Override - public RegionNormalizer getRegionNormalizer() { - return this.normalizer; - } - - public boolean normalizeRegions() throws IOException { - return normalizeRegions(new NormalizeTableFilterParams.Builder().build()); + public RegionNormalizerManager getRegionNormalizerManager() { + return regionNormalizerManager; } - /** - * Perform normalization of cluster. - * - * @return true if an existing normalization was already in progress, or if a new normalization - * was performed successfully; false otherwise (specifically, if HMaster finished initializing - * or normalization is globally disabled). - */ - public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IOException { - final long startTime = EnvironmentEdgeManager.currentTime(); - if (regionNormalizerTracker == null || !regionNormalizerTracker.isNormalizerOn()) { + @Override + public boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, + final boolean isHighPriority + ) throws IOException { + if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; } @@ -1921,70 +1801,17 @@ public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IO return false; } - if (!normalizationInProgressLock.tryLock()) { - // Don't run the normalizer concurrently - LOG.info("Normalization already in progress. Skipping request."); - return true; - } - - int affectedTables = 0; - try { - final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); - final Set allEnabledTables = - tableStateManager.getTablesInStates(TableState.State.ENABLED); - final List targetTables = - new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); - Collections.shuffle(targetTables); - - final List submittedPlanProcIds = new ArrayList<>(); - for (TableName table : targetTables) { - if (table.isSystemTable()) { - continue; - } - final TableDescriptor tblDesc = getTableDescriptors().get(table); - if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { - LOG.debug( - "Skipping table {} because normalization is disabled in its table properties.", table); - continue; - } - - // make one last check that the cluster isn't shutting down before proceeding. - if (skipRegionManagementAction("region normalizer")) { - return false; - } - - final List plans = normalizer.computePlansForTable(table); - if (CollectionUtils.isEmpty(plans)) { - LOG.debug("No normalization required for table {}.", table); - continue; - } - - affectedTables++; - // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to - // submit task , so there's no artificial rate- - // limiting of merge/split requests due to this serial loop. - for (NormalizationPlan plan : plans) { - long procId = plan.submit(this); - submittedPlanProcIds.add(procId); - if (plan.getType() == PlanType.SPLIT) { - splitPlanCount++; - } else if (plan.getType() == PlanType.MERGE) { - mergePlanCount++; - } - } - } - final long endTime = EnvironmentEdgeManager.currentTime(); - LOG.info("Normalizer ran successfully in {}. 
Submitted {} plans, affecting {} tables.", - Duration.ofMillis(endTime - startTime), submittedPlanProcIds.size(), affectedTables); - LOG.debug("Normalizer submitted procID list: {}", submittedPlanProcIds); - } finally { - normalizationInProgressLock.unlock(); - } - return true; + final Set matchingTables = getTableDescriptors(new LinkedList<>(), + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) + .stream() + .map(TableDescriptor::getTableName) + .collect(Collectors.toSet()); + final Set allEnabledTables = + tableStateManager.getTablesInStates(TableState.State.ENABLED); + final List targetTables = + new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); + Collections.shuffle(targetTables); + return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority); } /** @@ -2083,7 +1910,7 @@ private void warmUpRegion(ServerName server, RegionInfo region) { // Public so can be accessed by tests. Blocks until move is done. // Replace with an async implementation from which you can get // a success/failure result. - @VisibleForTesting + @InterfaceAudience.Private public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException { RegionState regionState = assignmentManager.getRegionStates(). getRegionState(Bytes.toString(encodedRegionName)); @@ -2911,19 +2738,13 @@ public void stop(String msg) { } } - @VisibleForTesting + @InterfaceAudience.Private protected void checkServiceStarted() throws ServerNotRunningYetException { if (!serviceStarted) { throw new ServerNotRunningYetException("Server is not running yet"); } } - public static class MasterStoppedException extends DoNotRetryIOException { - MasterStoppedException() { - super(); - } - } - void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException, MasterNotRunningException, MasterStoppedException { checkServiceStarted(); @@ -2962,6 +2783,19 @@ public boolean isInitialized() { return initialized.isReady(); } + /** + * Report whether this master is started + * + * This method is used for testing. + * + * @return true if master is ready to go, false if not. + */ + + @Override + public boolean isOnline() { + return serviceStarted; + } + /** * Report whether this master is in maintenance mode. * @@ -2972,7 +2806,7 @@ public boolean isInMaintenanceMode() { return maintenanceMode; } - @VisibleForTesting + @InterfaceAudience.Private public void setInitialized(boolean isInitialized) { procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized); } @@ -3000,20 +2834,6 @@ public double getAverageLoad() { return regionStates.getAverageLoad(); } - /* - * @return the count of region split plans executed - */ - public long getSplitPlanCount() { - return splitPlanCount; - } - - /* - * @return the count of region merge plans executed - */ - public long getMergePlanCount() { - return mergePlanCount; - } - @Override public boolean registerService(Service instance) { /* @@ -3445,12 +3265,12 @@ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws I * @param tableName The current table name. * @return If a given table is in mob file compaction now. 
*/ - public CompactionState getMobCompactionState(TableName tableName) { + public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) { AtomicInteger compactionsCount = mobCompactionStates.get(tableName); if (compactionsCount != null && compactionsCount.get() != 0) { - return CompactionState.MAJOR_AND_MINOR; + return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR; } - return CompactionState.NONE; + return GetRegionInfoResponse.CompactionState.NONE; } public void reportMobCompactionStart(TableName tableName) throws IOException { @@ -3508,8 +3328,7 @@ public boolean isBalancerOn() { */ public boolean isNormalizerOn() { return !isInMaintenanceMode() - && regionNormalizerTracker != null - && regionNormalizerTracker.isNormalizerOn(); + && getRegionNormalizerManager().isNormalizerOn(); } /** @@ -3537,13 +3356,6 @@ public String getLoadBalancerClassName() { LoadBalancerFactory.getDefaultLoadBalancerClass().getName()); } - /** - * @return RegionNormalizerTracker instance - */ - public RegionNormalizerTracker getRegionNormalizerTracker() { - return regionNormalizerTracker; - } - public SplitOrMergeTracker getSplitOrMergeTracker() { return splitOrMergeTracker; } @@ -3820,8 +3632,15 @@ public ReplicationPeerManager getReplicationPeerManager() { List replicationLoadSources = getServerManager().getLoad(serverName).getReplicationLoadSourceList(); for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) { - replicationLoadSourceMap.get(replicationLoadSource.getPeerID()) - .add(new Pair<>(serverName, replicationLoadSource)); + List> replicationLoadSourceList = + replicationLoadSourceMap.get(replicationLoadSource.getPeerID()); + if (replicationLoadSourceList == null) { + LOG.debug("{} does not exist, but it exists " + + "in znode(/hbase/replication/rs). 
when the rs restarts, peerId is deleted, so " + + "we just need to ignore it", replicationLoadSource.getPeerID()); + continue; + } + replicationLoadSourceList.add(new Pair<>(serverName, replicationLoadSource)); } } for (List> loads : replicationLoadSourceMap.values()) { @@ -3835,7 +3654,7 @@ public ReplicationPeerManager getReplicationPeerManager() { /** * This method modifies the master's configuration in order to inject replication-related features */ - @VisibleForTesting + @InterfaceAudience.Private public static void decorateMasterConfiguration(Configuration conf) { String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS); String cleanerClass = ReplicationLogCleaner.class.getCanonicalName(); @@ -3900,4 +3719,52 @@ public MetaRegionLocationCache getMetaRegionLocationCache() { public RSGroupInfoManager getRSGroupInfoManager() { return rsGroupInfoManager; } + + /** + * Get the compaction state of the table + * + * @param tableName The table name + * @return CompactionState Compaction state of the table + */ + public CompactionState getCompactionState(final TableName tableName) { + CompactionState compactionState = CompactionState.NONE; + try { + List regions = + assignmentManager.getRegionStates().getRegionsOfTable(tableName); + for (RegionInfo regionInfo : regions) { + ServerName serverName = + assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo); + if (serverName == null) { + continue; + } + ServerMetrics sl = serverManager.getLoad(serverName); + if (sl == null) { + continue; + } + RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName()); + if (regionMetrics.getCompactionState() == CompactionState.MAJOR) { + if (compactionState == CompactionState.MINOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MAJOR; + } + } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) { + if (compactionState == CompactionState.MAJOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MINOR; + } + } + } + } catch (Exception e) { + compactionState = null; + LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e); + } + return compactionState; + } + + @Override + public MetaLocationSyncer getMetaLocationSyncer() { + return metaLocationSyncer; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index 489894e8c666..d2464a616eb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -62,7 +62,9 @@ public class HMasterCommandLine extends ServerCommandLine { " --localRegionServers= " + "RegionServers to start in master process when in standalone mode.\n" + " --masters= Masters to start in this process.\n" + - " --backup Master should start in backup mode"; + " --backup Master should start in backup mode\n" + + " --shutDownCluster " + + "Start Cluster shutdown; Master signals RegionServer shutdown"; private final Class masterClass; @@ -77,12 +79,14 @@ protected String getUsage() { @Override public int run(String args[]) throws Exception { + boolean shutDownCluster = false; Options opt = new Options(); opt.addOption("localRegionServers", true, "RegionServers to start in master process when running standalone"); opt.addOption("masters", true, "Masters to 
start in this process"); opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables"); opt.addOption("backup", false, "Do not try to become HMaster until the primary fails"); + opt.addOption("shutDownCluster", false, "`hbase master stop --shutDownCluster` shuts down cluster"); CommandLine cmd; try { @@ -127,6 +131,11 @@ public int run(String args[]) throws Exception { LOG.debug("masters set to " + val); } + // Checking whether to shut down cluster or not + if (cmd.hasOption("shutDownCluster")) { + shutDownCluster = true; + } + @SuppressWarnings("unchecked") List remainingArgs = cmd.getArgList(); if (remainingArgs.size() != 1) { @@ -139,7 +148,15 @@ public int run(String args[]) throws Exception { if ("start".equals(command)) { return startMaster(); } else if ("stop".equals(command)) { - return stopMaster(); + if (shutDownCluster) { + return stopMaster(); + } + System.err.println( + "To shutdown the master run " + + "hbase-daemon.sh stop master or send a kill signal to " + + "the HMaster pid, " + + "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master stop --shutDownCluster\""); + return 1; } else if ("clear".equals(command)) { return (ZNodeClearer.clear(getConf()) ? 0 : 1); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java index 8b886c8a4598..0973d037c8a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java @@ -255,10 +255,10 @@ private void loadRegionsFromRSReport() { for (Map.Entry entry : regionInfoMap.entrySet()) { HbckRegionInfo hri = entry.getValue(); ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); + if (locationInMeta == null) { + continue; + } if (hri.getDeployedOn().size() == 0) { - if (locationInMeta == null) { - continue; - } // skip the offline region which belong to disabled table. if (disabledTableRegions.contains(hri.getRegionNameAsString())) { continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index b7ec1a3aa1bc..d908aa5ef514 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hbase.master; -import edu.umd.cs.findbugs.annotations.Nullable; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.List; import java.util.Map; @@ -52,12 +52,20 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse * Master can carry regions as of hbase-2.0.0. * By default, it carries no tables. * TODO: Add any | system as flags to indicate what it can do. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster"; /** * Master carries system tables. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String SYSTEM_TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster.systemTablesOnly"; // Used to signal to the caller that the region(s) cannot be assigned @@ -110,6 +118,7 @@ List balanceTable(TableName tableName, * Perform a Round Robin assignment of regions. 
* @return Map of servername to regioninfos */ + @NonNull Map> roundRobinAssignment(List regions, List servers) throws IOException; @@ -117,7 +126,7 @@ Map> roundRobinAssignment(List regions, * Assign regions to the previously hosting region server * @return List of plans */ - @Nullable + @NonNull Map> retainAssignment(Map regions, List servers) throws IOException; @@ -158,15 +167,28 @@ Map> retainAssignment(Map r /** * @return true if Master carries regions + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated static boolean isTablesOnMaster(Configuration conf) { return conf.getBoolean(TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated static boolean isSystemTablesOnlyOnMaster(Configuration conf) { return conf.getBoolean(SYSTEM_TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated static boolean isMasterCanHostUserRegions(Configuration conf) { return isTablesOnMaster(conf) && !isSystemTablesOnlyOnMaster(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 0bbfa4a180ec..ca7eb909859b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -43,8 +43,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This class abstracts a bunch of operations the HMaster needs to interact with * the underlying file system like creating the initial layout, checking file @@ -293,7 +291,6 @@ private void checkRootDir(final Path rd, final Configuration c, final FileSystem * Make sure the hbase temp directory exists and is empty. * NOTE that this method is only executed once just after the master becomes the active one. */ - @VisibleForTesting void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs) throws IOException { // If the temp directory exists, clear the content (left over, from the previous run) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java new file mode 100644 index 000000000000..dcfeeab41309 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Protection against zombie master. Started once Master accepts active responsibility and starts + * taking over responsibilities. Allows a finite time window before giving up ownership. + */ +@InterfaceAudience.Private +class MasterInitializationMonitor extends Thread { + + private static final Logger LOG = LoggerFactory.getLogger(MasterInitializationMonitor.class); + + /** The amount of time in milliseconds to sleep before checking initialization status. */ + public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; + public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); + + /** + * When timeout expired and initialization has not complete, call {@link System#exit(int)} when + * true, do nothing otherwise. + */ + public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; + public static final boolean HALT_DEFAULT = false; + + private final HMaster master; + private final long timeout; + private final boolean haltOnTimeout; + + /** Creates a Thread that monitors the {@link #isInitialized()} state. */ + MasterInitializationMonitor(HMaster master) { + super("MasterInitializationMonitor"); + this.master = master; + this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); + this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); + this.setDaemon(true); + } + + @Override + public void run() { + try { + while (!master.isStopped() && master.isActiveMaster()) { + Thread.sleep(timeout); + if (master.isInitialized()) { + LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); + } else { + LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" + + " consider submitting a bug report including a thread dump of this process."); + if (haltOnTimeout) { + LOG.error("Zombie Master exiting. Thread dump to stdout"); + Threads.printThreadInfo(System.out, "Zombie HMaster"); + System.exit(-1); + } + } + } + } catch (InterruptedException ie) { + LOG.trace("InitMonitor thread interrupted. Existing."); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java deleted file mode 100644 index c676df8b6c88..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Used by the HMaster on startup to split meta logs and assign the meta table. - */ -@InterfaceAudience.Private -class MasterMetaBootstrap { - private static final Logger LOG = LoggerFactory.getLogger(MasterMetaBootstrap.class); - - private final HMaster master; - - public MasterMetaBootstrap(HMaster master) { - this.master = master; - } - - /** - * For assigning hbase:meta replicas only. - * TODO: The way this assign runs, nothing but chance to stop all replicas showing up on same - * server as the hbase:meta region. - */ - void assignMetaReplicas() - throws IOException, InterruptedException, KeeperException { - int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); - final AssignmentManager assignmentManager = master.getAssignmentManager(); - if (!assignmentManager.isMetaLoaded()) { - throw new IllegalStateException("hbase:meta must be initialized first before we can " + - "assign out its replicas"); - } - ServerName metaServername = MetaTableLocator.getMetaRegionLocation(this.master.getZooKeeper()); - for (int i = 1; i < numReplicas; i++) { - // Get current meta state for replica from zk. - RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i); - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, i); - LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" + metaState); - if (metaServername.equals(metaState.getServerName())) { - metaState = null; - LOG.info(hri.getRegionNameAsString() + - " old location is same as current hbase:meta location; setting location as null..."); - } - // These assigns run inline. All is blocked till they complete. Only interrupt is shutting - // down hosting server which calls AM#stop. - if (metaState != null && metaState.getServerName() != null) { - // Try to retain old assignment. 
- assignmentManager.assignAsync(hri, metaState.getServerName()); - } else { - assignmentManager.assignAsync(hri); - } - } - unassignExcessMetaReplica(numReplicas); - } - - private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { - final ZKWatcher zooKeeper = master.getZooKeeper(); - // unassign the unneeded replicas (for e.g., if the previous master was configured - // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica) - try { - List metaReplicaZnodes = zooKeeper.getMetaReplicaNodes(); - for (String metaReplicaZnode : metaReplicaZnodes) { - int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZnode(metaReplicaZnode); - if (replicaId >= numMetaReplicasConfigured) { - RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId); - LOG.info("Closing excess replica of meta region " + r.getRegion()); - // send a close and wait for a max of 30 seconds - ServerManager.closeRegionSilentlyAndWait(master.getAsyncClusterConnection(), - r.getServerName(), r.getRegion(), 30000); - ZKUtil.deleteNode(zooKeeper, zooKeeper.getZNodePaths().getZNodeForReplica(replicaId)); - } - } - } catch (Exception ex) { - // ignore the exception since we don't want the master to be wedged due to potential - // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually - LOG.warn("Ignoring exception " + ex); - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java new file mode 100644 index 000000000000..bda2934dbfb2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; + +import java.io.IOException; +import java.net.InetAddress; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.hbase.http.InfoServer; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +class MasterRedirectServlet extends HttpServlet { + + private static final long serialVersionUID = 2894774810058302473L; + + private static final Logger LOG = LoggerFactory.getLogger(MasterRedirectServlet.class); + + private final int regionServerInfoPort; + private final String regionServerHostname; + + /** + * @param infoServer that we're trying to send all requests to + * @param hostname may be null. if given, will be used for redirects instead of host from client. + */ + public MasterRedirectServlet(InfoServer infoServer, String hostname) { + regionServerInfoPort = infoServer.getPort(); + regionServerHostname = hostname; + } + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + String redirectHost = regionServerHostname; + if (redirectHost == null) { + redirectHost = request.getServerName(); + if (!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { + LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + + MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + + "your HBase deployment relies on client accessible names that the region server " + + "process can't resolve locally, then you should set the previously mentioned " + + "configuration variable to an appropriate hostname."); + // no sending client provided input back to the client, so the goal host is just in the + // logs. + response.sendError(400, + "Request was to a host that I can't resolve for any of the network interfaces on " + + "this node. If this is due to an intermediary such as an HTTP load balancer or " + + "other proxy, your HBase administrator can set '" + MASTER_HOSTNAME_KEY + + "' to point to the correct hostname."); + return; + } + } + // TODO: this scheme should come from looking at the scheme registered in the infoserver's http + // server for the host and port we're using, but it's buried way too deep to do that ATM. 
+ String redirectUrl = request.getScheme() + "://" + redirectHost + ":" + regionServerInfoPort + + request.getRequestURI(); + response.sendRedirect(redirectUrl); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 37fc58985e7b..8f2f0dad4b7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1913,9 +1913,7 @@ public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController contr master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); } } - } catch (IOException e) { - throw new ServiceException(e); - } catch (KeeperException e) { + } catch (IOException | KeeperException e) { throw new ServiceException(e); } return response.build(); @@ -1940,7 +1938,8 @@ public NormalizeResponse normalize(RpcController controller, .namespace(request.hasNamespace() ? request.getNamespace() : null) .build(); return NormalizeResponse.newBuilder() - .setNormalizerRan(master.normalizeRegions(ntfp)) + // all API requests are considered priority requests. + .setNormalizerRan(master.normalizeRegions(ntfp, true)) .build(); } catch (IOException ex) { throw new ServiceException(ex); @@ -1953,20 +1952,27 @@ public SetNormalizerRunningResponse setNormalizerRunning(RpcController controlle rpcPreCheck("setNormalizerRunning"); // Sets normalizer on/off flag in ZK. - boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn(); - boolean newValue = request.getOn(); - try { - master.getRegionNormalizerTracker().setNormalizerOn(newValue); - } catch (KeeperException ke) { - LOG.warn("Error flipping normalizer switch", ke); - } + // TODO: this method is totally broken in terms of atomicity of actions and values read. + // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method + // that lets us retrieve the previous value as part of setting a new value, so we simply + // perform a read before issuing the update. Thus we have a data race opportunity, between + // when the `prevValue` is read and whatever is actually overwritten. + // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can + // itself fail in the event that the znode already exists. Thus, another data race, between + // when the initial `setData` call is notified of the absence of the target znode and the + // subsequent `createAndWatch`, with another client creating said node. + // That said, there's supposed to be only one active master and thus there's supposed to be + // only one process with the authority to modify the value. 
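The TODO above names two races: the previous value is read separately from the write, and the fallback create can itself collide with another creator. One common way to close the first gap is a version-checked compare-and-swap loop. The sketch below uses the plain Apache ZooKeeper client, not HBase's ZKUtil or RegionNormalizerManager; the znode path and one-byte encoding are invented for the example.

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Compare-and-swap on a znode using its version: read the current value, then write with
// the version we read. If another writer got in between, setData fails with
// BadVersionException and we retry, so the returned "previous value" is always the one
// actually overwritten.
final class ZkCasSketch {
  static boolean setFlagAtomically(ZooKeeper zk, String path, boolean newValue)
      throws KeeperException, InterruptedException {
    while (true) {
      Stat stat = new Stat();
      byte[] data = zk.getData(path, false, stat);
      boolean prev = data != null && data.length > 0 && data[0] == 1;
      try {
        zk.setData(path, new byte[] { (byte) (newValue ? 1 : 0) }, stat.getVersion());
        return prev;
      } catch (KeeperException.BadVersionException e) {
        // lost the race; re-read and retry
      }
    }
  }
}
```

This only illustrates the idea; as the comment notes, with a single active master the existing read-then-write is tolerated in practice.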
+ final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn(); + final boolean newValue = request.getOn(); + master.getRegionNormalizerManager().setNormalizerOn(newValue); LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue); return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); } @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, - IsNormalizerEnabledRequest request) throws ServiceException { + IsNormalizerEnabledRequest request) { IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); response.setEnabled(master.isNormalizerOn()); return response.build(); @@ -2434,6 +2440,15 @@ public ClearDeadServersResponse clearDeadServers(RpcController controller, Set
    clearedServers = new HashSet<>(); for (HBaseProtos.ServerName pbServer : request.getServerNameList()) { ServerName server = ProtobufUtil.toServerName(pbServer); + + final boolean deadInProcess = master.getProcedures().stream().anyMatch( + p -> (p instanceof ServerCrashProcedure) + && ((ServerCrashProcedure) p).getServerName().equals(server)); + if (deadInProcess) { + throw new ServiceException( + String.format("Dead server '%s' is not 'dead' in fact...", server)); + } + if (!deadServer.removeDeadServer(server)) { response.addServerName(pbServer); } else { @@ -2512,6 +2527,7 @@ public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest re @Override public GetTableStateResponse setTableStateInMeta(RpcController controller, SetTableStateInMetaRequest request) throws ServiceException { + rpcPreCheck("setTableStateInMeta"); TableName tn = ProtobufUtil.toTableName(request.getTableName()); try { TableState prevState = this.master.getTableStateManager().getTableState(tn); @@ -2534,6 +2550,7 @@ public GetTableStateResponse setTableStateInMeta(RpcController controller, @Override public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, SetRegionStateInMetaRequest request) throws ServiceException { + rpcPreCheck("setRegionStateInMeta"); SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder(); try { for (RegionSpecifierAndState s : request.getStatesList()) { @@ -2717,6 +2734,7 @@ public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProc @Override public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request) throws ServiceException { + rpcPreCheck("fixMeta"); try { MetaFixer mf = new MetaFixer(this.master); mf.fix(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 908d21270c6e..f24ecd46d488 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -34,11 +35,12 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockedResource; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -55,7 +57,6 @@ import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** @@ -122,9 +123,9 @@ public interface MasterServices extends Server { MasterQuotaManager getMasterQuotaManager(); /** - * @return Master's instance of {@link RegionNormalizer} + * @return Master's instance of {@link RegionNormalizerManager} */ - RegionNormalizer getRegionNormalizer(); + RegionNormalizerManager getRegionNormalizerManager(); /** * @return Master's instance of {@link CatalogJanitor} @@ -139,7 +140,6 @@ public interface MasterServices extends Server { /** * @return Tripped when Master has finished initialization. */ - @VisibleForTesting public ProcedureEvent getInitializedEvent(); /** @@ -354,6 +354,13 @@ long splitRegion( */ boolean isInMaintenanceMode(); + /** + * Checks master state before initiating action over region topology. + * @param action the name of the action under consideration, for logging. + * @return {@code true} when the caller should exit early, {@code false} otherwise. + */ + boolean skipRegionManagementAction(final String action); + /** * Abort a procedure. * @param procId ID of the procedure @@ -553,4 +560,21 @@ default SplitWALManager getSplitWALManager(){ * @return The state of the load balancer, or false if the load balancer isn't defined. */ boolean isBalancerOn(); + + /** + * Perform normalization of cluster. + * @param ntfp Selection criteria for identifying which tables to normalize. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when the request was submitted, {@code false} otherwise. + */ + boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, final boolean isHighPriority) throws IOException; + + /** + * Get the meta location syncer. + *
    + * We need to get this in MTP to tell the syncer the new meta replica count. + */ + MetaLocationSyncer getMetaLocationSyncer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 6001c8f9a98f..61b432710d8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -43,7 +43,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class abstracts a bunch of operations the HMaster needs @@ -66,7 +65,6 @@ public boolean accept(Path p) { /** * Filter *out* WAL files that are for the hbase:meta Region; i.e. return user-space WALs only. */ - @VisibleForTesting public final static PathFilter NON_META_FILTER = new PathFilter() { @Override public boolean accept(Path p) { @@ -124,7 +122,6 @@ public void stop() { } } - @VisibleForTesting SplitLogManager getSplitLogManager() { return this.splitLogManager; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java index f4e91b56051d..07512d16fd60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java @@ -157,7 +157,7 @@ private HRegionLocation getMetaRegionLocation(int replicaId) } private void updateMetaLocation(String path, ZNodeOpType opType) { - if (!isValidMetaZNode(path)) { + if (!isValidMetaPath(path)) { return; } LOG.debug("Updating meta znode for path {}: {}", path, opType.name()); @@ -220,8 +220,8 @@ public Optional> getMetaRegionLocations() { * Helper to check if the given 'path' corresponds to a meta znode. This listener is only * interested in changes to meta znodes. 
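MetaRegionLocationCache stays current by re-reading a znode whenever its watch fires and re-registering the watch on each read. Below is a minimal sketch of that watch-and-refresh pattern using the plain ZooKeeper client rather than HBase's ZKWatcher; the cached value, error handling, and znode path are invented for the example.

```java
import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Keep a cached copy of a znode's data and refresh it whenever the watch fires.
// getData(path, this, null) re-registers this watcher on every read.
final class CachingWatcherSketch implements Watcher {
  private final ZooKeeper zk;
  private final String path;
  private final AtomicReference<String> cached = new AtomicReference<>();

  CachingWatcherSketch(ZooKeeper zk, String path) {
    this.zk = zk;
    this.path = path;
  }

  void refresh() throws KeeperException, InterruptedException {
    byte[] data = zk.getData(path, this, null);
    cached.set(data == null ? null : new String(data, StandardCharsets.UTF_8));
  }

  String get() {
    return cached.get();
  }

  @Override
  public void process(WatchedEvent event) {
    try {
      refresh();
    } catch (KeeperException | InterruptedException e) {
      cached.set(null); // a real cache would invalidate and retry with backoff
      if (e instanceof InterruptedException) {
        Thread.currentThread().interrupt();
      }
    }
  }
}
```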
*/ - private boolean isValidMetaZNode(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + private boolean isValidMetaPath(String path) { + return watcher.getZNodePaths().isMetaZNodePath(path); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index 9d4550c5eb0a..aeaae929209e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -55,12 +55,12 @@ public double getAverageLoad() { @Override public long getSplitPlanCount() { - return master.getSplitPlanCount(); + return master.getRegionNormalizerManager().getSplitPlanCount(); } @Override public long getMergePlanCount() { - return master.getMergePlanCount(); + return master.getRegionNormalizerManager().getMergePlanCount(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MirroringTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MirroringTableStateManager.java deleted file mode 100644 index 590d6a065626..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MirroringTableStateManager.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.client.TableState.State; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; - -/** - * A subclass of TableStateManager that mirrors change in state out to zookeeper for hbase-1.x - * clients to pick up; hbase-1.x clients read table state of zookeeper rather than from hbase:meta - * as hbase-2.x clients do. Set "hbase.mirror.table.state.to.zookeeper" to false to disable - * mirroring. See in HMaster where we make the choice. The below does zk updates on a best-effort - * basis only. If we fail updating zk we keep going because only hbase1 clients suffer; we'll just - * log at WARN level. - * @deprecated Since 2.0.0. To be removed in 3.0.0. 
- */ -@Deprecated -@InterfaceAudience.Private -public class MirroringTableStateManager extends TableStateManager { - private static final Logger LOG = LoggerFactory.getLogger(MirroringTableStateManager.class); - - /** - * Set this key to true in Configuration to enable mirroring of table state out to zookeeper so - * hbase-1.x clients can pick-up table state. - */ - static final String MIRROR_TABLE_STATE_TO_ZK_KEY = "hbase.mirror.table.state.to.zookeeper"; - - public MirroringTableStateManager(MasterServices master) { - super(master); - } - - @Override - protected void metaStateUpdated(TableName tableName, State newState) throws IOException { - updateZooKeeper(new TableState(tableName, newState)); - } - - @Override - protected void metaStateDeleted(TableName tableName) throws IOException { - deleteZooKeeper(tableName); - } - - private void updateZooKeeper(TableState tableState) throws IOException { - if (tableState == null) { - return; - } - String znode = ZNodePaths.joinZNode(this.master.getZooKeeper().getZNodePaths().tableZNode, - tableState.getTableName().getNameAsString()); - try { - // Make sure znode exists. - if (ZKUtil.checkExists(this.master.getZooKeeper(), znode) == -1) { - ZKUtil.createAndFailSilent(this.master.getZooKeeper(), znode); - } - // Now set newState - ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); - builder.setState( - ZooKeeperProtos.DeprecatedTableState.State.valueOf(tableState.getState().toString())); - byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.master.getZooKeeper(), znode, data); - } catch (KeeperException e) { - // Only hbase1 clients suffer if this fails. - LOG.warn("Failed setting table state to zookeeper mirrored for hbase-1.x clients", e); - } - } - - // This method is called by the super class on each row it finds in the hbase:meta table with - // table state in it. - @Override - protected void fixTableState(TableState tableState) throws IOException { - updateZooKeeper(tableState); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java index 9d33a2120859..336f9dc04f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.io.InterruptedIOException; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -129,21 +131,24 @@ public void start(Set deadServersFromPE, Set liveServers splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s)). 
forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s)); //create ServerNode for all possible live servers from wal directory - liveServersFromWALDir.stream() + liveServersFromWALDir .forEach(sn -> server.getAssignmentManager().getRegionStates().getOrCreateServer(sn)); watcher.registerListener(this); synchronized (this) { List servers = ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.getZNodePaths().rsZNode); - for (String n : servers) { - Pair pair = getServerInfo(n); - ServerName serverName = pair.getFirst(); - RegionServerInfo info = pair.getSecond(); - regionServers.add(serverName); - ServerMetrics serverMetrics = info != null ? ServerMetricsBuilder.of(serverName, - VersionInfoUtil.getVersionNumber(info.getVersionInfo()), - info.getVersionInfo().getVersion()) : ServerMetricsBuilder.of(serverName); - serverManager.checkAndRecordNewServer(serverName, serverMetrics); + if (null != servers) { + for (String n : servers) { + Pair pair = getServerInfo(n); + ServerName serverName = pair.getFirst(); + RegionServerInfo info = pair.getSecond(); + regionServers.add(serverName); + ServerMetrics serverMetrics = info != null ? + ServerMetricsBuilder.of(serverName, VersionInfoUtil.getVersionNumber(info.getVersionInfo()), + info.getVersionInfo().getVersion()) : + ServerMetricsBuilder.of(serverName); + serverManager.checkAndRecordNewServer(serverName, serverMetrics); + } } serverManager.findDeadServersAndProcess(deadServersFromPE, liveServersFromWALDir); } @@ -163,8 +168,9 @@ private synchronized void refresh() { server.abort("Unexpected zk exception getting RS nodes", e); return; } - Set servers = + Set servers = CollectionUtils.isEmpty(names) ? Collections.emptySet() : names.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); + for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName sn = iter.next(); if (!servers.contains(sn)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java index a756715062ec..5597cca1152b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HConstants; @@ -70,7 +69,6 @@ public class RegionsRecoveryChore extends ScheduledChore { */ RegionsRecoveryChore(final Stoppable stopper, final Configuration configuration, final HMaster hMaster) { - super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt( HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL)); this.hMaster = hMaster; @@ -125,7 +123,6 @@ protected void chore() { private Map> getTableToRegionsByRefCount( final Map serverMetricsMap) { - final Map> tableToReopenRegionsMap = new HashMap<>(); for (ServerMetrics serverMetrics : serverMetricsMap.values()) { Map regionMetricsMap = serverMetrics.getRegionMetrics(); @@ -146,13 +143,11 @@ private Map> getTableToRegionsByRefCount( } } return tableToReopenRegionsMap; - } private void prepareTableToReopenRegionsMap( final Map> tableToReopenRegionsMap, final byte[] regionName, final int regionStoreRefCount) { - final RegionInfo regionInfo = hMaster.getAssignmentManager().getRegionInfo(regionName); final TableName 
tableName = regionInfo.getTable(); if (TableName.isMetaTableName(tableName)) { @@ -165,21 +160,4 @@ private void prepareTableToReopenRegionsMap( tableToReopenRegionsMap .computeIfAbsent(tableName, (key) -> new ArrayList<>()).add(regionName); } - - // hashcode/equals implementation to ensure at-most one object of RegionsRecoveryChore - // is scheduled at a time - RegionsRecoveryConfigManager - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - return o != null && getClass() == o.getClass(); - } - - @Override - public int hashCode() { - return 31; - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java index b1bfdc0ecb04..78777a18cfd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master; +import com.google.errorprone.annotations.RestrictedApi; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HConstants; @@ -27,8 +28,7 @@ import org.slf4j.LoggerFactory; /** - * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore - * accordingly + * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore accordingly */ @InterfaceAudience.Private public class RegionsRecoveryConfigManager implements ConfigurationObserver { @@ -36,6 +36,7 @@ public class RegionsRecoveryConfigManager implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(RegionsRecoveryConfigManager.class); private final HMaster hMaster; + private RegionsRecoveryChore chore; private int prevMaxStoreFileRefCount; private int prevRegionsRecoveryInterval; @@ -51,34 +52,35 @@ public void onConfigurationChange(Configuration conf) { final int newMaxStoreFileRefCount = getMaxStoreFileRefCount(conf); final int newRegionsRecoveryInterval = getRegionsRecoveryChoreInterval(conf); - if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount - && prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { + if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount && + prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { // no need to re-schedule the chore with updated config // as there is no change in desired configs return; } - LOG.info("Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," + + LOG.info( + "Config Reload for RegionsRecovery Chore. 
prevMaxStoreFileRefCount: {}," + " newMaxStoreFileRefCount: {}, prevRegionsRecoveryInterval: {}, " + - "newRegionsRecoveryInterval: {}", prevMaxStoreFileRefCount, newMaxStoreFileRefCount, - prevRegionsRecoveryInterval, newRegionsRecoveryInterval); + "newRegionsRecoveryInterval: {}", + prevMaxStoreFileRefCount, newMaxStoreFileRefCount, prevRegionsRecoveryInterval, + newRegionsRecoveryInterval); - RegionsRecoveryChore regionsRecoveryChore = new RegionsRecoveryChore(this.hMaster, - conf, this.hMaster); + RegionsRecoveryChore regionsRecoveryChore = + new RegionsRecoveryChore(this.hMaster, conf, this.hMaster); ChoreService choreService = this.hMaster.getChoreService(); // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - synchronized (this) { + if (chore != null) { + chore.shutdown(); + chore = null; + } if (newMaxStoreFileRefCount > 0) { - // reschedule the chore - // provide mayInterruptIfRunning - false to take care of completion - // of in progress task if any - choreService.cancelChore(regionsRecoveryChore, false); + // schedule the new chore choreService.scheduleChore(regionsRecoveryChore); - } else { - choreService.cancelChore(regionsRecoveryChore, false); + chore = regionsRecoveryChore; } this.prevMaxStoreFileRefCount = newMaxStoreFileRefCount; this.prevRegionsRecoveryInterval = newRegionsRecoveryInterval; @@ -86,15 +88,18 @@ public void onConfigurationChange(Configuration conf) { } private int getMaxStoreFileRefCount(Configuration configuration) { - return configuration.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + return configuration.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); } private int getRegionsRecoveryChoreInterval(Configuration configuration) { - return configuration.getInt( - HConstants.REGIONS_RECOVERY_INTERVAL, + return configuration.getInt(HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL); } + @RestrictedApi(explanation = "Only visible for testing", link = "", + allowedOnPath = ".*/src/test/.*") + RegionsRecoveryChore getChore() { + return chore; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index d327c3fd4a34..7bbfd0bb55d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.util.Bytes; @@ -67,7 +68,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -275,7 +275,6 @@ private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) { } } - @VisibleForTesting public void regionServerReport(ServerName sn, ServerMetrics sl) throws YouAreDeadException { checkIsDead(sn, "REPORT"); @@ -427,13 
+426,11 @@ private ServerName findServerWithSameHostnamePortWithLock( * Adds the onlineServers list. onlineServers should be locked. * @param serverName The remote servers name. */ - @VisibleForTesting void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) { LOG.info("Registering regionserver=" + serverName); this.onlineServers.put(serverName, sl); } - @VisibleForTesting public ConcurrentNavigableMap getFlushedSequenceIdByRegion() { return flushedSequenceIdByRegion; } @@ -503,8 +500,9 @@ public DeadServer getDeadServers() { * Checks if any dead servers are currently in progress. * @return true if any RS are being processed as dead, false if not */ - public boolean areDeadServersInProgress() { - return this.deadservers.areDeadServersInProgress(); + public boolean areDeadServersInProgress() throws IOException { + return master.getProcedures().stream() + .anyMatch(p -> !p.isFinished() && p instanceof ServerCrashProcedure); } void letRegionServersShutdown() { @@ -569,7 +567,7 @@ private List getRegionServersInZK(final ZKWatcher zkw) * going down or we already have queued an SCP for this server or SCP processing is * currently disabled because we are in startup phase). */ - @VisibleForTesting // Redo test so we can make this protected. + // Redo test so we can make this protected. public synchronized long expireServer(final ServerName serverName) { return expireServer(serverName, false); @@ -628,7 +626,6 @@ synchronized long expireServer(final ServerName serverName, boolean force) { * Called when server has expired. */ // Locking in this class needs cleanup. - @VisibleForTesting public synchronized void moveFromOnlineToDeadServers(final ServerName sn) { synchronized (this.onlineServers) { boolean online = this.onlineServers.containsKey(sn); @@ -924,8 +921,13 @@ public boolean isClusterShutdown() { public void startChore() { Configuration c = master.getConfiguration(); if (persistFlushedSequenceId) { - // when reach here, RegionStates should loaded, firstly, we call remove deleted regions - removeDeletedRegionFromLoadedFlushedSequenceIds(); + new Thread(() -> { + // after AM#loadMeta, RegionStates should be loaded, and some regions are + // deleted by drop/split/merge during removeDeletedRegionFromLoadedFlushedSequenceIds, + // but these deleted regions are not added back to RegionStates, + // so we can safely remove deleted regions. 
+ removeDeletedRegionFromLoadedFlushedSequenceIds(); + }, "RemoveDeletedRegionSyncThread").start(); int flushPeriod = c.getInt(FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, FLUSHEDSEQUENCEID_FLUSHER_INTERVAL_DEFAULT); flushedSeqIdFlusher = new FlushedSequenceIdFlusher( @@ -939,7 +941,7 @@ public void startChore() { */ public void stop() { if (flushedSeqIdFlusher != null) { - flushedSeqIdFlusher.cancel(); + flushedSeqIdFlusher.shutdown(); } if (persistFlushedSequenceId) { try { @@ -995,7 +997,6 @@ public void removeRegion(final RegionInfo regionInfo) { flushedSequenceIdByRegion.remove(encodedName); } - @VisibleForTesting public boolean isRegionInServerManagerStates(final RegionInfo hri) { final byte[] encodedName = hri.getEncodedNameAsBytes(); return (storeFlushedSequenceIdsByRegion.containsKey(encodedName) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 3e0c7460eaf3..186a8ff11bba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -55,7 +55,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Distributes the task of log splitting to the available region servers. @@ -104,7 +103,6 @@ public class SplitLogManager { private long unassignedTimeout; private long lastTaskCreateTime = Long.MAX_VALUE; - @VisibleForTesting final ConcurrentMap tasks = new ConcurrentHashMap<>(); private TimeoutMonitor timeoutMonitor; @@ -150,7 +148,7 @@ private SplitLogManagerCoordination getSplitLogManagerCoordination() { return server.getCoordinatedStateManager().getSplitLogManagerCoordination(); } - private FileStatus[] getFileList(List logDirs, PathFilter filter) throws IOException { + private List getFileList(List logDirs, PathFilter filter) throws IOException { return getFileList(conf, logDirs, filter); } @@ -165,8 +163,7 @@ private FileStatus[] getFileList(List logDirs, PathFilter filter) throws I * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem, * Configuration, org.apache.hadoop.hbase.wal.WALFactory)} for tests. 
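getFileList now hands back a List&lt;FileStatus&gt; instead of a raw array, so callers use size()/isEmpty() and never have to guard against a null result. A minimal sketch of that shape with the stock Hadoop FileSystem API follows; the directory handling is illustrative and not the actual SplitLogManager logic.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

// Gather FileStatus entries from several directories, applying a PathFilter, and return
// an empty list (never null, never an array) when nothing matches.
final class WalListingSketch {
  static List<FileStatus> listLogs(Configuration conf, List<Path> logDirs, PathFilter filter)
      throws IOException {
    List<FileStatus> result = new ArrayList<>();
    for (Path dir : logDirs) {
      FileSystem fs = dir.getFileSystem(conf);
      if (!fs.exists(dir)) {
        continue; // a crashed server may have left no WAL directory at all
      }
      FileStatus[] matched = fs.listStatus(dir, filter);
      if (matched != null) {
        result.addAll(Arrays.asList(matched));
      }
    }
    return result;
  }
}
```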
*/ - @VisibleForTesting - public static FileStatus[] getFileList(final Configuration conf, final List logDirs, + public static List getFileList(final Configuration conf, final List logDirs, final PathFilter filter) throws IOException { List fileStatus = new ArrayList<>(); @@ -183,8 +180,8 @@ public static FileStatus[] getFileList(final Configuration conf, final List serverNames, final List logfiles = getFileList(logDirs, filter); + if (!logfiles.isEmpty()) { status.setStatus("Checking directory contents..."); SplitLogCounters.tot_mgr_log_split_batch_start.increment(); - LOG.info("Started splitting " + logfiles.length + " logs in " + logDirs + + LOG.info("Started splitting " + logfiles.size() + " logs in " + logDirs + " for " + serverNames); startTime = EnvironmentEdgeManager.currentTime(); batch = new TaskBatch(); @@ -375,7 +372,6 @@ private void waitForSplittingCompletion(TaskBatch batch, MonitoredTask status) { } } - @VisibleForTesting ConcurrentMap getTasks() { return tasks; } @@ -460,7 +456,7 @@ public void stop() { choreService.shutdown(); } if (timeoutMonitor != null) { - timeoutMonitor.cancel(true); + timeoutMonitor.shutdown(true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 9ff84dc942e8..6db094c4e6df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -46,8 +46,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Create {@link SplitWALProcedure} for each WAL which need to split. Manage the workers for each @@ -87,8 +85,7 @@ public SplitWALManager(MasterServices master) throws IOException { this.splitWorkerAssigner = new SplitWorkerAssigner(this.master, conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER)); this.rootDir = master.getMasterFileSystem().getWALRootDir(); - // TODO: This should be the WAL FS, not the Master FS? - this.fs = master.getMasterFileSystem().getFileSystem(); + this.fs = master.getMasterFileSystem().getWALFileSystem(); this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME); } @@ -108,10 +105,10 @@ public List splitWALs(ServerName crashedServer, boolean splitMeta) public List getWALsToSplit(ServerName serverName, boolean splitMeta) throws IOException { List logDirs = master.getMasterWalManager().getLogDirs(Collections.singleton(serverName)); - FileStatus[] fileStatuses = - SplitLogManager.getFileList(this.conf, logDirs, splitMeta ? META_FILTER : NON_META_FILTER); - LOG.info("{} WAL count={}, meta={}", serverName, fileStatuses.length, splitMeta); - return Lists.newArrayList(fileStatuses); + List fileStatuses = + SplitLogManager.getFileList(this.conf, logDirs, splitMeta ? 
META_FILTER : NON_META_FILTER); + LOG.info("{} WAL count={}, meta={}", serverName, fileStatuses.size(), splitMeta); + return fileStatuses; } private Path getWALSplitDir(ServerName serverName) { @@ -145,7 +142,6 @@ public boolean isSplitWALFinished(String walPath) throws IOException { return !fs.exists(new Path(rootDir, walPath)); } - @VisibleForTesting List createSplitWALProcedures(List splittingWALs, ServerName crashedServer) { return splittingWALs.stream() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 984833fbbac1..dbe90319f20d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -20,8 +20,6 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -29,21 +27,14 @@ import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; import org.apache.hadoop.hbase.util.IdReadWriteLock; import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool; -import org.apache.hadoop.hbase.util.ZKDataMigrator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,22 +42,15 @@ /** * This is a helper class used to manage table states. This class uses hbase:meta as its store for - * table state so hbase:meta must be online before {@link #start()} is called. + * table state so hbase:meta must be online before accessing its methods. */ -// TODO: Make this a guava Service @InterfaceAudience.Private public class TableStateManager { private static final Logger LOG = LoggerFactory.getLogger(TableStateManager.class); - /** - * Set this key to false in Configuration to disable migrating table state from zookeeper so - * hbase:meta table. 
- */ - private static final String MIGRATE_TABLE_STATE_FROM_ZK_KEY = - "hbase.migrate.table.state.from.zookeeper"; private final IdReadWriteLock tnLock = new IdReadWriteLockWithObjectPool<>(); - protected final MasterServices master; + private final MasterServices master; private final ConcurrentMap tableName2State = new ConcurrentHashMap<>(); @@ -110,7 +94,6 @@ public void setDeletedTable(TableName tableName) throws IOException { lock.writeLock().lock(); try { MetaTableAccessor.deleteTableState(master.getConnection(), tableName); - metaStateDeleted(tableName); } finally { tableName2State.remove(tableName); lock.writeLock().unlock(); @@ -182,14 +165,6 @@ private void updateMetaState(TableName tableName, TableState.State newState) thr this.tableName2State.remove(tableName); } } - metaStateUpdated(tableName, newState); - } - - protected void metaStateUpdated(TableName tableName, TableState.State newState) - throws IOException { - } - - protected void metaStateDeleted(TableName tableName) throws IOException { } @Nullable @@ -204,118 +179,4 @@ private TableState readMetaState(TableName tableName) throws IOException { } return tableState; } - - public void start() throws IOException { - migrateZooKeeper(); - fixTableStates(master.getTableDescriptors(), master.getConnection()); - } - - private void fixTableStates(TableDescriptors tableDescriptors, Connection connection) - throws IOException { - Map states = new HashMap<>(); - // NOTE: Full hbase:meta table scan! - MetaTableAccessor.fullScanTables(connection, new ClientMetaTableAccessor.Visitor() { - @Override - public boolean visit(Result r) throws IOException { - TableState state = CatalogFamilyFormat.getTableState(r); - states.put(state.getTableName().getNameAsString(), state); - return true; - } - }); - for (TableDescriptor tableDesc : tableDescriptors.getAll().values()) { - TableName tableName = tableDesc.getTableName(); - if (TableName.isMetaTableName(tableName)) { - // This table is always enabled. No fixup needed. No entry in hbase:meta needed. - // Call through to fixTableState though in case a super class wants to do something. - fixTableState(new TableState(tableName, TableState.State.ENABLED)); - continue; - } - TableState tableState = states.get(tableName.getNameAsString()); - if (tableState == null) { - LOG.warn(tableName + " has no table state in hbase:meta, assuming ENABLED"); - MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED); - fixTableState(new TableState(tableName, TableState.State.ENABLED)); - tableName2State.put(tableName, TableState.State.ENABLED); - } else { - fixTableState(tableState); - tableName2State.put(tableName, tableState.getState()); - } - } - } - - /** - * For subclasses in case they want to do fixup post hbase:meta. - */ - protected void fixTableState(TableState tableState) throws IOException { - } - - /** - * This code is for case where a hbase2 Master is starting for the first time. ZooKeeper is where - * we used to keep table state. On first startup, read zookeeper and update hbase:meta with the - * table states found in zookeeper. This is tricky as we'll do this check every time we startup - * until mirroring is disabled. See the {@link #MIGRATE_TABLE_STATE_FROM_ZK_KEY} flag. Original - * form of this migration came in with HBASE-13032. It deleted all znodes when done. We can't do - * that if we want to support hbase-1.x clients who need to be able to read table state out of zk. - * See {@link MirroringTableStateManager}. - * @deprecated Since 2.0.0. Remove in hbase-3.0.0. 
- */ - @Deprecated - private void migrateZooKeeper() throws IOException { - if (!this.master.getConfiguration().getBoolean(MIGRATE_TABLE_STATE_FROM_ZK_KEY, true)) { - return; - } - try { - for (Map.Entry entry : ZKDataMigrator - .queryForTableStates(this.master.getZooKeeper()).entrySet()) { - if (this.master.getTableDescriptors().get(entry.getKey()) == null) { - deleteZooKeeper(entry.getKey()); - LOG.info("Purged table state entry from zookeepr for table not in hbase:meta: " + - entry.getKey()); - continue; - } - TableState ts = null; - try { - ts = getTableState(entry.getKey()); - } catch (TableNotFoundException e) { - // This can happen; table exists but no TableState. - } - if (ts == null) { - TableState.State zkstate = entry.getValue(); - // Only migrate if it is an enable or disabled table. If in-between -- ENABLING or - // DISABLING then we have a problem; we are starting up an hbase-2 on a cluster with - // RIT. It is going to be rough! - if (zkstate.equals(TableState.State.ENABLED) || - zkstate.equals(TableState.State.DISABLED)) { - LOG.info("Migrating table state from zookeeper to hbase:meta; tableName=" + - entry.getKey() + ", state=" + entry.getValue()); - updateMetaState(entry.getKey(), entry.getValue()); - } else { - LOG.warn("Table={} has no state and zookeeper state is in-between={} (neither " + - "ENABLED or DISABLED); NOT MIGRATING table state", entry.getKey(), zkstate); - } - } - // What if the table states disagree? Defer to the hbase:meta setting rather than have the - // hbase-1.x support prevail. - } - } catch (KeeperException | InterruptedException e) { - LOG.warn("Failed reading table state from zookeeper", e); - } - } - - /** - * Utility method that knows how to delete the old hbase-1.x table state znode. Used also by the - * Mirroring subclass. - * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. 
- */ - @Deprecated - protected void deleteZooKeeper(TableName tableName) { - try { - // Delete from ZooKeeper - String znode = ZNodePaths.joinZNode(this.master.getZooKeeper().getZNodePaths().tableZNode, - tableName.getNameAsString()); - ZKUtil.deleteNodeFailSilent(this.master.getZooKeeper(), znode); - } catch (KeeperException e) { - LOG.warn("Failed deleting table state from zookeeper", e); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java index 25c94ba5bbc5..fe304cebf064 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java @@ -27,14 +27,11 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; -import org.apache.yetus.audience.InterfaceAudience; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.yetus.audience.InterfaceAudience; /** * Leave here only for checking if we can successfully start the master. @@ -143,7 +140,6 @@ protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) { return env.getAssignmentManager().getAssignmentManagerMetrics().getAssignProcMetrics(); } - @VisibleForTesting @Override public void setProcId(long procId) { super.setProcId(procId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 5638af5af48f..1eb39028f454 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -85,8 +86,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; @@ -180,7 +179,6 @@ public AssignmentManager(final MasterServices master) { this(master, new RegionStateStore(master)); } - @VisibleForTesting AssignmentManager(final MasterServices master, final RegionStateStore stateStore) { this.master = master; 
this.regionStateStore = stateStore; @@ -228,13 +226,18 @@ public void start() throws IOException, KeeperException { // load meta region state ZKWatcher zkw = master.getZooKeeper(); // it could be null in some tests - if (zkw != null) { + if (zkw == null) { + return; + } + List metaZNodes = zkw.getMetaReplicaNodes(); + LOG.debug("hbase:meta replica znodes: {}", metaZNodes); + for (String metaZNode : metaZNodes) { + int replicaId = zkw.getZNodePaths().getMetaReplicaIdFromZNode(metaZNode); // here we are still in the early steps of active master startup. There is only one thread(us) // can access AssignmentManager and create region node, so here we do not need to lock the // region node. - RegionState regionState = MetaTableLocator.getMetaRegionState(zkw); - RegionStateNode regionNode = - regionStates.getOrCreateRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); + RegionState regionState = MetaTableLocator.getMetaRegionState(zkw, replicaId); + RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionState.getRegion()); regionNode.setRegionLocation(regionState.getServerName()); regionNode.setState(regionState.getState()); if (regionNode.getProcedure() != null) { @@ -243,7 +246,10 @@ public void start() throws IOException, KeeperException { if (regionState.getServerName() != null) { regionStates.addRegionToServer(regionNode); } - setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + } + LOG.debug("Loaded hbase:meta {}", regionNode); } } @@ -386,15 +392,15 @@ public List getFavoredNodes(final RegionInfo regionInfo) { // ============================================================================================ // Table State Manager helpers // ============================================================================================ - TableStateManager getTableStateManager() { + private TableStateManager getTableStateManager() { return master.getTableStateManager(); } - public boolean isTableEnabled(final TableName tableName) { + private boolean isTableEnabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.ENABLED); } - public boolean isTableDisabled(final TableName tableName) { + private boolean isTableDisabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.DISABLED, TableState.State.DISABLING); } @@ -496,8 +502,15 @@ public boolean waitMetaLoaded(Procedure proc) { return metaLoadEvent.suspendIfNotReady(proc); } - @VisibleForTesting - void wakeMetaLoadedEvent() { + /** + * This method will be called in master initialization method after calling + * {@link #processOfflineRegions()}, as in processOfflineRegions we will generate assign + * procedures for offline regions, which may be conflict with creating table. + *
    + * This is a bit dirty, should be reconsidered after we decide whether to keep the + * {@link #processOfflineRegions()} method. + */ + public void wakeMetaLoadedEvent() { metaLoadEvent.wake(getProcedureScheduler()); assert isMetaLoaded() : "expected meta to be loaded"; } @@ -766,7 +779,6 @@ public TransitRegionStateProcedure[] createRoundRobinAssignProcedures(List { - regionNode.lock(); - try { - if (!regionStates.include(regionNode, false) || - regionStates.isRegionOffline(regionNode.getRegionInfo())) { - return null; - } - // As in DisableTableProcedure, we will hold the xlock for table, so we can make sure that - // this procedure has not been executed yet, as TRSP will hold the shared lock for table all - // the time. So here we will unset it and when it is actually executed, it will find that - // the attach procedure is not itself and quit immediately. - if (regionNode.getProcedure() != null) { - regionNode.unsetProcedure(regionNode.getProcedure()); - } - TransitRegionStateProcedure proc = TransitRegionStateProcedure - .unassign(getProcedureEnvironment(), regionNode.getRegionInfo()); - regionNode.setProcedure(proc); - return proc; - } finally { - regionNode.unlock(); - } - }).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new); + return regionStates.getTableRegionStateNodes(tableName).stream() + .map(this::forceCreateUnssignProcedure).filter(p -> p != null) + .toArray(TransitRegionStateProcedure[]::new); + } + + /** + * Called by ModifyTableProcedures to unassign all the excess region replicas + * for a table. + */ + public TransitRegionStateProcedure[] createUnassignProceduresForClosingExcessRegionReplicas( + TableName tableName, int newReplicaCount) { + return regionStates.getTableRegionStateNodes(tableName).stream() + .filter(regionNode -> regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) + .map(this::forceCreateUnssignProcedure).filter(p -> p != null) + .toArray(TransitRegionStateProcedure[]::new); } public SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToSplit, @@ -1331,7 +1363,6 @@ public static class RegionInTransitionStat { private int totalRITsTwiceThreshold = 0; private int totalRITs = 0; - @VisibleForTesting public RegionInTransitionStat(final Configuration conf) { this.ritThreshold = conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, DEFAULT_RIT_STUCK_WARNING_THRESHOLD); @@ -1394,6 +1425,13 @@ protected void update(final AssignmentManager am) { this.statTimestamp = EnvironmentEdgeManager.currentTime(); update(regionStates.getRegionsStateInTransition(), statTimestamp); update(regionStates.getRegionFailedOpen(), statTimestamp); + + if (LOG.isDebugEnabled() && ritsOverThreshold != null && !ritsOverThreshold.isEmpty()) { + LOG.debug("RITs over threshold: {}", + ritsOverThreshold.entrySet().stream() + .map(e -> e.getKey() + ":" + e.getValue().getState().name()) + .collect(Collectors.joining("\n"))); + } } private void update(final Collection regions, final long currentTime) { @@ -1476,12 +1514,23 @@ public void joinCluster() throws IOException { // Public so can be run by the Master as part of the startup. Needs hbase:meta to be online. // Needs to be done after the table state manager has been started. 
public void processOfflineRegions() { - List offlineRegions = regionStates.getRegionStates().stream() - .filter(RegionState::isOffline).filter(s -> isTableEnabled(s.getRegion().getTable())) - .map(RegionState::getRegion).collect(Collectors.toList()); - if (!offlineRegions.isEmpty()) { - master.getMasterProcedureExecutor().submitProcedures( - master.getAssignmentManager().createRoundRobinAssignProcedures(offlineRegions)); + TransitRegionStateProcedure[] procs = + regionStates.getRegionStateNodes().stream().filter(rsn -> rsn.isInState(State.OFFLINE)) + .filter(rsn -> isTableEnabled(rsn.getRegionInfo().getTable())).map(rsn -> { + rsn.lock(); + try { + if (rsn.getProcedure() != null) { + return null; + } else { + return rsn.setProcedure(TransitRegionStateProcedure.assign(getProcedureEnvironment(), + rsn.getRegionInfo(), null)); + } + } finally { + rsn.unlock(); + } + }).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new); + if (procs.length > 0) { + master.getMasterProcedureExecutor().submitProcedures(procs); } } @@ -1561,8 +1610,6 @@ public RegionInfo loadRegionFromMeta(String regionEncodedName) throws UnknownReg private void loadMeta() throws IOException { // TODO: use a thread pool regionStateStore.visitMeta(new RegionMetaLoadingVisitor()); - // every assignment is blocked until meta is loaded. - wakeMetaLoadedEvent(); } /** @@ -1898,6 +1945,14 @@ public void markRegionAsSplit(final RegionInfo parent, final ServerName serverNa nodeB.setState(State.SPLITTING_NEW); TableDescriptor td = master.getTableDescriptors().get(parent.getTable()); + // TODO: here we just update the parent region info in meta, to set split and offline to true, + // without changing the one in the region node. This is a bit confusing but the region info + // field in RegionStateNode is not expected to be changed in the current design. Need to find a + // possible way to address this problem, or at least adding more comments about the trick to + // deal with this problem, that when you want to filter out split parent, you need to check both + // the RegionState on whether it is split, and also the region info. If one of them matches then + // it is a split parent. And usually only one of them can match, as after restart, the region + // state will be changed from SPLIT to CLOSED. 
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName, td); if (shouldAssignFavoredNodes(parent)) { List onlineServers = this.master.getServerManager().getOnlineServersList(); @@ -2150,12 +2205,8 @@ private void acceptPlan(final HashMap regions, final ProcedureEvent[] events = new ProcedureEvent[regions.size()]; final long st = System.currentTimeMillis(); - if (plan == null) { - throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); - } - if (plan.isEmpty()) { - return; + throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); } int evcount = 0; @@ -2220,7 +2271,6 @@ public List getExcludedServersForSystemTable() { .collect(Collectors.toList()); } - @VisibleForTesting MasterServices getMaster() { return master; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index f1b3329b25c0..80a61dae9036 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -60,8 +60,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -534,8 +532,10 @@ private void preMergeRegions(final MasterProcedureEnv env) throws IOException { try { env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.mergedRegion, - NormalizationPlan.PlanType.MERGE); + // TODO: why is this here? merge requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.MERGE); throw e; } } @@ -752,7 +752,6 @@ private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException { /** * @return The merged region. Maybe be null if called to early or we failed. */ - @VisibleForTesting RegionInfo getMergedRegion() { return this.mergedRegion; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 1c90d81ed06f..805b51caebec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -352,7 +352,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(RegionRemoteProcedureBaseStateData.class); region = ProtobufUtil.toRegionInfo(data.getRegion()); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); - state = data.getState(); + // 'state' may not be present if we are reading an 'old' form of this pb Message. 
+ if (data.hasState()) { + state = data.getState(); + } if (data.hasTransitionCode()) { transitionCode = data.getTransitionCode(); seqId = data.getSeqId(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java index b7fcdab96b98..d04dbef66ba1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java @@ -36,8 +36,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Current Region State. Most fields are synchronized with meta region, i.e, we will update meta * immediately after we modify this RegionStateNode, and usually under the lock. The only exception @@ -77,7 +75,6 @@ public AssignmentProcedureEvent(final RegionInfo regionInfo) { } } - @VisibleForTesting final Lock lock = new ReentrantLock(); private final RegionInfo regionInfo; private final ProcedureEvent event; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 935f61abd2f1..5036711507f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -62,12 +64,13 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -124,24 +127,23 @@ public boolean visit(final Result r) throws IOException { } /** - * Queries META table for the passed region encoded name, - * delegating action upon results to the RegionStateVisitor - * passed as second parameter. + * Queries META table for the passed region encoded name, delegating action upon results to the + * RegionStateVisitor passed as second parameter. * @param regionEncodedName encoded name for the Region we want to query META for. * @param visitor The RegionStateVisitor instance to react over the query results. * @throws IOException If some error occurs while querying META or parsing results. */ public void visitMetaForRegion(final String regionEncodedName, final RegionStateVisitor visitor) - throws IOException { - Result result = MetaTableAccessor. 
- scanByRegionEncodedName(master.getConnection(), regionEncodedName); + throws IOException { + Result result = + MetaTableAccessor.scanByRegionEncodedName(master.getConnection(), regionEncodedName); if (result != null) { visitMetaEntry(visitor, result); } } private void visitMetaEntry(final RegionStateVisitor visitor, final Result result) - throws IOException { + throws IOException { final RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result); if (rl == null) return; @@ -162,8 +164,7 @@ private void visitMetaEntry(final RegionStateVisitor visitor, final Result resul ServerName regionLocation = MetaTableAccessor.getTargetServerName(result, replicaId); final long openSeqNum = hrl.getSeqNum(); - // TODO: move under trace, now is visible for debugging - LOG.info( + LOG.debug( "Load hbase:meta entry region={}, regionState={}, lastHost={}, " + "regionLocation={}, openSeqNum={}", regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum); @@ -176,18 +177,18 @@ void updateRegionLocation(RegionStateNode regionStateNode) throws IOException { updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(), regionStateNode.getState()); } else { - long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() - : HConstants.NO_SEQNUM; + long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() : + HConstants.NO_SEQNUM; updateUserRegionLocation(regionStateNode.getRegionInfo(), regionStateNode.getState(), regionStateNode.getRegionLocation(), openSeqNum, // The regionStateNode may have no procedure in a test scenario; allow for this. - regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() - : Procedure.NO_PROC_ID); + regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() : + Procedure.NO_PROC_ID); } } private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, State state) - throws IOException { + throws IOException { try { MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(), state); @@ -197,8 +198,7 @@ private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, St } private void updateUserRegionLocation(RegionInfo regionInfo, State state, - ServerName regionLocation, long openSeqNum, - long pid) throws IOException { + ServerName regionLocation, long openSeqNum, long pid) throws IOException { long time = EnvironmentEdgeManager.currentTime(); final int replicaId = regionInfo.getReplicaId(); final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); @@ -208,7 +208,7 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, .append(regionInfo.getEncodedName()).append(", regionState=").append(state); if (openSeqNum >= 0) { Preconditions.checkArgument(state == State.OPEN && regionLocation != null, - "Open region should be on a server"); + "Open region should be on a server"); MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, replicaId); // only update replication barrier for default replica if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID && @@ -221,30 +221,23 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, } else if (regionLocation != null) { // Ideally, if no regionLocation, write null to the hbase:meta but this will confuse clients // currently; they want a server to hit. TODO: Make clients wait if no location. 
- put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(regionLocation.getServerName())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(regionLocation.getServerName())).build()); info.append(", regionLocation=").append(regionLocation); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getStateColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(state.name())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getStateColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) + .build()); LOG.info(info.toString()); updateRegionLocation(regionInfo, state, put); } private void updateRegionLocation(RegionInfo regionInfo, State state, Put put) - throws IOException { + throws IOException { try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { table.put(put); } catch (IOException e) { @@ -317,7 +310,7 @@ private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws } // ============================================================================================ - // Update Region Splitting State helpers + // Update Region Splitting State helpers // ============================================================================================ /** * Splits the region into two in an atomic operation. Offlines the parent region with the @@ -368,7 +361,7 @@ public void splitRegion(RegionInfo parent, RegionInfo splitA, RegionInfo splitB, } // ============================================================================================ - // Update Region Merging State helpers + // Update Region Merging State helpers // ============================================================================================ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName, TableDescriptor htd) throws IOException { @@ -376,7 +369,7 @@ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serv long time = HConstants.LATEST_TIMESTAMP; List mutations = new ArrayList<>(); List replicationParents = new ArrayList<>(); - for (RegionInfo ri: parents) { + for (RegionInfo ri : parents) { long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1; // Deletes for merging regions mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time)); @@ -436,8 +429,7 @@ public List getMergeRegions(RegionInfo region) throws IOException { * @param connection connection we're using * @param mergeRegion the merged region */ - public void deleteMergeQualifiers(RegionInfo mergeRegion) - throws IOException { + public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { // NOTE: We are doing a new hbase:meta read here. 
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells(); if (cells == null || cells.length == 0) { @@ -470,7 +462,6 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", "))); } - @VisibleForTesting static Put addMergeRegions(Put put, Collection mergeRegions) throws IOException { int limit = 10000; // Arbitrary limit. No room in our formatted 'task0000' below for more. int max = mergeRegions.size(); @@ -491,7 +482,7 @@ static Put addMergeRegions(Put put, Collection mergeRegions) throws } // ============================================================================================ - // Delete Region State helpers + // Delete Region State helpers // ============================================================================================ /** * Deletes the specified region. @@ -545,8 +536,59 @@ public void overwriteRegions(List regionInfos, int regionReplication LOG.debug("Overwritten regions: {} ", regionInfos); } + private Scan getScanForUpdateRegionReplicas(TableName tableName) { + return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName) + .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + } + + public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + if (TableName.isMetaTableName(tableName)) { + ZKWatcher zk = master.getZooKeeper(); + try { + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + ZKUtil.deleteNode(zk, zk.getZNodePaths().getZNodeForReplica(i)); + } + } catch (KeeperException e) { + throw new IOException(e); + } + } else { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List deletes = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Delete delete = new Delete(result.getRow()); + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getRegionStateColumn(i), now); + } + deletes.add(delete); + } + debugLogMutations(deletes); + metaTable.delete(deletes); + } + } + } + // ========================================================================== - // Table Descriptors helpers + // Table Descriptors helpers // ========================================================================== private boolean hasGlobalReplicationScope(TableName tableName) throws IOException { return hasGlobalReplicationScope(getDescriptor(tableName)); @@ -565,7 +607,7 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { } // ========================================================================== - // Region State + // Region State // ========================================================================== /** @@ -573,29 +615,29 @@ private 
TableDescriptor getDescriptor(TableName tableName) throws IOException { * @return the region state, or null if unknown. */ public static State getRegionState(final Result r, RegionInfo regionInfo) { - Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, - getStateColumn(regionInfo.getReplicaId())); + Cell cell = + r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(regionInfo.getReplicaId())); if (cell == null || cell.getValueLength() == 0) { return null; } - String state = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String state = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); try { return State.valueOf(state); } catch (IllegalArgumentException e) { - LOG.warn("BAD value {} in hbase:meta info:state column for region {} , " + - "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", - state, regionInfo.getEncodedName()); + LOG.warn( + "BAD value {} in hbase:meta info:state column for region {} , " + + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", + state, regionInfo.getEncodedName()); return null; } } private static byte[] getStateColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STATE_QUALIFIER - : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STATE_QUALIFIER : + Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } private static void debugLogMutations(List mutations) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 3bb3c4c0b358..147a112152bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -45,8 +45,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * RegionStates contains a set of Maps that describes the in-memory state of the AM, with * the regions available in the system, the region in transition, the offline regions and @@ -115,7 +113,6 @@ public void clear() { serverMap.clear(); } - @VisibleForTesting public boolean isRegionInRegionStates(final RegionInfo hri) { return (regionsMap.containsKey(hri.getRegionName()) || regionInTransition.containsKey(hri) || regionOffline.containsKey(hri)); @@ -124,7 +121,6 @@ public boolean isRegionInRegionStates(final RegionInfo hri) { // ========================================================================== // RegionStateNode helpers // ========================================================================== - @VisibleForTesting RegionStateNode createRegionStateNode(RegionInfo regionInfo) { synchronized (regionsMapLock) { RegionStateNode node = regionsMap.computeIfAbsent(regionInfo.getRegionName(), @@ -173,7 +169,7 @@ public void deleteRegions(final List regionInfos) { regionInfos.forEach(this::deleteRegion); } - ArrayList getTableRegionStateNodes(final TableName tableName) { + List getTableRegionStateNodes(final TableName tableName) { final ArrayList regions = new ArrayList(); for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) { if 
(!node.getTable().equals(tableName)) break; @@ -241,8 +237,10 @@ public boolean hasTableRegionStates(final TableName tableName) { /** * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. */ - public List getRegionsOfTable(final TableName table) { - return getRegionsOfTable(table, false); + public List getRegionsOfTable(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.OFFLINE, State.SPLIT) && + !regionNode.getRegionInfo().isSplitParent()); } private HRegionLocation createRegionForReopen(RegionStateNode node) { @@ -346,16 +344,34 @@ public HRegionLocation checkReopened(HRegionLocation oldLoc) { } /** - * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. + * Get the regions for enabling a table. + *
    + * Here we want the EnableTableProcedure to be more robust, so that it can be used to fix some nasty + states; that is why the checks in this method will look a bit strange. In general, a region can only be + offline when it has been split; for merging we will just delete the parent regions, but with HBCK we + may force-update the state of a region to fix some nasty bugs, so in this method we will try to + bring the offline regions back if they are not split. That's why we only check for the split state + here. */ - public List getRegionsOfTable(TableName table, boolean offline) { - return getRegionsOfTable(table, state -> include(state, offline)); + public List getRegionsOfTableForEnabling(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.SPLIT) && !regionNode.getRegionInfo().isSplit()); + } + + /** + * Get the regions for deleting a table. + *
    + * Here we need to return all the regions irrespective of the states in order to archive them + * all. This is because if we don't archive OFFLINE/SPLIT regions and if a snapshot or a cloned + * table references to the regions, we will lose the data of the regions. + */ + public List getRegionsOfTableForDeleting(TableName table) { + return getTableRegionStateNodes(table).stream().map(RegionStateNode::getRegionInfo) + .collect(Collectors.toList()); } /** - * @return Return the regions of the table; does not include OFFLINE unless you set - * offline to true. Does not include regions that are in the - * {@link State#SPLIT} state. + * @return Return the regions of the table and filter them. */ private List getRegionsOfTable(TableName table, Predicate filter) { return getTableRegionStateNodes(table).stream().filter(filter).map(n -> n.getRegionInfo()) @@ -368,7 +384,7 @@ private List getRegionsOfTable(TableName table, Predicatenode (do not include * if split or offline unless offline is set to true. */ - boolean include(final RegionStateNode node, final boolean offline) { + private boolean include(final RegionStateNode node, final boolean offline) { if (LOG.isTraceEnabled()) { LOG.trace("WORKING ON " + node + " " + node.getRegionInfo()); } @@ -744,7 +760,6 @@ public void removeServer(final ServerName serverName) { /** * @return Pertinent ServerStateNode or NULL if none found (Do not make modifications). */ - @VisibleForTesting public ServerStateNode getServerNode(final ServerName serverName) { return serverMap.get(serverName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java index 2e767718367b..bc46e19978d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -31,10 +31,9 @@ import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure; import org.apache.hadoop.hbase.procedure2.RemoteProcedureException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.yetus.audience.InterfaceAudience; /** * Leave here only for checking if we can successfully start the master. 
@@ -62,12 +61,10 @@ public RegionTransitionProcedure(final RegionInfo regionInfo) { this.regionInfo = regionInfo; } - @VisibleForTesting public RegionInfo getRegionInfo() { return regionInfo; } - @VisibleForTesting public void setRegionInfo(final RegionInfo regionInfo) { this.regionInfo = regionInfo; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index d0413360e6df..e5b5fe9055e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,12 +71,11 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; @@ -111,7 +110,7 @@ public SplitTableRegionProcedure(final MasterProcedureEnv env, // we fail-fast on construction. There it skips the split with just a warning. checkOnline(env, regionToSplit); this.bestSplitRow = splitRow; - checkSplittable(env, regionToSplit, bestSplitRow); + checkSplittable(env, regionToSplit); final TableName table = regionToSplit.getTable(); final long rid = getDaughterRegionIdTimestamp(regionToSplit); this.daughterOneRI = RegionInfoBuilder.newBuilder(table) @@ -158,12 +157,10 @@ protected void releaseLock(final MasterProcedureEnv env) { daughterTwoRI); } - @VisibleForTesting public RegionInfo getDaughterOneRI() { return daughterOneRI; } - @VisibleForTesting public RegionInfo getDaughterTwoRI() { return daughterTwoRI; } @@ -176,14 +173,14 @@ private boolean hasBestSplitRow() { * Check whether the region is splittable * @param env MasterProcedureEnv * @param regionToSplit parent Region to be split - * @param splitRow if splitRow is not specified, will first try to get bestSplitRow from RS */ private void checkSplittable(final MasterProcedureEnv env, - final RegionInfo regionToSplit, final byte[] splitRow) throws IOException { + final RegionInfo regionToSplit) throws IOException { // Ask the remote RS if this region is splittable. - // If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time. + // If we get an IOE, report it along w/ the failure so can see why we are not splittable at + // this time. 
if(regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke split on non-default regions directly"); + throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); } RegionStateNode node = env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); @@ -229,12 +226,12 @@ private void checkSplittable(final MasterProcedureEnv env, if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow)) { throw new DoNotRetryIOException( - "Split row is equal to startkey: " + Bytes.toStringBinary(splitRow)); + "Split row is equal to startkey: " + Bytes.toStringBinary(bestSplitRow)); } if (!regionToSplit.containsRow(bestSplitRow)) { throw new DoNotRetryIOException("Split row is not inside region key range splitKey:" + - Bytes.toStringBinary(splitRow) + " region: " + regionToSplit); + Bytes.toStringBinary(bestSplitRow) + " region: " + regionToSplit); } } @@ -485,7 +482,6 @@ private byte[] getSplitRow() { * Prepare to Split region. * @param env MasterProcedureEnv */ - @VisibleForTesting public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException { // Fail if we are taking snapshot for the given table if (env.getMasterServices().getSnapshotManager() @@ -570,8 +566,10 @@ private void preSplitRegion(final MasterProcedureEnv env) try { env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion()); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.getParentRegion(), - NormalizationPlan.PlanType.SPLIT); + // TODO: why is this here? split requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.SPLIT); throw e; } } @@ -599,7 +597,6 @@ private void openParentRegion(MasterProcedureEnv env) throws IOException { /** * Create daughter regions */ - @VisibleForTesting public void createDaughterRegions(final MasterProcedureEnv env) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index b0a697deaa97..8ca1ee482e81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -41,8 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionStateData; @@ -143,7 +141,6 @@ private void setInitialAndLastState() { } } - @VisibleForTesting protected TransitRegionStateProcedure(MasterProcedureEnv env, RegionInfo hri, ServerName assignCandidate, boolean forceNewPlan, TransitionType type) { super(env, hri); @@ -348,6 +345,7 @@ protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionSta LOG.error( "Cannot assign replica region {} because its 
primary region {} does not exist.", regionNode.getRegionInfo(), defaultRI); + regionNode.unsetProcedure(this); return Flow.NO_MORE_STATE; } } @@ -416,13 +414,8 @@ public void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode, // Should be called with RegionStateNode locked public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode, - ServerName serverName) throws IOException { - // force to assign to a new candidate server - // AssignmentManager#regionClosedAbnormally will set region location to null - // TODO: the forceNewPlan flag not be persistent so if master crash then the flag will be lost. - // But assign to old server is not big deal because it not effect correctness. - // See HBASE-23035 for more details. - forceNewPlan = true; + ServerName serverName, boolean forceNewPlan) throws IOException { + this.forceNewPlan = forceNewPlan; if (remoteProc != null) { // this means we are waiting for the sub procedure, so wake it up remoteProc.serverCrashed(env, regionNode, serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 6a27a6a05680..91215c7e265a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.master.balancer; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -36,7 +37,6 @@ import java.util.TreeMap; import java.util.function.Predicate; import java.util.stream.Collectors; - import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; @@ -57,14 +57,14 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.net.Address; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * The base class for load balancers. It provides the the functions used to by @@ -999,12 +999,10 @@ float getLocalityOfRegion(int region, int server) { } } - @VisibleForTesting protected void setNumRegions(int numRegions) { this.numRegions = numRegions; } - @VisibleForTesting protected void setNumMovedRegions(int numMovedRegions) { this.numMovedRegions = numMovedRegions; } @@ -1041,7 +1039,14 @@ public String toString() { protected ClusterMetrics clusterStatus = null; protected ServerName masterServerName; protected MasterServices services; + + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. 
+ * @see HBASE-15549 + */ + @Deprecated protected boolean onlySystemTablesOnMaster; + protected boolean maintenanceMode; @Override @@ -1074,7 +1079,11 @@ protected void setSlop(Configuration conf) { /** * Check if a region belongs to some system table. * If so, the primary replica may be expected to be put on the master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated public boolean shouldBeOnMaster(RegionInfo region) { return (this.maintenanceMode || this.onlySystemTablesOnMaster) && region.getTable().isSystemTable(); @@ -1082,7 +1091,11 @@ public boolean shouldBeOnMaster(RegionInfo region) { /** * Balance the regions that should be on master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated protected List balanceMasterRegions(Map> clusterMap) { if (masterServerName == null || clusterMap == null || clusterMap.size() <= 1) return null; List plans = null; @@ -1131,12 +1144,14 @@ protected List balanceMasterRegions(Map /** * If master is configured to carry system tables only, in here is * where we figure what to assign it. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated + @NonNull protected Map> assignMasterSystemRegions( Collection regions, List servers) { - if (servers == null || regions == null || regions.isEmpty()) { - return null; - } Map> assignments = new TreeMap<>(); if (this.maintenanceMode || this.onlySystemTablesOnMaster) { if (masterServerName != null && servers.contains(masterServerName)) { @@ -1267,15 +1282,16 @@ protected final boolean idleRegionServerExist(Cluster c){ * * @param regions all regions * @param servers all servers - * @return map of server to the regions it should take, or null if no - * assignment is possible (ie. no regions or no servers) + * @return map of server to the regions it should take, or emptyMap if no + * assignment is possible (ie. no servers) */ @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions, servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1285,14 +1301,17 @@ public Map> roundRobinAssignment(List r regions.removeAll(masterRegions); } } - if (this.maintenanceMode || regions == null || regions.isEmpty()) { + /** + * only need assign system table + */ + if (this.maintenanceMode || regions.isEmpty()) { return assignments; } int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { LOG.warn("Wanted to do round robin assignment but no servers to assign to"); - return null; + return Collections.emptyMap(); } // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the @@ -1407,15 +1426,17 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve * * @param regions regions and existing assignment from meta * @param servers available servers - * @return map of servers and regions to be assigned to them + * @return map of servers and regions to be assigned to them, or emptyMap if no + * assignment is possible (ie. 
no servers) */ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { // Update metrics metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions.keySet(), servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1430,7 +1451,7 @@ public Map> retainAssignment(Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { @@ -116,7 +119,7 @@ public Map> roundRobinAssignment(List r Set regionSet = Sets.newHashSet(regions); Map> assignmentMap = assignMasterSystemRegions(regions, servers); - if (assignmentMap != null && !assignmentMap.isEmpty()) { + if (!assignmentMap.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -367,14 +370,15 @@ private void updateFavoredNodesForRegion(RegionInfo regionInfo, List * Reuse BaseLoadBalancer's retainAssignment, but generate favored nodes when its missing. */ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { Map> assignmentMap = Maps.newHashMap(); Map> result = super.retainAssignment(regions, servers); - if (result == null || result.isEmpty()) { + if (result.isEmpty()) { LOG.warn("Nothing to assign to, probably no servers or no regions"); - return null; + return result; } // Guarantee not to put other regions on master diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index d8b42560b8d1..4435813a96b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -134,9 +134,9 @@ public void onConfigurationChange(Configuration conf) { float originSlop = slop; float originOverallSlop = overallSlop; super.setConf(conf); - LOG.info("Update configuration of SimpleLoadBalancer, previous slop is " - + originSlop + ", current slop is " + slop + "previous overallSlop is" + - originOverallSlop + ", current overallSlop is " + originOverallSlop); + LOG.info("Update configuration of SimpleLoadBalancer, previous slop is {}," + + " current slop is {}, previous overallSlop is {}, current overallSlop is {}", + originSlop, slop, originOverallSlop, overallSlop); } private void setLoad(List slList, int i, int loadChange){ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index eb1c2bcd6bc8..3f249b890133 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -29,7 +29,6 @@ import java.util.Objects; import java.util.Random; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -56,10 +55,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists; - /** *
    This is a best effort load balancer. Given a Cost function F(C) => x It will * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the @@ -365,7 +362,6 @@ protected boolean needsBalance(TableName tableName, Cluster cluster) { return !balanced; } - @VisibleForTesting Cluster.Action nextAction(Cluster cluster) { return candidateGenerators.get(RANDOM.nextInt(candidateGenerators.size())) .generate(cluster); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java index 4331d490c28d..9416e5a12a62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java @@ -28,7 +28,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -42,7 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; @@ -373,7 +371,6 @@ public synchronized void cleanup() { } } - @VisibleForTesting int getChorePoolSize() { return pool.getSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java index 6926f12c49ee..ff288572e252 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This Chore, every time it runs, will clear the HFiles in the hfile archive * folder that are deletable for each HFile cleaner in the chain. 
@@ -82,12 +81,10 @@ public HFileCleaner(final int period, final Stoppable stopper, Configuration con public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC = "hbase.regionserver.hfilecleaner.thread.timeout.msec"; - @VisibleForTesting static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L; public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = "hbase.regionserver.hfilecleaner.thread.check.interval.msec"; - @VisibleForTesting static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L; private static final Logger LOG = LoggerFactory.getLogger(HFileCleaner.class); @@ -383,42 +380,34 @@ public synchronized boolean getResult(long waitIfNotFinished) { } } - @VisibleForTesting public List getCleanerThreads() { return threads; } - @VisibleForTesting public long getNumOfDeletedLargeFiles() { return deletedLargeFiles.get(); } - @VisibleForTesting public long getNumOfDeletedSmallFiles() { return deletedSmallFiles.get(); } - @VisibleForTesting public long getLargeQueueInitSize() { return largeQueueInitSize; } - @VisibleForTesting public long getSmallQueueInitSize() { return smallQueueInitSize; } - @VisibleForTesting public long getThrottlePoint() { return throttlePoint; } - @VisibleForTesting long getCleanerThreadTimeoutMsec() { return cleanerThreadTimeoutMsec; } - @VisibleForTesting long getCleanerThreadCheckIntervalMsec() { return cleanerThreadCheckIntervalMsec; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java index a99c784d2ac8..b19e174be0c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.cleaner; import java.io.IOException; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -44,63 +45,75 @@ public class HFileLinkCleaner extends BaseHFileCleanerDelegate { private static final Logger LOG = LoggerFactory.getLogger(HFileLinkCleaner.class); private FileSystem fs = null; + private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @Override - public synchronized boolean isFileDeletable(FileStatus fStat) { - if (this.fs == null) return false; - Path filePath = fStat.getPath(); - // HFile Link is always deletable - if (HFileLink.isHFileLink(filePath)) return true; + public boolean isFileDeletable(FileStatus fStat) { + lock.readLock().lock(); + try { + if (this.fs == null) { + return false; + } + Path filePath = fStat.getPath(); + // HFile Link is always deletable + if (HFileLink.isHFileLink(filePath)) { + return true; + } - // If the file is inside a link references directory, means that it is a back ref link. - // The back ref can be deleted only if the referenced file doesn't exists. - Path parentDir = filePath.getParent(); - if (HFileLink.isBackReferencesDir(parentDir)) { - Path hfilePath = null; - try { - // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced - // file gets created when cloning a snapshot. 
- hfilePath = HFileLink.getHFileFromBackReference( - new Path(CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath); - if (fs.exists(hfilePath)) { - return false; - } - // check whether the HFileLink still exists in mob dir. - hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath); - if (fs.exists(hfilePath)) { + // If the file is inside a link references directory, means that it is a back ref link. + // The back ref can be deleted only if the referenced file doesn't exists. + Path parentDir = filePath.getParent(); + if (HFileLink.isBackReferencesDir(parentDir)) { + Path hfilePath = null; + try { + // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced + // file gets created when cloning a snapshot. + hfilePath = HFileLink.getHFileFromBackReference(new Path( + CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath); + if (fs.exists(hfilePath)) { + return false; + } + // check whether the HFileLink still exists in mob dir. + hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath); + if (fs.exists(hfilePath)) { + return false; + } + hfilePath = HFileLink.getHFileFromBackReference(CommonFSUtils.getRootDir(getConf()), + filePath); + return !fs.exists(hfilePath); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: " + + hfilePath); + } return false; } - hfilePath = - HFileLink.getHFileFromBackReference(CommonFSUtils.getRootDir(getConf()), filePath); - return !fs.exists(hfilePath); + } + + // HFile is deletable only if has no links + Path backRefDir = null; + try { + backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName()); + return CommonFSUtils.listStatus(fs, backRefDir) == null; } catch (IOException e) { if (LOG.isDebugEnabled()) { - LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: " + - hfilePath); + LOG.debug( + "Couldn't get the references, not deleting file, just in case. filePath=" + + filePath + ", backRefDir=" + backRefDir); } return false; } - } - - // HFile is deletable only if has no links - Path backRefDir = null; - try { - backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName()); - return CommonFSUtils.listStatus(fs, backRefDir) == null; - } catch (IOException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Couldn't get the references, not deleting file, just in case. 
filePath=" - + filePath + ", backRefDir=" + backRefDir); - } - return false; + } finally { + lock.readLock().unlock(); } } @Override - public synchronized void setConf(Configuration conf) { + public void setConf(Configuration conf) { super.setConf(conf); // setup filesystem + lock.writeLock().lock(); try { this.fs = FileSystem.get(this.getConf()); } catch (IOException e) { @@ -109,6 +122,8 @@ public synchronized void setConf(Configuration conf) { + FileSystem.FS_DEFAULT_NAME_KEY + "=" + getConf().get(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS)); } + } finally { + lock.writeLock().unlock(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java index 5fa115c9b8b8..d8993b38ffef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java @@ -22,12 +22,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -41,7 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -59,7 +58,6 @@ public class LogCleaner extends CleanerChore public static final String OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC = "hbase.oldwals.cleaner.thread.timeout.msec"; - @VisibleForTesting static final long DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC = 60 * 1000L; private final LinkedBlockingQueue pendingDelete; @@ -75,9 +73,9 @@ public class LogCleaner extends CleanerChore * @param pool the thread pool used to scan directories */ public LogCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs, - Path oldLogDir, DirScanPool pool) { + Path oldLogDir, DirScanPool pool, Map params) { super("LogsCleaner", period, stopper, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS, - pool); + pool, params); this.pendingDelete = new LinkedBlockingQueue<>(); int size = conf.getInt(OLD_WALS_CLEANER_THREAD_SIZE, DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE); this.oldWALsCleaner = createOldWalsCleaner(size); @@ -138,12 +136,10 @@ public synchronized void cleanup() { interruptOldWALsCleaner(); } - @VisibleForTesting int getSizeOfCleaners() { return oldWALsCleaner.size(); } - @VisibleForTesting long getCleanerThreadTimeoutMsec() { return cleanerThreadTimeoutMsec; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java index 6123f6b6c3b6..45b6a746a54d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java @@ -60,8 +60,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * A janitor for the catalog tables. 
Scans the hbase:meta catalog table on a period. * Makes a lastReport on state of hbase:meta. Looks for unused regions to garbage collect. Scan of @@ -218,7 +216,6 @@ public int scan() throws IOException { * @return Return generated {@link Report} */ // will be override in tests. - @VisibleForTesting protected Report scanForReport() throws IOException { ReportMakingVisitor visitor = new ReportMakingVisitor(this.services); // Null tablename means scan all of meta. @@ -304,7 +301,6 @@ public int compare(RegionInfo left, RegionInfo right) { } } - @VisibleForTesting static boolean cleanParent(MasterServices services, RegionInfo parent, Result rowContent) throws IOException { // Check whether it is a merged region and if it is clean of references. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 6f5162775da1..4a5aa0a1b8e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -29,6 +29,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -46,11 +48,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; - /** * Server-side fixing of bad or inconsistent state in hbase:meta. 
* Distinct from MetaTableAccessor because {@link MetaTableAccessor} is about low-level @@ -190,8 +190,8 @@ private static List createMetaEntries(final MasterServices masterSer // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas( - Collections.singletonList(regionInfo), 1, td.getRegionReplication()); + List newRegions = RegionReplicaUtil + .addReplicas(Collections.singletonList(regionInfo), 1, td.getRegionReplication()); // Add regions to META MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), newRegions, @@ -199,12 +199,13 @@ private static List createMetaEntries(final MasterServices masterSer // Setup replication for region replicas if needed if (td.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication( - masterServices.getConfiguration()); + ServerRegionReplicaUtil.setupRegionReplicaReplication(masterServices); } - return Either., IOException>ofLeft(newRegions); + return Either., IOException> ofLeft(newRegions); } catch (IOException e) { - return Either., IOException>ofRight(e); + return Either., IOException> ofRight(e); + } catch (ReplicationException e) { + return Either., IOException> ofRight(new HBaseIOException(e)); } }) .collect(Collectors.toList()); @@ -258,7 +259,6 @@ List fixOverlaps(Report report) throws IOException { * @param maxMergeCount Maximum regions to merge at a time (avoid merging * 100k regions in one go!) */ - @VisibleForTesting static List> calculateMerges(int maxMergeCount, List> overlaps) { if (overlaps.isEmpty()) { @@ -330,7 +330,6 @@ private static void calculateTableMerges(int maxMergeCount, Lista or b, whichever has the * endkey that is furthest along in the Table. */ - @VisibleForTesting static RegionInfo getRegionInfoWithLargestEndKey(RegionInfo a, RegionInfo b) { if (a == null) { // b may be null. @@ -358,7 +357,6 @@ static RegionInfo getRegionInfoWithLargestEndKey(RegionInfo a, RegionInfo b) { * @return True if an overlap found between passed in ri and * the pair. Does NOT check the pairs themselves overlap. */ - @VisibleForTesting static boolean isOverlap(RegionInfo ri, Pair pair) { if (ri == null || pair == null) { // Can't be an overlap in either of these cases. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java index 26b838dcefb8..aaf51526f4f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java @@ -30,7 +30,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Functions to acquire lock on table/namespace/regions. 
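One detail in the MetaFixer.createMetaEntries hunk above that is easy to miss: each candidate region is mapped to an Either carrying either the created entries or the captured exception (now also wrapping ReplicationException in HBaseIOException), so a single bad entry does not abort the rest of the stream. Below is a minimal, self-contained sketch of that collect-successes-and-failures pattern; this Either is a hand-rolled stand-in, not HBase's own utility class.

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    /** Minimal stand-in Either, only to illustrate the per-entry error capture pattern. */
    public class EitherSketch {
      static final class Either<L, R> {
        final L left;   // success value, if present
        final R right;  // captured failure, if present
        private Either(L left, R right) { this.left = left; this.right = right; }
        static <L, R> Either<L, R> ofLeft(L left) { return new Either<>(left, null); }
        static <L, R> Either<L, R> ofRight(R right) { return new Either<>(null, right); }
        boolean isLeft() { return left != null; }
      }

      // Parse each token; a failure becomes a value instead of ending the whole stream.
      static Either<Integer, Exception> parse(String s) {
        try {
          return Either.ofLeft(Integer.parseInt(s));
        } catch (NumberFormatException e) {
          return Either.ofRight(e);
        }
      }

      public static void main(String[] args) {
        List<Either<Integer, Exception>> results =
            Stream.of("1", "2", "oops", "4").map(EitherSketch::parse).collect(Collectors.toList());
        long failures = results.stream().filter(e -> !e.isLeft()).count();
        System.out.println(failures + " of " + results.size() + " entries failed");
      }
    }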
@@ -192,7 +191,6 @@ public String toString() { return "MasterLock: proc = " + proc.toString(); } - @VisibleForTesting LockProcedure getProc() { return proc; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java index 17e313047d72..f5a72863fe8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java @@ -18,41 +18,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Normalization plan to merge regions (smallest region in the table with its smallest neighbor). + * Normalization plan to merge adjacent regions. As with any call to + * {@link MasterServices#mergeRegions(RegionInfo[], boolean, long, long)} + * with {@code forcible=false}, Region order and adjacency are important. It's the caller's + * responsibility to ensure the provided parameters are ordered according to the + * {code mergeRegions} method requirements. */ @InterfaceAudience.Private -public class MergeNormalizationPlan implements NormalizationPlan { +final class MergeNormalizationPlan implements NormalizationPlan { - private final RegionInfo firstRegion; - private final RegionInfo secondRegion; + private final List normalizationTargets; - public MergeNormalizationPlan(RegionInfo firstRegion, RegionInfo secondRegion) { - this.firstRegion = firstRegion; - this.secondRegion = secondRegion; - } - - /** - * {@inheritDoc} - */ - @Override - public long submit(MasterServices masterServices) throws IOException { - // Do not use force=true as corner cases can happen, non adjacent regions, - // merge with a merged child region with no GC done yet, it is going to - // cause all different issues. 
- return masterServices - .mergeRegions(new RegionInfo[] { firstRegion, secondRegion }, false, HConstants.NO_NONCE, - HConstants.NO_NONCE); + private MergeNormalizationPlan(List normalizationTargets) { + Preconditions.checkNotNull(normalizationTargets); + Preconditions.checkState(normalizationTargets.size() >= 2, + "normalizationTargets.size() must be >= 2 but was %s", normalizationTargets.size()); + this.normalizationTargets = Collections.unmodifiableList(normalizationTargets); } @Override @@ -60,19 +54,14 @@ public PlanType getType() { return PlanType.MERGE; } - RegionInfo getFirstRegion() { - return firstRegion; - } - - RegionInfo getSecondRegion() { - return secondRegion; + public List getNormalizationTargets() { + return normalizationTargets; } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("firstRegion", firstRegion) - .append("secondRegion", secondRegion) + .append("normalizationTargets", normalizationTargets) .toString(); } @@ -89,16 +78,37 @@ public boolean equals(Object o) { MergeNormalizationPlan that = (MergeNormalizationPlan) o; return new EqualsBuilder() - .append(firstRegion, that.firstRegion) - .append(secondRegion, that.secondRegion) + .append(normalizationTargets, that.normalizationTargets) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) - .append(firstRegion) - .append(secondRegion) + .append(normalizationTargets) .toHashCode(); } + + /** + * A helper for constructing instances of {@link MergeNormalizationPlan}. + */ + static class Builder { + + private final List normalizationTargets = new LinkedList<>(); + + public Builder setTargets(final List targets) { + normalizationTargets.clear(); + normalizationTargets.addAll(targets); + return this; + } + + public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) { + normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb)); + return this; + } + + public MergeNormalizationPlan build() { + return new MergeNormalizationPlan(normalizationTargets); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java index cd13f69e764e..3bfae14e0b7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,12 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; /** - * Interface for normalization plan. + * A {@link NormalizationPlan} describes some modification to region split points as identified + * by an instance of {@link RegionNormalizer}. It is a POJO describing what action needs taken + * and the regions it targets. */ @InterfaceAudience.Private public interface NormalizationPlan { @@ -33,15 +32,6 @@ enum PlanType { NONE } - /** - * Submits normalization plan on cluster (does actual splitting/merging work) and - * returns proc Id to caller. 
- * @param masterServices instance of {@link MasterServices} - * @return Proc Id for the submitted task - * @throws IOException If plan submission to Admin fails - */ - long submit(MasterServices masterServices) throws IOException; - /** * @return the type of this plan */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java new file mode 100644 index 000000000000..95490288cef9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.util.Objects; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A POJO that caries details about a region selected for normalization through the pipeline. 
+ */ +@InterfaceAudience.Private +class NormalizationTarget { + private final RegionInfo regionInfo; + private final long regionSizeMb; + + NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) { + this.regionInfo = Objects.requireNonNull(regionInfo); + this.regionSizeMb = regionSizeMb; + } + + public RegionInfo getRegionInfo() { + return regionInfo; + } + + public long getRegionSizeMb() { + return regionSizeMb; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + NormalizationTarget that = (NormalizationTarget) o; + + return new EqualsBuilder() + .append(regionSizeMb, that.regionSizeMb) + .append(regionInfo, that.regionInfo) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37) + .append(regionInfo) + .append(regionSizeMb) + .toHashCode(); + } + + @Override public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("regionInfo", regionInfo) + .append("regionSizeMb", regionSizeMb) + .toString(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java index 672171d1caff..6f939daeda92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java @@ -20,13 +20,9 @@ import java.util.List; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; /** * Performs "normalization" of regions of a table, making sure that suboptimal @@ -39,8 +35,7 @@ * "split/merge storms". */ @InterfaceAudience.Private -@InterfaceStability.Evolving -public interface RegionNormalizer extends Configurable { +interface RegionNormalizer extends Configurable { /** * Set the master service. Must be called before first call to * {@link #computePlansForTable(TableName)}. @@ -55,20 +50,5 @@ public interface RegionNormalizer extends Configurable { * @return A list of the normalization actions to perform, or an empty list * if there's nothing to do. 
*/ - List computePlansForTable(TableName table) - throws HBaseIOException; - - /** - * Notification for the case where plan couldn't be executed due to constraint violation, such as - * namespace quota - * @param hri the region which is involved in the plan - * @param type type of plan - */ - void planSkipped(RegionInfo hri, PlanType type); - - /** - * @param type type of plan for which skipped count is to be returned - * @return the count of plans of specified type which were skipped - */ - long getSkippedCount(NormalizationPlan.PlanType type); + List computePlansForTable(TableName table); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java index 19d2dc7a3ba9..d56acc2a935e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,34 +17,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import java.io.IOException; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.master.HMaster; - -import java.io.IOException; /** - * Chore that will call {@link org.apache.hadoop.hbase.master.HMaster#normalizeRegions()} - * when needed. + * Chore that will periodically call + * {@link HMaster#normalizeRegions(NormalizeTableFilterParams, boolean)}. */ @InterfaceAudience.Private -public class RegionNormalizerChore extends ScheduledChore { +class RegionNormalizerChore extends ScheduledChore { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerChore.class); - private final HMaster master; + private final MasterServices master; - public RegionNormalizerChore(HMaster master) { + public RegionNormalizerChore(MasterServices master) { super(master.getServerName() + "-RegionNormalizerChore", master, - master.getConfiguration().getInt("hbase.normalizer.period", 300000)); + master.getConfiguration().getInt("hbase.normalizer.period", 300_000)); this.master = master; } @Override protected void chore() { try { - master.normalizeRegions(); + master.normalizeRegions(new NormalizeTableFilterParams.Builder().build(), false); } catch (IOException e) { LOG.error("Failed to normalize regions.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java index 06774c97a81e..92d16648fcd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Factory to create instance of {@link RegionNormalizer} as configured. @@ -32,13 +35,30 @@ public final class RegionNormalizerFactory { private RegionNormalizerFactory() { } + public static RegionNormalizerManager createNormalizerManager( + final Configuration conf, + final ZKWatcher zkWatcher, + final HMaster master // TODO: consolidate this down to MasterServices + ) { + final RegionNormalizer regionNormalizer = getRegionNormalizer(conf); + regionNormalizer.setMasterServices(master); + final RegionNormalizerTracker tracker = new RegionNormalizerTracker(zkWatcher, master); + final RegionNormalizerChore chore = + master.isInMaintenanceMode() ? null : new RegionNormalizerChore(master); + final RegionNormalizerWorkQueue workQueue = + master.isInMaintenanceMode() ? null : new RegionNormalizerWorkQueue<>(); + final RegionNormalizerWorker worker = master.isInMaintenanceMode() + ? null + : new RegionNormalizerWorker(conf, master, regionNormalizer, workQueue); + return new RegionNormalizerManager(tracker, chore, workQueue, worker); + } + /** * Create a region normalizer from the given conf. * @param conf configuration * @return {@link RegionNormalizer} implementation */ - public static RegionNormalizer getRegionNormalizer(Configuration conf) { - + private static RegionNormalizer getRegionNormalizer(Configuration conf) { // Create instance of Region Normalizer Class balancerKlass = conf.getClass(HConstants.HBASE_MASTER_NORMALIZER_CLASS, SimpleRegionNormalizer.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java new file mode 100644 index 000000000000..b4d16e796731 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This class encapsulates the details of the {@link RegionNormalizer} subsystem. + */ +@InterfaceAudience.Private +public class RegionNormalizerManager implements PropagatingConfigurationObserver { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerManager.class); + + private final RegionNormalizerTracker regionNormalizerTracker; + private final RegionNormalizerChore regionNormalizerChore; + private final RegionNormalizerWorkQueue workQueue; + private final RegionNormalizerWorker worker; + private final ExecutorService pool; + + private final Object startStopLock = new Object(); + private boolean started = false; + private boolean stopped = false; + + RegionNormalizerManager( + @NonNull final RegionNormalizerTracker regionNormalizerTracker, + @Nullable final RegionNormalizerChore regionNormalizerChore, + @Nullable final RegionNormalizerWorkQueue workQueue, + @Nullable final RegionNormalizerWorker worker + ) { + this.regionNormalizerTracker = regionNormalizerTracker; + this.regionNormalizerChore = regionNormalizerChore; + this.workQueue = workQueue; + this.worker = worker; + this.pool = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("normalizer-worker-%d") + .setUncaughtExceptionHandler( + (thread, throwable) -> + LOG.error("Uncaught exception, worker thread likely terminated.", throwable)) + .build()); + } + + @Override + public void registerChildren(ConfigurationManager manager) { + if (worker != null) { + manager.registerObserver(worker); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (worker != null) { + manager.deregisterObserver(worker); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + // no configuration managed here directly. + } + + public void start() { + synchronized (startStopLock) { + if (started) { + return; + } + regionNormalizerTracker.start(); + if (worker != null) { + // worker will be null when master is in maintenance mode. 
+ pool.submit(worker); + } + started = true; + } + } + + public void stop() { + synchronized (startStopLock) { + if (!started) { + throw new IllegalStateException("calling `stop` without first calling `start`."); + } + if (stopped) { + return; + } + pool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()` + regionNormalizerTracker.stop(); + stopped = true; + } + } + + public ScheduledChore getRegionNormalizerChore() { + return regionNormalizerChore; + } + + /** + * Return {@code true} if region normalizer is on, {@code false} otherwise + */ + public boolean isNormalizerOn() { + return regionNormalizerTracker.isNormalizerOn(); + } + + /** + * Set region normalizer on/off + * @param normalizerOn whether normalizer should be on or off + */ + public void setNormalizerOn(boolean normalizerOn) { + try { + regionNormalizerTracker.setNormalizerOn(normalizerOn); + } catch (KeeperException e) { + LOG.warn("Error flipping normalizer switch", e); + } + } + + /** + * Call-back for the case where plan couldn't be executed due to constraint violation, + * such as namespace quota. + * @param type type of plan that was skipped. + */ + public void planSkipped(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. + if (worker != null) { + worker.planSkipped(type); + } + } + + /** + * Retrieve a count of the number of times plans of type {@code type} were submitted but skipped. + * @param type type of plan for which skipped count is to be returned + */ + public long getSkippedCount(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. + return worker == null ? 0 : worker.getSkippedCount(type); + } + + /** + * Return the number of times a {@link SplitNormalizationPlan} has been submitted. + */ + public long getSplitPlanCount() { + return worker == null ? 0 : worker.getSplitPlanCount(); + } + + /** + * Return the number of times a {@link MergeNormalizationPlan} has been submitted. + */ + public long getMergePlanCount() { + return worker == null ? 0 : worker.getMergePlanCount(); + } + + /** + * Submit tables for normalization. + * @param tables a list of tables to submit. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when work was queued, {@code false} otherwise. + */ + public boolean normalizeRegions(List tables, boolean isHighPriority) { + if (workQueue == null) { + return false; + } + if (isHighPriority) { + workQueue.putAllFirst(tables); + } else { + workQueue.putAll(tables); + } + return true; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java new file mode 100644 index 000000000000..5ebb4f9ad08d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A specialized collection that holds pending work for the {@link RegionNormalizerWorker}. It is + * an ordered collection class that has the following properties: + *

      + *
+ * <ul>
+ *   <li>Guarantees uniqueness of elements, as a {@link Set}.</li>
+ *   <li>Consumers retrieve objects from the head, as a {@link Queue}, via {@link #take()}.</li>
+ *   <li>Work is retrieved on a FIFO policy.</li>
+ *   <li>Work retrieval blocks the calling thread until new work is available, as a
+ *   {@link BlockingQueue}.</li>
+ *   <li>Allows a producer to insert an item at the head of the queue, if desired.</li>
+ * </ul>
    + * Assumes low-frequency and low-parallelism concurrent access, so protects state using a + * simplistic synchronization strategy. + */ +@InterfaceAudience.Private +class RegionNormalizerWorkQueue { + + /** Underlying storage structure that gives us the Set behavior and FIFO retrieval policy. */ + private LinkedHashSet delegate; + + // the locking structure used here follows the example found in LinkedBlockingQueue. The + // difference is that our locks guard access to `delegate` rather than the head node. + + /** Lock held by take, poll, etc */ + private final ReentrantLock takeLock; + + /** Wait queue for waiting takes */ + private final Condition notEmpty; + + /** Lock held by put, offer, etc */ + private final ReentrantLock putLock; + + RegionNormalizerWorkQueue() { + delegate = new LinkedHashSet<>(); + takeLock = new ReentrantLock(); + notEmpty = takeLock.newCondition(); + putLock = new ReentrantLock(); + } + + /** + * Signals a waiting take. Called only from put/offer (which do not + * otherwise ordinarily lock takeLock.) + */ + private void signalNotEmpty() { + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + } + + /** + * Locks to prevent both puts and takes. + */ + private void fullyLock() { + putLock.lock(); + takeLock.lock(); + } + + /** + * Unlocks to allow both puts and takes. + */ + private void fullyUnlock() { + takeLock.unlock(); + putLock.unlock(); + } + + /** + * Inserts the specified element at the tail of the queue, if it's not already present. + * + * @param e the element to add + */ + public void put(E e) { + if (e == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.add(e); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified element at the head of the queue. + * + * @param e the element to add + */ + public void putFirst(E e) { + if (e == null) { + throw new NullPointerException(); + } + putAllFirst(Collections.singleton(e)); + } + + /** + * Inserts the specified elements at the tail of the queue. Any elements already present in + * the queue are ignored. + * + * @param c the elements to add + */ + public void putAll(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.addAll(c); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified elements at the head of the queue. + * + * @param c the elements to add + */ + public void putAllFirst(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + fullyLock(); + try { + final LinkedHashSet copy = new LinkedHashSet<>(c.size() + delegate.size()); + copy.addAll(c); + copy.addAll(delegate); + delegate = copy; + } finally { + fullyUnlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Retrieves and removes the head of this queue, waiting if necessary + * until an element becomes available. 
+ * + * @return the head of this queue + * @throws InterruptedException if interrupted while waiting + */ + public E take() throws InterruptedException { + E x; + takeLock.lockInterruptibly(); + try { + while (delegate.isEmpty()) { + notEmpty.await(); + } + final Iterator iter = delegate.iterator(); + x = iter.next(); + iter.remove(); + if (!delegate.isEmpty()) { + notEmpty.signal(); + } + } finally { + takeLock.unlock(); + } + return x; + } + + /** + * Atomically removes all of the elements from this queue. + * The queue will be empty after this call returns. + */ + public void clear() { + putLock.lock(); + try { + delegate.clear(); + } finally { + putLock.unlock(); + } + } + + /** + * Returns the number of elements in this queue. + * + * @return the number of elements in this queue + */ + public int size() { + takeLock.lock(); + try { + return delegate.size(); + } finally { + takeLock.unlock(); + } + } + + @Override + public String toString() { + takeLock.lock(); + try { + return delegate.toString(); + } finally { + takeLock.unlock(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java new file mode 100644 index 000000000000..408317a31f87 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.io.IOException; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + +/** + * Consumes normalization request targets ({@link TableName}s) off the + * {@link RegionNormalizerWorkQueue}, dispatches them to the {@link RegionNormalizer}, + * and executes the resulting {@link NormalizationPlan}s. 
+ */ +@InterfaceAudience.Private +class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class); + + static final String RATE_LIMIT_BYTES_PER_SEC_KEY = + "hbase.normalizer.throughput.max_bytes_per_sec"; + private static final long RATE_UNLIMITED_BYTES = 1_000_000_000_000L; // 1TB/sec + + private final MasterServices masterServices; + private final RegionNormalizer regionNormalizer; + private final RegionNormalizerWorkQueue workQueue; + private final RateLimiter rateLimiter; + + private final long[] skippedCount; + private long splitPlanCount; + private long mergePlanCount; + + RegionNormalizerWorker( + final Configuration configuration, + final MasterServices masterServices, + final RegionNormalizer regionNormalizer, + final RegionNormalizerWorkQueue workQueue + ) { + this.masterServices = masterServices; + this.regionNormalizer = regionNormalizer; + this.workQueue = workQueue; + this.skippedCount = new long[NormalizationPlan.PlanType.values().length]; + this.splitPlanCount = 0; + this.mergePlanCount = 0; + this.rateLimiter = loadRateLimiter(configuration); + } + + @Override + public void registerChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.registerObserver(observer); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.deregisterObserver(observer); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + rateLimiter.setRate(loadRateLimit(conf)); + } + + private static RateLimiter loadRateLimiter(final Configuration configuration) { + return RateLimiter.create(loadRateLimit(configuration)); + } + + private static long loadRateLimit(final Configuration configuration) { + long rateLimitBytes = + configuration.getLongBytes(RATE_LIMIT_BYTES_PER_SEC_KEY, RATE_UNLIMITED_BYTES); + long rateLimitMbs = rateLimitBytes / 1_000_000L; + if (rateLimitMbs <= 0) { + LOG.warn("Configured value {}={} is <= 1MB. Falling back to default.", + RATE_LIMIT_BYTES_PER_SEC_KEY, rateLimitBytes); + rateLimitBytes = RATE_UNLIMITED_BYTES; + rateLimitMbs = RATE_UNLIMITED_BYTES / 1_000_000L; + } + LOG.info("Normalizer rate limit set to {}", + rateLimitBytes == RATE_UNLIMITED_BYTES ? "unlimited" : rateLimitMbs + " MB/sec"); + return rateLimitMbs; + } + + /** + * @see RegionNormalizerManager#planSkipped(NormalizationPlan.PlanType) + */ + void planSkipped(NormalizationPlan.PlanType type) { + synchronized (skippedCount) { + // updates come here via procedure threads, so synchronize access to this counter. + skippedCount[type.ordinal()]++; + } + } + + /** + * @see RegionNormalizerManager#getSkippedCount(NormalizationPlan.PlanType) + */ + long getSkippedCount(NormalizationPlan.PlanType type) { + return skippedCount[type.ordinal()]; + } + + /** + * @see RegionNormalizerManager#getSplitPlanCount() + */ + long getSplitPlanCount() { + return splitPlanCount; + } + + /** + * @see RegionNormalizerManager#getMergePlanCount() + */ + long getMergePlanCount() { + return mergePlanCount; + } + + /** + * Used in test only. 
This field is exposed to the test, as opposed to tracking the current + * configuration value beside the RateLimiter instance and managing synchronization to keep the + * two in sync. + */ + RateLimiter getRateLimiter() { + return rateLimiter; + } + + @Override + public void run() { + while (true) { + if (Thread.interrupted()) { + LOG.debug("interrupt detected. terminating."); + break; + } + final TableName tableName; + try { + tableName = workQueue.take(); + } catch (InterruptedException e) { + LOG.debug("interrupt detected. terminating."); + break; + } + + final List plans = calculatePlans(tableName); + submitPlans(plans); + } + } + + private List calculatePlans(final TableName tableName) { + if (masterServices.skipRegionManagementAction("region normalizer")) { + return Collections.emptyList(); + } + + try { + final TableDescriptor tblDesc = masterServices.getTableDescriptors().get(tableName); + if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { + LOG.debug("Skipping table {} because normalization is disabled in its table properties.", + tableName); + return Collections.emptyList(); + } + } catch (IOException e) { + LOG.debug("Skipping table {} because unable to access its table descriptor.", tableName, e); + return Collections.emptyList(); + } + + final List plans = regionNormalizer.computePlansForTable(tableName); + if (CollectionUtils.isEmpty(plans)) { + LOG.debug("No normalization required for table {}.", tableName); + return Collections.emptyList(); + } + return plans; + } + + private void submitPlans(final List plans) { + // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to submit + // task, so there's no artificial rate-limiting of merge/split requests due to this serial loop. + for (NormalizationPlan plan : plans) { + switch (plan.getType()) { + case MERGE: { + submitMergePlan((MergeNormalizationPlan) plan); + break; + } + case SPLIT: { + submitSplitPlan((SplitNormalizationPlan) plan); + break; + } + case NONE: + LOG.debug("Nothing to do for {} with PlanType=NONE. Ignoring.", plan); + planSkipped(plan.getType()); + break; + default: + LOG.warn("Plan {} is of an unrecognized PlanType. Ignoring.", plan); + planSkipped(plan.getType()); + break; + } + } + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. + */ + private void submitMergePlan(final MergeNormalizationPlan plan) { + final int totalSizeMb; + try { + final long totalSizeMbLong = plan.getNormalizationTargets() + .stream() + .mapToLong(NormalizationTarget::getRegionSizeMb) + .reduce(0, Math::addExact); + totalSizeMb = Math.toIntExact(totalSizeMbLong); + } catch (ArithmeticException e) { + LOG.debug("Sum of merge request size overflows rate limiter data type. {}", plan); + planSkipped(plan.getType()); + return; + } + + final RegionInfo[] infos = plan.getNormalizationTargets() + .stream() + .map(NormalizationTarget::getRegionInfo) + .toArray(RegionInfo[]::new); + final long pid; + try { + pid = masterServices.mergeRegions( + infos, false, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + mergePlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed the worker by {}", Duration.ofSeconds(rateLimitedSecs)); + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. 
+ */ + private void submitSplitPlan(final SplitNormalizationPlan plan) { + final int totalSizeMb; + try { + totalSizeMb = Math.toIntExact(plan.getSplitTarget().getRegionSizeMb()); + } catch (ArithmeticException e) { + LOG.debug("Split request size overflows rate limiter data type. {}", plan); + planSkipped(plan.getType()); + return; + } + final RegionInfo info = plan.getSplitTarget().getRegionInfo(); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed this operation by {}", Duration.ofSeconds(rateLimitedSecs)); + + final long pid; + try { + pid = masterServices.splitRegion( + info, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + splitPlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a904e17f7b0f..52455686895f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -17,32 +17,34 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import static org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils.isEmpty; import java.io.IOException; import java.time.Instant; import java.time.Period; import java.util.ArrayList; import java.util.Collections; +import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.function.BooleanSupplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * Simple implementation of region normalizer. Logic in use: @@ -54,29 +56,9 @@ *
 *   <li>Otherwise, for the next region in the chain R1, if R0 + R1 is smaller than S, R0 and R1
 *   are kindly requested to merge.</li>
 * </ul>
- * <p>
- * The following parameters are configurable:
- * <ol>
- *   <li>Whether to split a region as part of normalization. Configuration:
- *     {@value #SPLIT_ENABLED_KEY}, default: {@value #DEFAULT_SPLIT_ENABLED}.</li>
- *   <li>Whether to merge a region as part of normalization. Configuration:
- *     {@value #MERGE_ENABLED_KEY}, default: {@value #DEFAULT_MERGE_ENABLED}.</li>
- *   <li>The minimum number of regions in a table to consider it for merge normalization.
- *     Configuration: {@value #MIN_REGION_COUNT_KEY}, default:
- *     {@value #DEFAULT_MIN_REGION_COUNT}.</li>
- *   <li>The minimum age for a region to be considered for a merge, in days. Configuration:
- *     {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.</li>
- *   <li>The minimum size for a region to be considered for a merge, in whole MBs. Configuration:
- *     {@value #MERGE_MIN_REGION_SIZE_MB_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_SIZE_MB}.</li>
- * </ol>
- * <p>
    - * To see detailed logging of the application of these configuration values, set the log level for - * this class to `TRACE`. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class SimpleRegionNormalizer implements RegionNormalizer { +class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class); static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled"; @@ -92,27 +74,17 @@ public class SimpleRegionNormalizer implements RegionNormalizer { static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1; - private final long[] skippedCount; - private Configuration conf; private MasterServices masterServices; - private boolean splitEnabled; - private boolean mergeEnabled; - private int minRegionCount; - private Period mergeMinRegionAge; - private int mergeMinRegionSizeMb; + private NormalizerConfiguration normalizerConfiguration; public SimpleRegionNormalizer() { - skippedCount = new long[NormalizationPlan.PlanType.values().length]; - splitEnabled = DEFAULT_SPLIT_ENABLED; - mergeEnabled = DEFAULT_MERGE_ENABLED; - minRegionCount = DEFAULT_MIN_REGION_COUNT; - mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); - mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + masterServices = null; + normalizerConfiguration = new NormalizerConfiguration(); } @Override public Configuration getConf() { - return conf; + return normalizerConfiguration.getConf(); } @Override @@ -120,12 +92,13 @@ public void setConf(final Configuration conf) { if (conf == null) { return; } - this.conf = conf; - splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); - mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); - minRegionCount = parseMinRegionCount(conf); - mergeMinRegionAge = parseMergeMinRegionAge(conf); - mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + normalizerConfiguration = new NormalizerConfiguration(conf, normalizerConfiguration); + } + + @Override + public void onConfigurationChange(Configuration conf) { + LOG.debug("Updating configuration parameters according to new configuration instance."); + setConf(conf); } private static int parseMinRegionCount(final Configuration conf) { @@ -147,10 +120,10 @@ private static Period parseMergeMinRegionAge(final Configuration conf) { return Period.ofDays(settledValue); } - private static int parseMergeMinRegionSizeMb(final Configuration conf) { - final int parsedValue = - conf.getInt(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); - final int settledValue = Math.max(0, parsedValue); + private static long parseMergeMinRegionSizeMb(final Configuration conf) { + final long parsedValue = + conf.getLong(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); + final long settledValue = Math.max(0, parsedValue); if (parsedValue != settledValue) { warnInvalidValue(MERGE_MIN_REGION_SIZE_MB_KEY, parsedValue, settledValue); } @@ -163,39 +136,46 @@ private static void warnInvalidValue(final String key, final T parsedValue, key, parsedValue, settledValue); } + private static void logConfigurationUpdated(final String key, final T oldValue, + final T newValue) { + if (!Objects.equals(oldValue, newValue)) { + LOG.info("Updated configuration for key '{}' from {} to {}", key, oldValue, newValue); + } + } + /** * Return this instance's configured value for 
{@value #SPLIT_ENABLED_KEY}. */ public boolean isSplitEnabled() { - return splitEnabled; + return normalizerConfiguration.isSplitEnabled(); } /** * Return this instance's configured value for {@value #MERGE_ENABLED_KEY}. */ public boolean isMergeEnabled() { - return mergeEnabled; + return normalizerConfiguration.isMergeEnabled(); } /** * Return this instance's configured value for {@value #MIN_REGION_COUNT_KEY}. */ public int getMinRegionCount() { - return minRegionCount; + return normalizerConfiguration.getMinRegionCount(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}. */ public Period getMergeMinRegionAge() { - return mergeMinRegionAge; + return normalizerConfiguration.getMergeMinRegionAge(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ - public int getMergeMinRegionSizeMb() { - return mergeMinRegionSizeMb; + public long getMergeMinRegionSizeMb() { + return normalizerConfiguration.getMergeMinRegionSizeMb(); } @Override @@ -203,16 +183,6 @@ public void setMasterServices(final MasterServices masterServices) { this.masterServices = masterServices; } - @Override - public void planSkipped(final RegionInfo hri, final PlanType type) { - skippedCount[type.ordinal()]++; - } - - @Override - public long getSkippedCount(NormalizationPlan.PlanType type) { - return skippedCount[type.ordinal()]; - } - @Override public List computePlansForTable(final TableName table) { if (table == null) { @@ -231,7 +201,7 @@ public List computePlansForTable(final TableName table) { } final NormalizeContext ctx = new NormalizeContext(table); - if (CollectionUtils.isEmpty(ctx.getTableRegions())) { + if (isEmpty(ctx.getTableRegions())) { return Collections.emptyList(); } @@ -239,14 +209,21 @@ public List computePlansForTable(final TableName table) { ctx.getTableRegions().size()); final List plans = new ArrayList<>(); + int splitPlansCount = 0; if (proceedWithSplitPlanning) { - plans.addAll(computeSplitNormalizationPlans(ctx)); + List splitPlans = computeSplitNormalizationPlans(ctx); + splitPlansCount = splitPlans.size(); + plans.addAll(splitPlans); } + int mergePlansCount = 0; if (proceedWithMergePlanning) { - plans.addAll(computeMergeNormalizationPlans(ctx)); + List mergePlans = computeMergeNormalizationPlans(ctx); + mergePlansCount = mergePlans.size(); + plans.addAll(mergePlans); } - LOG.debug("Computed {} normalization plans for table {}", plans.size(), table); + LOG.debug("Computed normalization plans for table {}. 
Total plans: {}, split plans: {}, " + + "merge plans: {}", table, plans.size(), splitPlansCount, mergePlansCount); return plans; } @@ -256,8 +233,16 @@ public List computePlansForTable(final TableName table) { private long getRegionSizeMB(RegionInfo hri) { ServerName sn = masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); - RegionMetrics regionLoad = - masterServices.getServerManager().getLoad(sn).getRegionMetrics().get(hri.getRegionName()); + if (sn == null) { + LOG.debug("{} region was not found on any Server", hri.getRegionNameAsString()); + return -1; + } + ServerMetrics serverMetrics = masterServices.getServerManager().getLoad(sn); + if (serverMetrics == null) { + LOG.debug("server {} was not found in ServerManager", sn.getServerName()); + return -1; + } + RegionMetrics regionLoad = serverMetrics.getRegionMetrics().get(hri.getRegionName()); if (regionLoad == null) { LOG.debug("{} was not found in RegionsLoad", hri.getRegionNameAsString()); return -1; @@ -284,20 +269,17 @@ private boolean proceedWithMergePlanning() { * Also make sure tableRegions contains regions of the same table */ private double getAverageRegionSizeMb(final List tableRegions) { - if (CollectionUtils.isEmpty(tableRegions)) { + if (isEmpty(tableRegions)) { throw new IllegalStateException( "Cannot calculate average size of a table without any regions."); } - final int regionCount = tableRegions.size(); - final long totalSizeMb = tableRegions.stream() - .mapToLong(this::getRegionSizeMB) - .sum(); TableName table = tableRegions.get(0).getTable(); int targetRegionCount = -1; long targetRegionSize = -1; + double avgRegionSize; try { TableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table); - if (tableDescriptor != null && LOG.isDebugEnabled()) { + if (tableDescriptor != null) { targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount(); targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize(); LOG.debug("Table {} configured with target region count {}, target region size {}", table, @@ -307,25 +289,36 @@ private double getAverageRegionSizeMb(final List tableRegions) { LOG.warn("TableDescriptor for {} unavailable, table-level target region count and size" + " configurations cannot be considered.", table, e); } - - double avgRegionSize; if (targetRegionSize > 0) { avgRegionSize = targetRegionSize; - } else if (targetRegionCount > 0) { - avgRegionSize = totalSizeMb / (double) targetRegionCount; } else { - avgRegionSize = totalSizeMb / (double) regionCount; + final int regionCount = tableRegions.size(); + final long totalSizeMb = tableRegions.stream() + .mapToLong(this::getRegionSizeMB) + .sum(); + if (targetRegionCount > 0) { + avgRegionSize = totalSizeMb / (double) targetRegionCount; + } else { + avgRegionSize = totalSizeMb / (double) regionCount; + } + LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, + totalSizeMb, avgRegionSize); } - LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, - totalSizeMb, avgRegionSize); return avgRegionSize; } /** * Determine if a {@link RegionInfo} should be considered for a merge operation. + *

    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean skipForMerge(final RegionStates regionStates, final RegionInfo regionInfo) { + private boolean skipForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionStates regionStates, + final RegionInfo regionInfo + ) { final RegionState state = regionStates.getRegionState(regionInfo); final String name = regionInfo.getEncodedName(); return @@ -336,10 +329,10 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r () -> !Objects.equals(state.getState(), RegionState.State.OPEN), "skipping merge of region {} because it is not open.", name) || logTraceReason( - () -> !isOldEnoughForMerge(regionInfo), + () -> !isOldEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge of region {} because it is not old enough.", name) || logTraceReason( - () -> !isLargeEnoughForMerge(regionInfo), + () -> !isLargeEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge region {} because it is not large enough.", name); } @@ -348,31 +341,65 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r * towards target average or target region count. */ private List computeMergeNormalizationPlans(final NormalizeContext ctx) { - if (ctx.getTableRegions().size() < minRegionCount) { + final NormalizerConfiguration configuration = normalizerConfiguration; + if (ctx.getTableRegions().size() < configuration.getMinRegionCount()) { LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run" - + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(), - minRegionCount); + + " is {}, not computing merge plans.", ctx.getTableName(), + ctx.getTableRegions().size(), configuration.getMinRegionCount()); return Collections.emptyList(); } - final double avgRegionSizeMb = ctx.getAverageRegionSizeMb(); + final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb(); + if (avgRegionSizeMb < configuration.getMergeMinRegionSizeMb()) { + return Collections.emptyList(); + } LOG.debug("Computing normalization plan for table {}. average region size: {}, number of" + " regions: {}.", ctx.getTableName(), avgRegionSizeMb, ctx.getTableRegions().size()); - final List plans = new ArrayList<>(); - for (int candidateIdx = 0; candidateIdx < ctx.getTableRegions().size() - 1; candidateIdx++) { - final RegionInfo current = ctx.getTableRegions().get(candidateIdx); - final RegionInfo next = ctx.getTableRegions().get(candidateIdx + 1); - if (skipForMerge(ctx.getRegionStates(), current) - || skipForMerge(ctx.getRegionStates(), next)) { - continue; + // this nested loop walks the table's region chain once, looking for contiguous sequences of + // regions that meet the criteria for merge. The outer loop tracks the starting point of the + // next sequence, the inner loop looks for the end of that sequence. A single sequence becomes + // an instance of MergeNormalizationPlan. + + final List plans = new LinkedList<>(); + final List rangeMembers = new LinkedList<>(); + long sumRangeMembersSizeMb; + int current = 0; + for (int rangeStart = 0; + rangeStart < ctx.getTableRegions().size() - 1 && current < ctx.getTableRegions().size();) { + // walk the region chain looking for contiguous sequences of regions that can be merged. 
+ rangeMembers.clear(); + sumRangeMembersSizeMb = 0; + for (current = rangeStart; current < ctx.getTableRegions().size(); current++) { + final RegionInfo regionInfo = ctx.getTableRegions().get(current); + final long regionSizeMb = getRegionSizeMB(regionInfo); + if (skipForMerge(configuration, ctx.getRegionStates(), regionInfo)) { + // this region cannot participate in a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; + } + if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever + // we have. this way we're prepared in case the next region is + // 0-size. + || (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0) // when there is only one + // region and the size is 0, + // seed the range with + // whatever we have. + || regionSizeMb == 0 // always add an empty region to the current range. + || (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region + // to the range when + // there's capacity + // remaining. + rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb)); + sumRangeMembersSizeMb += regionSizeMb; + continue; + } + // we have accumulated enough regions to fill a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; } - final long currentSizeMb = getRegionSizeMB(current); - final long nextSizeMb = getRegionSizeMB(next); - // always merge away empty regions when they present themselves. - if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) { - plans.add(new MergeNormalizationPlan(current, next)); - candidateIdx++; + if (rangeMembers.size() > 1) { + plans.add(new MergeNormalizationPlan.Builder().setTargets(rangeMembers).build()); } } return plans; @@ -408,11 +435,11 @@ private List computeSplitNormalizationPlans(final NormalizeCo if (skipForSplit(ctx.getRegionStates().getRegionState(hri), hri)) { continue; } - final long regionSize = getRegionSizeMB(hri); - if (regionSize > 2 * avgRegionSize) { + final long regionSizeMb = getRegionSizeMB(hri); + if (regionSizeMb > 2 * avgRegionSize) { LOG.info("Table {}, large region {} has size {}, more than twice avg size {}, splitting", - ctx.getTableName(), hri.getRegionNameAsString(), regionSize, avgRegionSize); - plans.add(new SplitNormalizationPlan(hri)); + ctx.getTableName(), hri.getRegionNameAsString(), regionSizeMb, avgRegionSize); + plans.add(new SplitNormalizationPlan(hri, regionSizeMb)); } } return plans; @@ -422,18 +449,28 @@ private List computeSplitNormalizationPlans(final NormalizeCo * Return {@code true} when {@code regionInfo} has a creation date that is old * enough to be considered for a merge operation, {@code false} otherwise. */ - private boolean isOldEnoughForMerge(final RegionInfo regionInfo) { + private static boolean isOldEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { final Instant currentTime = Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime()); final Instant regionCreateTime = Instant.ofEpochMilli(regionInfo.getRegionId()); - return currentTime.isAfter(regionCreateTime.plus(mergeMinRegionAge)); + return currentTime.isAfter( + regionCreateTime.plus(normalizerConfiguration.getMergeMinRegionAge())); } /** * Return {@code true} when {@code regionInfo} has a size that is sufficient * to be considered for a merge operation, {@code false} otherwise. + *

    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean isLargeEnoughForMerge(final RegionInfo regionInfo) { - return getRegionSizeMB(regionInfo) >= mergeMinRegionSizeMb; + private boolean isLargeEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { + return getRegionSizeMB(regionInfo) >= normalizerConfiguration.getMergeMinRegionSizeMb(); } private static boolean logTraceReason(final BooleanSupplier predicate, final String fmtWhenTrue, @@ -445,6 +482,74 @@ private static boolean logTraceReason(final BooleanSupplier predicate, final Str return value; } + /** + * Holds the configuration values read from {@link Configuration}. Encapsulation in a POJO + * enables atomic hot-reloading of configs without locks. + */ + private static final class NormalizerConfiguration { + private final Configuration conf; + private final boolean splitEnabled; + private final boolean mergeEnabled; + private final int minRegionCount; + private final Period mergeMinRegionAge; + private final long mergeMinRegionSizeMb; + + private NormalizerConfiguration() { + conf = null; + splitEnabled = DEFAULT_SPLIT_ENABLED; + mergeEnabled = DEFAULT_MERGE_ENABLED; + minRegionCount = DEFAULT_MIN_REGION_COUNT; + mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); + mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + } + + private NormalizerConfiguration( + final Configuration conf, + final NormalizerConfiguration currentConfiguration + ) { + this.conf = conf; + splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); + mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); + minRegionCount = parseMinRegionCount(conf); + mergeMinRegionAge = parseMergeMinRegionAge(conf); + mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + logConfigurationUpdated(SPLIT_ENABLED_KEY, currentConfiguration.isSplitEnabled(), + splitEnabled); + logConfigurationUpdated(MERGE_ENABLED_KEY, currentConfiguration.isMergeEnabled(), + mergeEnabled); + logConfigurationUpdated(MIN_REGION_COUNT_KEY, currentConfiguration.getMinRegionCount(), + minRegionCount); + logConfigurationUpdated(MERGE_MIN_REGION_AGE_DAYS_KEY, + currentConfiguration.getMergeMinRegionAge(), mergeMinRegionAge); + logConfigurationUpdated(MERGE_MIN_REGION_SIZE_MB_KEY, + currentConfiguration.getMergeMinRegionSizeMb(), mergeMinRegionSizeMb); + } + + public Configuration getConf() { + return conf; + } + + public boolean isSplitEnabled() { + return splitEnabled; + } + + public boolean isMergeEnabled() { + return mergeEnabled; + } + + public int getMinRegionCount() { + return minRegionCount; + } + + public Period getMergeMinRegionAge() { + return mergeMinRegionAge; + } + + public long getMergeMinRegionSizeMb() { + return mergeMinRegionSizeMb; + } + } + /** * Inner class caries the state necessary to perform a single invocation of * {@link #computePlansForTable(TableName)}. 
Grabbing this data from the assignment manager diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java index 7c634fbf2488..ffe68cc9f62d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; /** - * Normalization plan to split region. + * Normalization plan to split a region. */ @InterfaceAudience.Private -public class SplitNormalizationPlan implements NormalizationPlan { +final class SplitNormalizationPlan implements NormalizationPlan { - private final RegionInfo regionInfo; + private final NormalizationTarget splitTarget; - public SplitNormalizationPlan(RegionInfo regionInfo) { - this.regionInfo = regionInfo; - } - - @Override - public long submit(MasterServices masterServices) throws IOException { - return masterServices.splitRegion(regionInfo, null, HConstants.NO_NONCE, - HConstants.NO_NONCE); + SplitNormalizationPlan(final RegionInfo splitTarget, final long splitTargetSizeMb) { + this.splitTarget = new NormalizationTarget(splitTarget, splitTargetSizeMb); } @Override @@ -51,14 +42,14 @@ public PlanType getType() { return PlanType.SPLIT; } - public RegionInfo getRegionInfo() { - return regionInfo; + public NormalizationTarget getSplitTarget() { + return splitTarget; } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("regionInfo", regionInfo) + .append("splitTarget", splitTarget) .toString(); } @@ -75,13 +66,13 @@ public boolean equals(Object o) { SplitNormalizationPlan that = (SplitNormalizationPlan) o; return new EqualsBuilder() - .append(regionInfo, that.regionInfo) + .append(splitTarget, that.splitTarget) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) - .append(regionInfo) + .append(splitTarget) .toHashCode(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java new file mode 100644 index 000000000000..e3180347dc34 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The Region Normalizer subsystem is responsible for coaxing all the regions in a table toward + * a "normal" size, according to their storefile size. It does this by splitting regions that + * are significantly larger than the norm, and merging regions that are significantly smaller than + * the norm. + *
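In the SimpleRegionNormalizer hunks earlier in this patch, "significantly larger" concretely means more than twice the table's average region size, and merge ranges are accumulated only while their combined size stays at or below that average (empty regions are always folded in). A simplified, self-contained restatement of those two thresholds; the class and method names are illustrative and not part of the HBase API:

/** Illustrative thresholds only; the real normalizer also checks region state, age, and size floors. */
final class NormalizerRuleSketch {
  /** A region is a split candidate when it is more than twice the average region size. */
  static boolean shouldSplit(long regionSizeMb, double avgRegionSizeMb) {
    return regionSizeMb > 2 * avgRegionSizeMb;
  }

  /** A neighboring region joins the current merge range while the sum stays within the average. */
  static boolean fitsMergeRange(long sumOfRangeMb, long nextRegionSizeMb, double avgRegionSizeMb) {
    // Empty regions are always merged away when they present themselves.
    return nextRegionSizeMb == 0 || sumOfRangeMb + nextRegionSizeMb <= avgRegionSizeMb;
  }
}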

    + * The public interface to the Region Normalizer subsystem is limited to the following classes: + *
      + *
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory} provides an + * entry point for creating an instance of the + * {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager}. + *
    • + *
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager} encapsulates + * the whole Region Normalizer subsystem. You'll find one of these hanging off of the + * {@link org.apache.hadoop.hbase.master.HMaster}, which uses it to delegate API calls. There + * is usually only a single instance of this class. + *
    • + *
    • + * Various configuration points that share the common prefix of {@code hbase.normalizer}. + *
        + *
      • Whether to split a region as part of normalization. Configuration: + * {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#SPLIT_ENABLED_KEY}, + * default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_SPLIT_ENABLED}. + *
      • + *
      • Whether to merge a region as part of normalization. Configuration: + * {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_ENABLED_KEY}, + * default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_ENABLED}. + *
      • + *
      • The minimum number of regions in a table to consider it for merge normalization. + * Configuration: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MIN_REGION_COUNT_KEY}, + * default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MIN_REGION_COUNT}. + *
      • + *
      • The minimum age for a region to be considered for a merge, in days. Configuration: + * {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_AGE_DAYS_KEY}, + * default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_AGE_DAYS}. + *
      • + *
      • The minimum size for a region to be considered for a merge, in whole MBs. Configuration: + * {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_SIZE_MB_KEY}, + * default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_SIZE_MB}. + *
      • + *
      • The limit on total throughput of the Region Normalizer's actions, in whole MBs. Configuration: + * {@value org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker#RATE_LIMIT_BYTES_PER_SEC_KEY}, + * default: unlimited. + *
      • + *
      + *

      + * To see detailed logging of the application of these configuration values, set the log + * level for this package to `TRACE`. + *
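Of the keys listed above, only two literal property names are visible in this patch: hbase.normalizer.split.enabled and hbase.normalizer.merge.min_region_size.mb. A hedged example of tuning them through a plain Hadoop Configuration; the values chosen here are arbitrary:

import org.apache.hadoop.conf.Configuration;

public class NormalizerConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Both property names appear verbatim in SimpleRegionNormalizer; the values are arbitrary.
    conf.setBoolean("hbase.normalizer.split.enabled", true);
    conf.setLong("hbase.normalizer.merge.min_region_size.mb", 128L);
    System.out.println(conf.getBoolean("hbase.normalizer.split.enabled", false));
    System.out.println(conf.getLong("hbase.normalizer.merge.min_region_size.mb", 1L));
  }
}

Because SimpleRegionNormalizer now implements ConfigurationObserver and rebuilds an immutable NormalizerConfiguration in onConfigurationChange, such changes can be picked up by a running master without a restart.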

      + *
    • + *
    + * The Region Normalizer subsystem is composed of a handful of related classes: + *
      + *
    • + * The {@link org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker} provides a system by + * which the Normalizer can be disabled at runtime. It currently does this by managing a znode, + * but this is an implementation detail. + *
    • + *
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue} is a + * {@link java.util.Set}-like {@link java.util.Queue} that permits a single copy of a given + * work item to exist in the queue at one time. It also provides a facility for a producer to + * add an item to the front of the line. Consumers are blocked waiting for new work. + *
    • + *
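The "Set-like Queue" described above boils down to three behaviours: offering a duplicate is a no-op, consumers block until work exists, and producers can optionally jump the line. A minimal sketch of the first two behaviours under those assumptions; this is not the RegionNormalizerWorkQueue implementation, and the add-to-front facility is omitted for brevity:

import java.util.LinkedHashSet;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/** Illustrative only: a de-duplicating FIFO whose consumers block while it is empty. */
public class DedupingWorkQueue<E> {
  private final LinkedHashSet<E> items = new LinkedHashSet<>();
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition notEmpty = lock.newCondition();

  /** Adds {@code e} unless an equal item is already queued. */
  public void put(E e) {
    lock.lock();
    try {
      if (items.add(e)) {
        notEmpty.signal();
      }
    } finally {
      lock.unlock();
    }
  }

  /** Blocks until an item is available, then removes and returns the oldest one. */
  public E take() throws InterruptedException {
    lock.lock();
    try {
      while (items.isEmpty()) {
        notEmpty.await();
      }
      E head = items.iterator().next();
      items.remove(head);
      return head;
    } finally {
      lock.unlock();
    }
  }
}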
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore} wakes up + * periodically and schedules new normalization work, adding targets to the queue. + *
    • + *
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker} runs in a + * daemon thread, grabbing work off the queue as it becomes available. +
    • + *
    • + * The {@link org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer} implements the + * logic for calculating target region sizes and emitting a list of corresponding + * {@link org.apache.hadoop.hbase.master.normalizer.NormalizationPlan} objects. + *
    • + *
    + */ +package org.apache.hadoop.hbase.master.normalizer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java index e027a738fc79..bd6d44ca1660 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java @@ -29,8 +29,6 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Base class for all the Namespace procedures that want to use a StateMachineProcedure. It provide * some basic helpers like basic locking and basic toStringClassDetails(). @@ -114,7 +112,6 @@ protected static void createDirectory(MasterProcedureEnv env, NamespaceDescripto createDirectory(env.getMasterServices().getMasterFileSystem(), nsDescriptor); } - @VisibleForTesting public static void createDirectory(MasterFileSystem mfs, NamespaceDescriptor nsDescriptor) throws IOException { mfs.getFileSystem() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java index 1edfc74179ae..9b1dfc6a23a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java @@ -176,6 +176,11 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H } } + protected boolean isTableEnabled(MasterProcedureEnv env) { + return env.getMasterServices().getTableStateManager().isTableState(getTableName(), + TableState.State.ENABLED); + } + /** * Check region is online. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index e7162d9b3add..2313e70f75bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableExistsException; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -45,7 +47,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -235,8 +236,9 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) @Override protected boolean waitInitialized(MasterProcedureEnv env) { if (getTableName().isSystemTable()) { - // Creating system table is part of the initialization, so do not wait here. - return false; + // Creating system table is part of the initialization, so only wait for meta loaded instead + // of waiting for master fully initialized. 
+ return env.getAssignmentManager().waitMetaLoaded(this); } return super.waitInitialized(env); } @@ -363,23 +365,26 @@ protected static void moveTempDirectoryToHBaseRoot( } protected static List addTableToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, - final List regions) throws IOException { + final TableDescriptor tableDescriptor, final List regions) throws IOException { assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions; ProcedureSyncWait.waitMetaRegions(env); // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas(regions, 1, - tableDescriptor.getRegionReplication()); + List newRegions = + RegionReplicaUtil.addReplicas(regions, 1, tableDescriptor.getRegionReplication()); // Add regions to META addRegionsToMeta(env, tableDescriptor, newRegions); // Setup replication for region replicas if needed if (tableDescriptor.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); + } } return newRegions; } @@ -415,7 +420,6 @@ protected boolean shouldWaitClientAck(MasterProcedureEnv env) { return !getTableName().isSystemTable(); } - @VisibleForTesting RegionInfo getFirstRegionInfo() { if (newRegions == null || newRegions.isEmpty()) { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 5b118a4f37c5..80dddc7ccda1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -40,12 +39,14 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -98,7 +99,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s // TODO: Move out... 
in the acquireLock() LOG.debug("Waiting for RIT for {}", this); - regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()); + regions = env.getAssignmentManager().getRegionStates() + .getRegionsOfTableForDeleting(getTableName()); assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; ProcedureSyncWait.waitRegionInTransition(env, regions); @@ -357,22 +359,29 @@ protected static void deleteFromFs(final MasterProcedureEnv env, /** * There may be items for this table still up in hbase:meta in the case where the info:regioninfo * column was empty because of some write error. Remove ALL rows from hbase:meta that have to do - * with this table. See HBASE-12980. + * with this table. + *

    + * See HBASE-12980. */ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName) - throws IOException { - Connection connection = env.getMasterServices().getConnection(); - Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - List deletes = new ArrayList<>(); - try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { - for (Result result : resScanner) { - deletes.add(new Delete(result.getRow())); + throws IOException { + Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName) + .setFilter(new KeyOnlyFilter()); + long now = EnvironmentEdgeManager.currentTime(); + List deletes = new ArrayList<>(); + try ( + Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME); + ResultScanner scanner = metaTable.getScanner(tableScan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; } + deletes.add(new Delete(result.getRow(), now)); } if (!deletes.isEmpty()) { - LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " - + TableName.META_TABLE_NAME); + LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " + + TableName.META_TABLE_NAME); metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 8ad3ae6d33c6..8b295ec72fc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -20,17 +20,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.CatalogFamilyFormat; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -100,36 +94,26 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS case ENABLE_TABLE_MARK_REGIONS_ONLINE: // Get the region replica count. If changed since disable, need to do // more work assigning. - Connection connection = env.getMasterServices().getConnection(); TableDescriptor tableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); int configuredReplicaCount = tableDescriptor.getRegionReplication(); - // Get regions for the table from memory; get both online and offline regions ('true'). + // Get regions for the table from memory List regionsOfTable = - env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true); + env.getAssignmentManager().getRegionStates().getRegionsOfTableForEnabling(tableName); // How many replicas do we currently have? Check regions returned from // in-memory state. 
int currentMaxReplica = getMaxReplicaId(regionsOfTable); - - // Read the META table to know the number of replicas the table currently has. - // If there was a table modification on region replica count then need to - // adjust replica counts here. - int replicasFound = TableName.isMetaTableName(this.tableName)? - 0: // TODO: Figure better what to do here for hbase:meta replica. - getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable); - LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound, - configuredReplicaCount, tableName.getNameAsString()); - if (currentMaxReplica == (configuredReplicaCount - 1)) { - if (LOG.isDebugEnabled()) { - LOG.debug("No change in number of region replicas (configuredReplicaCount={});" - + " assigning.", configuredReplicaCount); - } + if (currentMaxReplica == configuredReplicaCount - 1) { + LOG.debug("No change in number of region replicas (configuredReplicaCount={});" + + " assigning.", configuredReplicaCount); } else if (currentMaxReplica > (configuredReplicaCount - 1)) { // We have additional regions as the replica count has been decreased. Delete // those regions because already the table is in the unassigned state - LOG.info("The number of replicas " + (currentMaxReplica + 1) - + " is more than the region replica count " + configuredReplicaCount); + LOG.warn( + "The number of replicas {} is more than the region replica count {}" + + ", usually this should not happen as we will delete them in ModifyTableProcedure", + currentMaxReplica + 1, configuredReplicaCount); List copyOfRegions = new ArrayList(regionsOfTable); for (RegionInfo regionInfo : copyOfRegions) { if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) { @@ -140,11 +124,11 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS regionsOfTable.remove(regionInfo); } } - } else { + } else if (currentMaxReplica < configuredReplicaCount - 1) { // the replicasFound is less than the regionReplication - LOG.info("Number of replicas has increased. Assigning new region replicas." + + LOG.info("Number of replicas has increased for {}. Assigning new region replicas." + "The previous replica count was {}. The current replica count is {}.", - (currentMaxReplica + 1), configuredReplicaCount); + this.tableName, currentMaxReplica + 1, configuredReplicaCount); regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable, currentMaxReplica + 1, configuredReplicaCount); } @@ -174,25 +158,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS return Flow.HAS_MORE_STATE; } - /** - * @return Count of replicas found reading hbase:meta Region row or zk if - * asking about the hbase:meta table itself.. 
- */ - private int getReplicaCountInMeta(Connection connection, int regionReplicaCount, - List regionsOfTable) throws IOException { - Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0)); - int replicasFound = 0; - for (int i = 1; i < regionReplicaCount; i++) { - // Since we have already added the entries to the META we will be getting only that here - List columnCells = - r.getColumnCells(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i)); - if (!columnCells.isEmpty()) { - replicasFound++; - } - } - return replicasFound; - } - @Override protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index f158452296c8..e92fc110aba2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -83,9 +82,8 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) t // created here in bootstrap and it'll need to be cleaned up. Better to // not make it in first place. Turn off block caching for bootstrap. // Enable after. - TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, - rootDir, builder -> builder.setRegionReplication( - conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM))); + TableDescriptor metaDescriptor = + FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir); HRegion .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null) .close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index c470c428125a..456660d37577 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -42,8 +42,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * ProcedureScheduler for the Master Procedures. * This ProcedureScheduler tries to provide to the ProcedureExecutor procedures @@ -659,7 +657,6 @@ public void wakeTableSharedLock(final Procedure procedure, final TableName ta * @return true if deletion succeeded, false otherwise meaning that there are * other new operations pending for that table (e.g. a new create). */ - @VisibleForTesting boolean markTableAsDeleted(final TableName table, final Procedure procedure) { schedLock(); try { @@ -1015,7 +1012,6 @@ public void wakeMetaExclusiveLock(Procedure procedure) { /** * For debugging. Expensive. 
*/ - @VisibleForTesting public String dumpLocks() throws IOException { schedLock(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 64f4bf6c84d9..247dd9c202f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -21,28 +21,24 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.hadoop.hbase.ConcurrentTableModificationException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -134,6 +130,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_PRE_OPERATION: preModify(env, state); + setNextState(ModifyTableState.MODIFY_TABLE_CLOSE_EXCESS_REPLICAS); + break; + case MODIFY_TABLE_CLOSE_EXCESS_REPLICAS: + if (isTableEnabled(env)) { + closeExcessReplicasIfNeeded(env); + } setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR); break; case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR: @@ -141,7 +143,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); break; case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: - updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor); + removeReplicaColumnsIfNeeded(env); setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); break; case MODIFY_TABLE_POST_OPERATION: @@ -149,9 +151,19 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: - if (env.getAssignmentManager().isTableEnabled(getTableName())) { + if (isTableEnabled(env)) { addChildProcedure(new ReopenTableRegionsProcedure(getTableName())); } + setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS); + break; + case MODIFY_TABLE_ASSIGN_NEW_REPLICAS: + 
assignNewReplicasIfNeeded(env); + if (TableName.isMetaTableName(getTableName())) { + MetaLocationSyncer syncer = env.getMasterServices().getMetaLocationSyncer(); + if (syncer != null) { + syncer.setMetaReplicaCount(modifiedTableDescriptor.getRegionReplication()); + } + } if (deleteColumnFamilyInModify) { setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); } else { @@ -303,14 +315,6 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().get(getTableName()); } - if (env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor - .getRegionReplication()) { - throw new TableNotDisabledException( - "REGION_REPLICATION change is not supported for enabled tables"); - } - } this.deleteColumnFamilyInModify = isDeleteColumnFamily(unmodifiedTableDescriptor, modifiedTableDescriptor); if (!unmodifiedTableDescriptor.getRegionServerGroup() @@ -346,8 +350,6 @@ private static boolean isDeleteColumnFamily(TableDescriptor originalDescriptor, * Action before modifying table. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void preModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -357,7 +359,6 @@ private void preModify(final MasterProcedureEnv env, final ModifyTableState stat /** * Update descriptor * @param env MasterProcedureEnv - * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor); @@ -366,7 +367,6 @@ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOExcept /** * Removes from hdfs the families that are not longer present in the new table descriptor. * @param env MasterProcedureEnv - * @throws IOException */ private void deleteFromFs(final MasterProcedureEnv env, final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor) @@ -385,62 +385,58 @@ private void deleteFromFs(final MasterProcedureEnv env, } /** - * update replica column families if necessary. - * @param env MasterProcedureEnv - * @throws IOException + * remove replica columns if necessary. 
*/ - private void updateReplicaColumnsIfNeeded( - final MasterProcedureEnv env, - final TableDescriptor oldTableDescriptor, - final TableDescriptor newTableDescriptor) throws IOException { - final int oldReplicaCount = oldTableDescriptor.getRegionReplication(); - final int newReplicaCount = newTableDescriptor.getRegionReplication(); - - if (newReplicaCount < oldReplicaCount) { - Set tableRows = new HashSet<>(); - Connection connection = env.getMasterServices().getConnection(); - Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - ResultScanner resScanner = metaTable.getScanner(scan); - for (Result result : resScanner) { - tableRows.add(result.getRow()); - } - MetaTableAccessor.removeRegionReplicasFromMeta( - tableRows, - newReplicaCount, - oldReplicaCount - newReplicaCount, - connection); - } + private void removeReplicaColumnsIfNeeded(MasterProcedureEnv env) throws IOException { + final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount >= oldReplicaCount) { + return; } - if (newReplicaCount > oldReplicaCount) { - Connection connection = env.getMasterServices().getConnection(); - // Get the existing table regions - List existingTableRegions = - MetaTableAccessor.getTableRegions(connection, getTableName()); - // add all the new entries to the meta table - addRegionsToMeta(env, newTableDescriptor, existingTableRegions); - if (oldReplicaCount <= 1) { - // The table has been newly enabled for replica. So check if we need to setup - // region replication - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + env.getAssignmentManager().getRegionStateStore().removeRegionReplicas(getTableName(), + oldReplicaCount, newReplicaCount); + env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()).stream() + .filter(r -> r.getReplicaId() >= newReplicaCount) + .forEach(env.getAssignmentManager().getRegionStates()::deleteRegion); + } + + private void assignNewReplicasIfNeeded(MasterProcedureEnv env) throws IOException { + final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount <= oldReplicaCount) { + return; + } + if (isTableEnabled(env)) { + List newReplicas = env.getAssignmentManager().getRegionStates() + .getRegionsOfTable(getTableName()).stream().filter(RegionReplicaUtil::isDefaultReplica) + .flatMap(primaryRegion -> IntStream.range(oldReplicaCount, newReplicaCount).mapToObj( + replicaId -> RegionReplicaUtil.getRegionInfoForReplica(primaryRegion, replicaId))) + .collect(Collectors.toList()); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(newReplicas)); + } + if (oldReplicaCount <= 1) { + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); } } } - private static void addRegionsToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, final List regionInfos) - throws IOException { - MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos, - tableDescriptor.getRegionReplication()); + private void closeExcessReplicasIfNeeded(MasterProcedureEnv env) { + 
final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount >= oldReplicaCount) { + return; + } + addChildProcedure(env.getAssignmentManager() + .createUnassignProceduresForClosingExcessRegionReplicas(getTableName(), newReplicaCount)); } + /** * Action after modifying table. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void postModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -451,8 +447,6 @@ private void postModify(final MasterProcedureEnv env, final ModifyTableState sta * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 1942ed6e8abf..d028bb40321b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -41,9 +41,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; @@ -93,6 +94,7 @@ public boolean start() { if (!super.start()) { return false; } + setTimeoutExecutorUncaughtExceptionHandler(this::abort); if (master.isStopped()) { LOG.debug("Stopped"); return false; @@ -125,6 +127,13 @@ public boolean start() { return true; } + private void abort(Thread t, Throwable e) { + LOG.error("Caught error", e); + if (!master.isStopped() && !master.isStopping() && !master.isAborted()) { + master.abort("Aborting master", e); + } + } + @Override public boolean stop() { if (!super.stop()) { @@ -377,7 +386,6 @@ public void dispatchServerOperations(MasterProcedureEnv env, List regionNames; + private List regionNames; private List regions = Collections.emptyList(); private RetryCounter retryCounter; public ReopenTableRegionsProcedure() { - regionNames = null; + regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(TableName tableName) { this.tableName = tableName; - this.regionNames = null; + this.regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(final TableName tableName, @@ -105,7 +105,7 @@ protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (state) { case REOPEN_TABLE_REGIONS_GET_REGIONS: - if (!env.getAssignmentManager().isTableEnabled(tableName)) { + if (!isTableEnabled(env)) { LOG.info("Table {} is disabled, give up reopening its regions", tableName); return Flow.NO_MORE_STATE; 
} @@ -224,6 +224,17 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO ReopenTableRegionsStateData.Builder builder = ReopenTableRegionsStateData.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(tableName)); regions.stream().map(ProtobufUtil::toRegionLocation).forEachOrdered(builder::addRegion); + if (CollectionUtils.isNotEmpty(regionNames)) { + // As of this writing, wrapping this statement withing if condition is only required + // for backward compatibility as we used to have 'regionNames' as null for cases + // where all regions of given table should be reopened. Now, we have kept emptyList() + // for 'regionNames' to indicate all regions of given table should be reopened unless + // 'regionNames' contains at least one specific region, in which case only list of regions + // that 'regionNames' contain should be reopened, not all regions of given table. + // Now, we don't need this check since we are not dealing with null 'regionNames' and hence, + // guarding by this if condition can be removed in HBase 4.0.0. + regionNames.stream().map(ByteString::copyFrom).forEachOrdered(builder::addRegionNames); + } serializer.serialize(builder.build()); } @@ -234,5 +245,11 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws tableName = ProtobufUtil.toTableName(data.getTableName()); regions = data.getRegionList().stream().map(ProtobufUtil::toRegionLocation) .collect(Collectors.toList()); + if (CollectionUtils.isNotEmpty(data.getRegionNamesList())) { + regionNames = data.getRegionNamesList().stream().map(ByteString::toByteArray) + .collect(Collectors.toList()); + } else { + regionNames = Collections.emptyList(); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 17606c340665..e7fba555c9cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -65,6 +65,21 @@ public class ServerCrashProcedure implements ServerProcedureInterface { private static final Logger LOG = LoggerFactory.getLogger(ServerCrashProcedure.class); + /** + * Configuration parameter to enable/disable the retain region assignment during + * ServerCrashProcedure. + *
<p/>
    + * By default, retain assignment is disabled, which makes failover faster and improves + * availability; this is useful for cloud scenarios where region block locality is not important. Enable + * it when RegionServers are deployed on the same hosts as the DataNodes, since this will improve + * read performance through local reads. + *
<p/>
    + * see HBASE-24900 for more details. + */ + public static final String MASTER_SCP_RETAIN_ASSIGNMENT = "hbase.master.scp.retain.assignment"; + /** Default value of {@link #MASTER_SCP_RETAIN_ASSIGNMENT} */ + public static final boolean DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT = false; + /** * Name of the crashed server to process. */ @@ -122,7 +137,6 @@ protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) // This adds server to the DeadServer processing list but not to the DeadServers list. // Server gets removed from processing list below on procedure successful finish. if (!notifiedDeadServer) { - services.getServerManager().getDeadServers().processing(serverName); notifiedDeadServer = true; } @@ -230,7 +244,6 @@ protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) case SERVER_CRASH_FINISH: LOG.info("removed crashed server {} after splitting done", serverName); services.getAssignmentManager().getRegionStates().removeServer(serverName); - services.getServerManager().getDeadServers().finish(serverName); updateProgress(true); return Flow.NO_MORE_STATE; default: @@ -488,6 +501,8 @@ protected boolean isMatchingRegionLocation(RegionStateNode rsn) { */ private void assignRegions(MasterProcedureEnv env, List regions) throws IOException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); + boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, + DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT); for (RegionInfo region : regions) { RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region); regionNode.lock(); @@ -514,7 +529,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr } if (regionNode.getProcedure() != null) { LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode); - regionNode.getProcedure().serverCrashed(env, regionNode, getServerName()); + regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), + !retainAssignment); continue; } if (env.getMasterServices().getTableStateManager() @@ -533,9 +549,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this); continue; } - // force to assign to a new candidate server, see HBASE-23035 for more details. TransitRegionStateProcedure proc = - TransitRegionStateProcedure.assign(env, region, true, null); + TransitRegionStateProcedure.assign(env, region, !retainAssignment, null); regionNode.setProcedure(proc); addChildProcedure(proc); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java index 4ae408f417d6..f8822311efab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java @@ -16,6 +16,7 @@ * limitations under the License. 
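For illustration, the hbase.master.scp.retain.assignment flag added above is read as a plain boolean configuration key. A minimal sketch, assuming only the standard Hadoop Configuration API (the example class is hypothetical; only the key and default mirror the patch):

import org.apache.hadoop.conf.Configuration;

public class RetainAssignmentFlagExample {
  static final String KEY = "hbase.master.scp.retain.assignment";
  static final boolean DEFAULT = false;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Operators would normally set this in hbase-site.xml; it is set programmatically
    // here only to demonstrate the lookup the procedure performs.
    conf.setBoolean(KEY, true);
    boolean retainAssignment = conf.getBoolean(KEY, DEFAULT);
    // When true, the crashed server's regions are preferred back onto their old host
    // instead of being forced onto a new candidate server.
    System.out.println("retain assignment on server crash: " + retainAssignment);
  }
}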
*/ package org.apache.hadoop.hbase.master.procedure; + import java.io.IOException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; @@ -30,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -163,8 +164,7 @@ public String getWAL() { return walPath; } - @VisibleForTesting - public ServerName getWorker(){ + public ServerName getWorker() { return worker; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index 290e0c19df17..26eceb907457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; @@ -37,7 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -318,7 +317,6 @@ private void postTruncate(final MasterProcedureEnv env) } } - @VisibleForTesting RegionInfo getFirstRegionInfo() { if (regions == null || regions.isEmpty()) { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index aa1b9d1257ea..c2188b4dc0dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -50,7 +50,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.math.IntMath; /** @@ -79,9 +78,9 @@ * Notice that, you can use different root file system and WAL file system. Then the above directory * will be on two file systems, the root file system will have the data directory while the WAL * filesystem will have the WALs directory. The archived HFile will be moved to the global HFile - * archived directory with the {@link MasterRegionParams#archivedWalSuffix()} suffix. The archived + * archived directory with the {@link MasterRegionParams#archivedHFileSuffix()} suffix. The archived * WAL will be moved to the global WAL archived directory with the - * {@link MasterRegionParams#archivedHFileSuffix()} suffix. + * {@link MasterRegionParams#archivedWalSuffix()} suffix. 
*/ @InterfaceAudience.Private public final class MasterRegion { @@ -96,10 +95,8 @@ public final class MasterRegion { private final WALFactory walFactory; - @VisibleForTesting final HRegion region; - @VisibleForTesting final MasterRegionFlusherAndCompactor flusherAndCompactor; private MasterRegionWALRoller walRoller; @@ -141,17 +138,14 @@ public RegionScanner getScanner(Scan scan) throws IOException { return region.getScanner(scan); } - @VisibleForTesting public FlushResult flush(boolean force) throws IOException { return region.flush(force); } - @VisibleForTesting public void requestRollAll() { walRoller.requestRollAll(); } - @VisibleForTesting public void waitUntilWalRollFinished() throws InterruptedException { walRoller.waitUntilWalRollFinished(); } @@ -301,7 +295,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException params.archivedWalSuffix(), params.rollPeriodMs(), params.flushSize()); walRoller.start(); - WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), false); + WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), server, false); Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName()); HRegion region; if (fs.exists(tableDir)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java index ef3dd121133b..bba6611c68d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.AbstractWALRoller; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -68,8 +67,11 @@ private MasterRegionWALRoller(String name, Configuration conf, Abortable abortab } @Override - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { // move the archived WAL files to the global archive path + // here we do not use the newPath directly, so that even if we fail to move some of the + // newPaths, we are still safe because every time we will get all the files under the archive + // directory. try { MasterRegionUtils.moveFilesUnderDir(fs, walArchiveDir, globalWALArchiveDir, archivedWALSuffix); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java index fc254a397a0e..8985d2f629ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java @@ -38,8 +38,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * The base class for all replication peer related procedure. 
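For illustration of the afterWALArchive change above: the roller does not act on the newly archived path directly but re-scans the whole local archive directory, so a move that failed on one pass is retried on the next. A hedged sketch of that idea using only the standard Hadoop FileSystem API (the helper below is illustrative, not the patch's MasterRegionUtils):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class ArchiveMoverSketch {
  static void moveAll(FileSystem fs, Path srcDir, Path dstDir, String suffix) throws IOException {
    for (FileStatus status : fs.listStatus(srcDir)) {
      Path src = status.getPath();
      if (!src.getName().endsWith(suffix)) {
        continue;
      }
      // Files whose rename failed last time are still under srcDir and are picked up
      // again on the next invocation, which is what makes the re-scan approach safe.
      fs.rename(src, new Path(dstDir, src.getName()));
    }
  }
}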
*/ @@ -93,7 +91,6 @@ protected final void refreshPeer(MasterProcedureEnv env, PeerOperationType type) } // will be override in test to simulate error - @VisibleForTesting protected void enablePeer(MasterProcedureEnv env) throws ReplicationException { env.getReplicationPeerManager().enablePeer(peerId); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java index 8dd329f85698..7401c4b8d543 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java @@ -34,8 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState; /** @@ -133,7 +131,6 @@ private boolean needReopen(TableStateManager tsm, TableName tn) throws IOExcepti } // will be override in test to simulate error - @VisibleForTesting protected void reopenRegions(MasterProcedureEnv env) throws IOException { ReplicationPeerConfig peerConfig = getNewPeerConfig(); ReplicationPeerConfig oldPeerConfig = getOldPeerConfig(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 2c930e103fc8..add51210a38f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -233,7 +233,7 @@ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean ena // this should be a retry, just return return; } - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build(); SyncReplicationState syncReplicationState = copiedPeerConfig.isSyncReplication() ? 
SyncReplicationState.DOWNGRADE_ACTIVE @@ -547,7 +547,7 @@ public static ReplicationPeerManager create(ZKWatcher zk, Configuration conf, St for (String peerId : peerStorage.listPeerIds()) { ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId); - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); peerStorage.updatePeerConfig(peerId, peerConfig); boolean enabled = peerStorage.isPeerEnabled(peerId); SyncReplicationState state = peerStorage.getPeerSyncReplicationState(peerId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java index ae624b145709..419e79b14483 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java @@ -47,8 +47,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * The manager for replaying remote wal. *

    @@ -289,7 +287,6 @@ public boolean isReplayWALFinished(String wal) throws IOException { return fs.getFileStatus(walPath).getLen() == 0; } - @VisibleForTesting public Path getRemoteWALDir() { return remoteWALDir; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java index 358fd5e3492b..289e012c28ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java @@ -36,8 +36,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerSyncReplicationStateTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TransitPeerSyncReplicationStateStateData; @@ -110,7 +108,6 @@ protected PeerSyncReplicationStateTransitionState getInitialState() { return PeerSyncReplicationStateTransitionState.PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION; } - @VisibleForTesting protected void preTransit(MasterProcedureEnv env) throws IOException { MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { @@ -144,14 +141,12 @@ private void postTransit(MasterProcedureEnv env) throws IOException { } } - @VisibleForTesting protected void reopenRegions(MasterProcedureEnv env) { addChildProcedure( env.getReplicationPeerManager().getPeerConfig(peerId).get().getTableCFsMap().keySet().stream() .map(ReopenTableRegionsProcedure::new).toArray(ReopenTableRegionsProcedure[]::new)); } - @VisibleForTesting protected void createDirForRemoteWAL(MasterProcedureEnv env) throws IOException { MasterFileSystem mfs = env.getMasterFileSystem(); Path remoteWALDir = new Path(mfs.getWALRootDir(), ReplicationUtils.REMOTE_WAL_DIR_NAME); @@ -204,7 +199,6 @@ private void replayRemoteWAL(boolean serial) { addChildProcedure(new RecoverStandbyProcedure(peerId, serial)); } - @VisibleForTesting protected void setPeerNewSyncReplicationState(MasterProcedureEnv env) throws ReplicationException { if (toState.equals(SyncReplicationState.STANDBY) || @@ -223,12 +217,10 @@ protected void setPeerNewSyncReplicationState(MasterProcedureEnv env) env.getReplicationPeerManager().setPeerNewSyncReplicationState(peerId, toState); } - @VisibleForTesting protected void removeAllReplicationQueues(MasterProcedureEnv env) throws ReplicationException { env.getReplicationPeerManager().removeAllQueues(peerId); } - @VisibleForTesting protected void transitPeerSyncReplicationState(MasterProcedureEnv env) throws ReplicationException { env.getReplicationPeerManager().transitPeerSyncReplicationState(peerId, toState); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java index 039988af186a..fe46e4c97110 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java @@ -41,7 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -271,7 +270,6 @@ private void refreshCache() throws IOException { this.snapshots.putAll(newSnapshots); } - @VisibleForTesting List getSnapshotsInProgress() throws IOException { List snapshotInProgress = Lists.newArrayList(); // only add those files to the cache, but not to the known snapshots diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 8f57e6ebc28c..301077906770 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -92,7 +92,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -196,7 +195,7 @@ public SnapshotManager() {} * @param coordinator procedure coordinator instance. exposed for testing. * @param pool HBase ExecutorServcie instance, exposed for testing. */ - @VisibleForTesting + @InterfaceAudience.Private SnapshotManager(final MasterServices master, ProcedureCoordinator coordinator, ExecutorService pool, int sentinelCleanInterval) throws IOException, UnsupportedOperationException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java index b1c70c569356..51208e37d4e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java @@ -19,12 +19,11 @@ package org.apache.hadoop.hbase.master.zksyncer; import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; +import java.util.Iterator; import java.util.Map; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.util.Threads; @@ -34,7 +33,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,22 +40,68 @@ * Tracks the target znode(s) on server ZK cluster and synchronize them to client ZK cluster if * changed *

    - * The target znode(s) is given through {@link #getNodesToWatch()} method + * The target znode(s) is given through {@link #getPathsToWatch()} method */ @InterfaceAudience.Private public abstract class ClientZKSyncer extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(ClientZKSyncer.class); private final Server server; private final ZKWatcher clientZkWatcher; + + /** + * Used to store the newest data which we want to sync to client zk. + *

    + * For meta location, since we may reduce the replica number, so here we add a {@code delete} flag + * to tell the updater delete the znode on client zk and quit. + */ + private static final class ZKData { + + byte[] data; + + boolean delete = false; + + synchronized void set(byte[] data) { + this.data = data; + notifyAll(); + } + + synchronized byte[] get() throws InterruptedException { + while (!delete && data == null) { + wait(); + } + byte[] d = data; + data = null; + return d; + } + + synchronized void delete() { + this.delete = true; + notifyAll(); + } + + synchronized boolean isDeleted() { + return delete; + } + } + // We use queues and daemon threads to synchronize the data to client ZK cluster // to avoid blocking the single event thread for watchers - private final Map> queues; + private final ConcurrentMap queues; public ClientZKSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server server) { super(watcher); this.server = server; this.clientZkWatcher = clientZkWatcher; - this.queues = new HashMap<>(); + this.queues = new ConcurrentHashMap<>(); + } + + private void startNewSyncThread(String path) { + ZKData zkData = new ZKData(); + queues.put(path, zkData); + Thread updater = new ClientZkUpdater(path, zkData); + updater.setDaemon(true); + updater.start(); + watchAndCheckExists(path); } /** @@ -69,17 +113,12 @@ public void start() throws KeeperException { this.watcher.registerListener(this); // create base znode on remote ZK ZKUtil.createWithParents(clientZkWatcher, watcher.getZNodePaths().baseZNode); - // set meta znodes for client ZK - Collection nodes = getNodesToWatch(); - LOG.debug("Znodes to watch: " + nodes); + // set znodes for client ZK + Set paths = getPathsToWatch(); + LOG.debug("ZNodes to watch: {}", paths); // initialize queues and threads - for (String node : nodes) { - BlockingQueue queue = new ArrayBlockingQueue<>(1); - queues.put(node, queue); - Thread updater = new ClientZkUpdater(node, queue); - updater.setDaemon(true); - updater.start(); - watchAndCheckExists(node); + for (String path : paths) { + startNewSyncThread(path); } } @@ -112,10 +151,9 @@ private void watchAndCheckExists(String node) { * @param data the data to write to queue */ private void upsertQueue(String node, byte[] data) { - BlockingQueue queue = queues.get(node); - synchronized (queue) { - queue.poll(); - queue.offer(data); + ZKData zkData = queues.get(node); + if (zkData != null) { + zkData.set(data); } } @@ -126,35 +164,49 @@ private void upsertQueue(String node, byte[] data) { * @param data the data to set to client ZK * @throws InterruptedException if the thread is interrupted during process */ - private final void setDataForClientZkUntilSuccess(String node, byte[] data) - throws InterruptedException { + private void setDataForClientZkUntilSuccess(String node, byte[] data) + throws InterruptedException { + boolean create = false; while (!server.isStopped()) { try { LOG.debug("Set data for remote " + node + ", client zk wather: " + clientZkWatcher); - ZKUtil.setData(clientZkWatcher, node, data); - break; - } catch (KeeperException.NoNodeException nne) { - // Node doesn't exist, create it and set value - try { + if (create) { ZKUtil.createNodeIfNotExistsNoWatch(clientZkWatcher, node, data, CreateMode.PERSISTENT); - break; - } catch (KeeperException.ConnectionLossException - | KeeperException.SessionExpiredException ee) { - reconnectAfterExpiration(); - } catch (KeeperException e) { - LOG.warn( - "Failed to create znode " + node + " due to: " + e.getMessage() + ", will retry 
later"); + } else { + ZKUtil.setData(clientZkWatcher, node, data); } - } catch (KeeperException.ConnectionLossException - | KeeperException.SessionExpiredException ee) { - reconnectAfterExpiration(); + break; } catch (KeeperException e) { - LOG.debug("Failed to set data to client ZK, will retry later", e); + LOG.debug("Failed to set data for {} to client ZK, will retry later", node, e); + if (e.code() == KeeperException.Code.SESSIONEXPIRED) { + reconnectAfterExpiration(); + } + if (e.code() == KeeperException.Code.NONODE) { + create = true; + } + if (e.code() == KeeperException.Code.NODEEXISTS) { + create = false; + } } Threads.sleep(HConstants.SOCKET_RETRY_WAIT_MS); } } + private void deleteDataForClientZkUntilSuccess(String node) throws InterruptedException { + while (!server.isStopped()) { + LOG.debug("Delete remote " + node + ", client zk wather: " + clientZkWatcher); + try { + ZKUtil.deleteNode(clientZkWatcher, node); + } catch (KeeperException e) { + LOG.debug("Failed to delete node from client ZK, will retry later", e); + if (e.code() == KeeperException.Code.SESSIONEXPIRED) { + reconnectAfterExpiration(); + } + + } + } + } + private final void reconnectAfterExpiration() throws InterruptedException { LOG.warn("ZK session expired or lost. Retry a new connection..."); try { @@ -164,11 +216,7 @@ private final void reconnectAfterExpiration() throws InterruptedException { } } - @Override - public void nodeCreated(String path) { - if (!validate(path)) { - return; - } + private void getDataAndWatch(String path) { try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); upsertQueue(path, data); @@ -177,23 +225,39 @@ public void nodeCreated(String path) { } } + private void removeQueue(String path) { + ZKData zkData = queues.remove(path); + if (zkData != null) { + zkData.delete(); + } + } + @Override - public void nodeDataChanged(String path) { + public void nodeCreated(String path) { if (validate(path)) { - nodeCreated(path); + getDataAndWatch(path); + } else { + removeQueue(path); } } + @Override + public void nodeDataChanged(String path) { + nodeCreated(path); + } + @Override public synchronized void nodeDeleted(String path) { if (validate(path)) { try { if (ZKUtil.watchAndCheckExists(watcher, path)) { - nodeCreated(path); + getDataAndWatch(path); } } catch (KeeperException e) { LOG.warn("Unexpected exception handling nodeDeleted event for path: " + path, e); } + } else { + removeQueue(path); } } @@ -202,41 +266,67 @@ public synchronized void nodeDeleted(String path) { * @param path the path to validate * @return true if the znode is watched by us */ - abstract boolean validate(String path); + protected abstract boolean validate(String path); /** - * @return the znode(s) to watch + * @return the zk path(s) to watch */ - abstract Collection getNodesToWatch(); + protected abstract Set getPathsToWatch(); + + protected final void refreshWatchingList() { + Set newPaths = getPathsToWatch(); + LOG.debug("New ZNodes to watch: {}", newPaths); + Iterator> iter = queues.entrySet().iterator(); + // stop unused syncers + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + if (!newPaths.contains(entry.getKey())) { + iter.remove(); + entry.getValue().delete(); + } + } + // start new syncers + for (String newPath : newPaths) { + if (!queues.containsKey(newPath)) { + startNewSyncThread(newPath); + } + } + } /** * Thread to synchronize znode data to client ZK cluster */ - class ClientZkUpdater extends Thread { - final String znode; - final BlockingQueue queue; + private final class ClientZkUpdater 
extends Thread { + private final String znode; + private final ZKData zkData; - public ClientZkUpdater(String znode, BlockingQueue queue) { + public ClientZkUpdater(String znode, ZKData zkData) { this.znode = znode; - this.queue = queue; + this.zkData = zkData; setName("ClientZKUpdater-" + znode); } @Override public void run() { + LOG.debug("Client zk updater for znode {} started", znode); while (!server.isStopped()) { try { - byte[] data = queue.take(); - setDataForClientZkUntilSuccess(znode, data); - } catch (InterruptedException e) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Interrupted while checking whether need to update meta location to client zk"); + byte[] data = zkData.get(); + if (data != null) { + setDataForClientZkUntilSuccess(znode, data); + } else { + if (zkData.isDeleted()) { + deleteDataForClientZkUntilSuccess(znode); + break; + } } + } catch (InterruptedException e) { + LOG.debug("Interrupted while checking whether need to update meta location to client zk"); Thread.currentThread().interrupt(); break; } } + LOG.debug("Client zk updater for znode {} stopped", znode); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java index a9aa13cb93d3..ee04238d0b95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java @@ -18,9 +18,8 @@ */ package org.apache.hadoop.hbase.master.zksyncer; -import java.util.ArrayList; -import java.util.Collection; - +import java.util.Collections; +import java.util.Set; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -39,14 +38,12 @@ public MasterAddressSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server } @Override - boolean validate(String path) { + protected boolean validate(String path) { return path.equals(masterAddressZNode); } @Override - Collection getNodesToWatch() { - ArrayList toReturn = new ArrayList<>(); - toReturn.add(masterAddressZNode); - return toReturn; + protected Set getPathsToWatch() { + return Collections.singleton(masterAddressZNode); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java index 98d73224ce9b..f6e38329ac34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java @@ -18,8 +18,9 @@ */ package org.apache.hadoop.hbase.master.zksyncer; -import java.util.Collection; - +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -30,17 +31,28 @@ */ @InterfaceAudience.Private public class MetaLocationSyncer extends ClientZKSyncer { + + private volatile int metaReplicaCount = 1; + public MetaLocationSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server server) { super(watcher, clientZkWatcher, server); } @Override - boolean validate(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + protected boolean validate(String path) { + 
return watcher.getZNodePaths().isMetaZNodePath(path); } @Override - Collection getNodesToWatch() { - return watcher.getZNodePaths().getMetaReplicaZNodes(); + protected Set getPathsToWatch() { + return IntStream.range(0, metaReplicaCount) + .mapToObj(watcher.getZNodePaths()::getZNodeForReplica).collect(Collectors.toSet()); + } + + public void setMetaReplicaCount(int replicaCount) { + if (replicaCount != metaReplicaCount) { + metaReplicaCount = replicaCount; + refreshWatchingList(); + } } } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java index 82a08a9fb84a..13f43fcf0834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java @@ -52,7 +52,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.SetMultimap; /** @@ -101,7 +100,6 @@ private void checkObsoleteConfigurations() { } } - @VisibleForTesting public MobFileCleanerChore() { this.master = null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java index 314729833959..7fe2d0d0ace3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.CompactionState; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; @@ -42,9 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - - /** * Periodic MOB compaction chore. 
* It runs MOB compaction on region servers in parallel, thus @@ -75,7 +71,6 @@ public MobFileCompactionChore(HMaster master) { } - @VisibleForTesting public MobFileCompactionChore(Configuration conf, int batchSize) { this.regionBatchSize = batchSize; } @@ -85,9 +80,7 @@ protected void chore() { boolean reported = false; - try (Connection conn = master.getConnection(); - Admin admin = conn.getAdmin();) { - + try (Admin admin = master.getConnection().getAdmin()) { TableDescriptors htds = master.getTableDescriptors(); Map map = htds.getAll(); for (TableDescriptor htd : map.values()) { @@ -146,7 +139,6 @@ protected void chore() { } } - @VisibleForTesting public void performMajorCompactionInBatches(Admin admin, TableDescriptor htd, ColumnFamilyDescriptor hcd) throws IOException, InterruptedException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index acc8f74a501b..2ae29385eb42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -56,9 +56,9 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -540,8 +540,8 @@ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, boolean isCompaction) throws IOException { return createWriter(conf, fs, family, new Path(basePath, mobFileName.getFileName()), - maxKeyCount, compression, cacheConfig, cryptoContext, HStore.getChecksumType(conf), - HStore.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); + maxKeyCount, compression, cacheConfig, cryptoContext, StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 1bde91553628..d2edaa8b1d84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -87,8 +87,12 @@ public static synchronized TaskMonitor get() { } return instance; } - + public synchronized MonitoredTask createStatus(String description) { + return createStatus(description, false); + } + + public synchronized MonitoredTask createStatus(String description, boolean ignore) { MonitoredTask stat = new MonitoredTaskImpl(); stat.setDescription(description); MonitoredTask proxy = (MonitoredTask) Proxy.newProxyInstance( @@ -99,7 +103,9 @@ public synchronized MonitoredTask createStatus(String description) { if (tasks.isFull()) { purgeExpiredTasks(); } - tasks.add(pair); + if (!ignore) { + tasks.add(pair); + } return proxy; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java index 9e58daa3f74b..720f25b57d9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java @@ -28,16 +28,14 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.common.collect.MapMaker; /** @@ -71,7 +69,7 @@ public class ProcedureCoordinator { * * @param pool Used for executing procedures. */ - @VisibleForTesting // Only used in tests. SimpleMasterProcedureManager is a test class. + // Only used in tests. SimpleMasterProcedureManager is a test class. public ProcedureCoordinator(ProcedureCoordinatorRpcs rpcs, ThreadPoolExecutor pool) { this(rpcs, pool, TIMEOUT_MILLIS_DEFAULT, WAKE_MILLIS_DEFAULT); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java index 4b6924438377..9e45ad514369 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java @@ -39,7 +39,7 @@ * member), {@link #insideBarrier()} (execute while globally barriered and release barrier) and * {@link #cleanup(Exception)} (release state associated with subprocedure.) * - * When submitted to a ProcedureMemeber, the call method is executed in a separate thread. + * When submitted to a ProcedureMember, the call method is executed in a separate thread. * Latches are use too block its progress and trigger continuations when barrier conditions are * met. * @@ -147,7 +147,7 @@ private void rethrowException() throws ForeignException { * Execute the Subprocedure {@link #acquireBarrier()} and {@link #insideBarrier()} methods * while keeping some state for other threads to access. * - * This would normally be executed by the ProcedureMemeber when a acquire message comes from the + * This would normally be executed by the ProcedureMember when a acquire message comes from the * coordinator. Rpcs are used to spend message back to the coordinator after different phases * are executed. 
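For illustration, the Subprocedure javadoc above describes a two-phase barrier: a member first does its local acquire work, then waits for the coordinator to release the global barrier before running the in-barrier work. A toy latch-based sketch of that pattern (not the HBase Subprocedure API):

import java.util.concurrent.CountDownLatch;

class BarrierMemberSketch {
  private final CountDownLatch acquired = new CountDownLatch(1);
  private final CountDownLatch released = new CountDownLatch(1);

  void acquireBarrier() {
    // do the local prepare work here, then signal that this member has acquired
    acquired.countDown();
  }

  void coordinatorSaysGo() {
    // called once every member has acquired; releases the global barrier
    released.countDown();
  }

  void insideBarrier() throws InterruptedException {
    acquired.await();  // local prepare must have finished
    released.await();  // wait for the coordinator's release
    // run the globally barriered work here
  }
}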
Any exceptions caught during the execution (except for InterruptedException) get * converted and propagated to coordinator via {@link ProcedureMemberRpcs#sendMemberAborted( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 0a72d9a738a5..1e95d15881fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -132,7 +132,7 @@ public void stop(boolean force) throws IOException { * * @param table * @param family - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String table, String family) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java index 36a919d8f192..301b6053fb25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java @@ -63,7 +63,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; @@ -87,7 +86,6 @@ public class RegionProcedureStore extends ProcedureStoreBase { private final LeaseRecovery leaseRecovery; - @VisibleForTesting final MasterRegion region; private int numThreads; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java index a4ed7339845d..0e60709b5e09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java @@ -105,7 +105,7 @@ protected int doWork() throws Exception { if (!Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) { // We could have cells other than procedure edits, for example, a flush marker - WALPrettyPrinter.printCell(out, op, false); + WALPrettyPrinter.printCell(out, op, false, false); continue; } long procId = Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java similarity index 99% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java index 4e2e5779303f..e47c92914f0d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; @InterfaceAudience.Private -public class ReplicationProtbufUtil { +public class ReplicationProtobufUtil { /** * A helper to replicate a list of WAL entries using region server admin diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java index b2457959190b..d10e6eacc7c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java @@ -13,7 +13,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This limiter will refill resources at every TimeUnit/resources interval. For example: For a @@ -62,13 +61,11 @@ public long getWaitInterval(long limit, long available, long amount) { } // This method is for strictly testing purpose only - @VisibleForTesting @Override public void setNextRefillTime(long nextRefillTime) { this.nextRefillTime = nextRefillTime; } - @VisibleForTesting @Override public long getNextRefillTime() { return this.nextRefillTime; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java index bbe53b267afa..b7cd26f1c255 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java @@ -27,8 +27,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * A factory for getting instances of {@link FileArchiverNotifier}. 
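For illustration of the AverageIntervalRateLimiter javadoc above, which refills resources at every TimeUnit/resources interval: with a limit of 60 operations per 60,000 ms, one operation is refilled every 1,000 ms, so a caller that finds nothing available and needs one operation waits roughly 1,000 ms. A small worked sketch of that arithmetic (hypothetical helper, not the HBase RateLimiter API):

final class RefillIntervalMath {
  // Time to wait until enough operations have been refilled to satisfy the request.
  static long waitIntervalMs(long limit, long timeUnitMs, long available, long amount) {
    if (available >= amount) {
      return 0;
    }
    long msPerOp = timeUnitMs / limit;      // interval between single-operation refills
    return (amount - available) * msPerOp;  // e.g. (1 - 0) * 1000 = 1000 ms
  }

  public static void main(String[] args) {
    System.out.println(waitIntervalMs(60, 60_000, 0, 1)); // prints 1000
  }
}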
*/ @@ -47,12 +45,10 @@ public static FileArchiverNotifierFactory getInstance() { return CURRENT_INSTANCE; } - @VisibleForTesting static void setInstance(FileArchiverNotifierFactory inst) { CURRENT_INSTANCE = Objects.requireNonNull(inst); } - @VisibleForTesting static void reset() { CURRENT_INSTANCE = DEFAULT_INSTANCE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java index edda4dfeafd4..501ad8b8b2e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java @@ -161,7 +161,7 @@ long computeSize(Region r) { return regionSize; } - // VisibleForTesting + // visible for testing RegionSizeStore getRegionSizeStore() { return rs.getRegionServerSpaceQuotaManager().getRegionSizeStore(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java index e67eda5c5bd0..50f40afbe3cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java @@ -13,7 +13,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * With this limiter resources will be refilled only after a fixed interval of time. @@ -44,13 +43,11 @@ public long getWaitInterval(long limit, long available, long amount) { } // This method is for strictly testing purpose only - @VisibleForTesting @Override public void setNextRefillTime(long nextRefillTime) { this.nextRefillTime = nextRefillTime; } - @VisibleForTesting @Override public long getNextRefillTime() { return this.nextRefillTime; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index d80134a364b4..ff4c5191c942 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -48,7 +48,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; @@ -676,7 +675,6 @@ public String toString() { } } - @VisibleForTesting void initializeRegionSizes() { assert regionSizes == null; this.regionSizes = new ConcurrentHashMap<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 1c97b2012c09..f2d88bac527a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.MetaTableAccessor; import 
org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; @@ -103,7 +102,7 @@ public void start() throws IOException { public void stop(final String why) { if (refreshChore != null) { LOG.debug("Stopping QuotaRefresherChore chore."); - refreshChore.cancel(true); + refreshChore.shutdown(true); } stopped = true; } @@ -180,32 +179,26 @@ private QuotaState getQuotaState(final ConcurrentMap quotasMa return computeIfAbsent(quotasMap, key, QuotaState::new, this::triggerCacheRefresh); } - @VisibleForTesting void triggerCacheRefresh() { refreshChore.triggerNow(); } - @VisibleForTesting long getLastUpdate() { return refreshChore.lastUpdate; } - @VisibleForTesting Map getNamespaceQuotaCache() { return namespaceQuotaCache; } - @VisibleForTesting Map getRegionServerQuotaCache() { return regionServerQuotaCache; } - @VisibleForTesting Map getTableQuotaCache() { return tableQuotaCache; } - @VisibleForTesting Map getUserQuotaCache() { return userQuotaCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java index 425c24013aa8..0a8bfe18abb1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java @@ -25,7 +25,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; @@ -41,10 +40,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; + import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; /** @@ -521,12 +521,10 @@ TablesWithQuotas fetchAllTablesWithQuotasDefined() throws IOException { } } - @VisibleForTesting QuotaSnapshotStore getTableSnapshotStore() { return tableSnapshotStore; } - @VisibleForTesting QuotaSnapshotStore getNamespaceSnapshotStore() { return namespaceSnapshotStore; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java index 852d8a68f02e..a29b90d52177 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java @@ -23,8 +23,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Simple rate limiter. 
* @@ -230,9 +228,8 @@ public synchronized long waitInterval(final long amount) { } // These two method are for strictly testing purpose only - @VisibleForTesting + public abstract void setNextRefillTime(long nextRefillTime); - @VisibleForTesting public abstract long getNextRefillTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 0f96de535651..8eee70efabec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -33,7 +33,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** @@ -83,7 +83,6 @@ public void stop() { } } - @VisibleForTesting protected boolean isRpcThrottleEnabled() { return rpcThrottleEnabled; } @@ -108,7 +107,6 @@ public void switchRpcThrottle(boolean enable) throws IOException { } } - @VisibleForTesting QuotaCache getQuotaCache() { return quotaCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index b9797bc8b7d1..282075b6d71b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -24,16 +24,14 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; import java.util.Map.Entry; - import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -68,7 +66,6 @@ public RegionServerSpaceQuotaManager(RegionServerServices rsServices) { this(rsServices, SpaceViolationPolicyEnforcementFactory.getInstance()); } - @VisibleForTesting RegionServerSpaceQuotaManager( RegionServerServices rsServices, SpaceViolationPolicyEnforcementFactory factory) { this.rsServices = Objects.requireNonNull(rsServices); @@ -101,11 +98,11 @@ public synchronized void start() throws IOException { public synchronized void stop() { if (spaceQuotaRefresher != null) { - spaceQuotaRefresher.cancel(); + spaceQuotaRefresher.shutdown(); spaceQuotaRefresher = null; } if (regionSizeReporter != null) { - regionSizeReporter.cancel(); + regionSizeReporter.shutdown(); regionSizeReporter = null; } started = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 60cc92f428ce..0d5f0b5965f3 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.List; import java.util.NavigableSet; @@ -28,12 +27,12 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; /** * An abstract class, which implements the behaviour shared by all concrete memstore instances. @@ -377,12 +376,10 @@ protected CellComparator getComparator() { return comparator; } - @VisibleForTesting MutableSegment getActive() { return active; } - @VisibleForTesting ImmutableSegment getSnapshot() { return snapshot; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java index 2dbc1066be74..df6295702a3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java @@ -20,14 +20,18 @@ import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ipc.PriorityFunction; +import org.apache.hadoop.hbase.ipc.QosPriority; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.PriorityFunction; -import org.apache.hadoop.hbase.ipc.QosPriority; + +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -39,11 +43,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; -import org.apache.hadoop.hbase.security.User; - /** * Reads special method annotations and table names to figure a priority for use by QoS facility in * ipc; e.g: rpcs to hbase:meta get priority. 
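The class described above reads method annotations to decide request priority. A hedged illustration of a handler method carrying such an annotation is sketched below; the QosPriority annotation and HConstants.ADMIN_QOS are assumed from the imports retained in the hunk, and the class and method names here are invented for the example.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ipc.QosPriority;

public class AnnotatedRpcExample {

  // A method annotated like this is what AnnotationReadingPriorityFunction looks for;
  // admin-style calls (close, flush, compact) typically run at elevated priority.
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public void closeRegionLikeCall() {
    // handler body omitted
  }
}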
@@ -275,7 +274,6 @@ public long getDeadline(RequestHeader header, Message param) { return 0; } - @VisibleForTesting void setRegionServer(final HRegionServer hrs) { this.rpcServices = hrs.getRSRpcServices(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java index d74655d1b7a6..e1cfd15ed195 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; @@ -30,6 +28,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; /** @@ -62,13 +61,11 @@ public CellSet(final CellComparator c) { this.numUniqueKeys = numUniqueKeys; } - @VisibleForTesting CellSet(final NavigableMap m) { this.delegatee = m; this.numUniqueKeys = UNKNOWN_NUM_UNIQUES; } - @VisibleForTesting NavigableMap getDelegatee() { return delegatee; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java index 136efeec2016..1023890dac25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java @@ -19,11 +19,9 @@ import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -176,7 +174,6 @@ public String toString() { + (data.capacity() - nextFreeOffset.get()); } - @VisibleForTesting int getNextFreeOffset() { return this.nextFreeOffset.get(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java index d9f327167567..5245ac2853ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java @@ -28,15 +28,13 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -75,15 +73,12 @@ public enum ChunkType { private Map chunkIdMap = new ConcurrentHashMap(); private final 
boolean offheap; - @VisibleForTesting static ChunkCreator instance; - @VisibleForTesting static boolean chunkPoolDisabled = false; private MemStoreChunkPool dataChunksPool; private final int chunkSize; private MemStoreChunkPool indexChunksPool; - @VisibleForTesting ChunkCreator(int chunkSize, boolean offheap, long globalMemStoreSize, float poolSizePercentage, float initialCountPercentage, HeapMemoryManager heapMemoryManager, float indexChunkSizePercentage) { @@ -93,7 +88,6 @@ public enum ChunkType { initialCountPercentage, heapMemoryManager); } - @VisibleForTesting private void initializePools(int chunkSize, long globalMemStoreSize, float poolSizePercentage, float indexChunkSizePercentage, float initialCountPercentage, @@ -122,7 +116,6 @@ private void initializePools(int chunkSize, long globalMemStoreSize, */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "LI_LAZY_INIT_STATIC", justification = "Method is called by single thread at the starting of RS") - @VisibleForTesting public static ChunkCreator initialize(int chunkSize, boolean offheap, long globalMemStoreSize, float poolSizePercentage, float initialCountPercentage, HeapMemoryManager heapMemoryManager, @@ -135,7 +128,6 @@ public static ChunkCreator initialize(int chunkSize, boolean offheap, long globa return instance; } - @VisibleForTesting public static ChunkCreator getInstance() { return instance; } @@ -280,8 +272,7 @@ private Chunk createChunkForPool(CompactingMemStore.IndexType chunkIndexType, in return createChunk(true, chunkIndexType, chunkSize); } - @VisibleForTesting - // Used to translate the ChunkID into a chunk ref + // Used to translate the ChunkID into a chunk ref Chunk getChunk(int id) { // can return null if chunk was never mapped return chunkIdMap.get(id); @@ -299,14 +290,12 @@ Chunk removeChunk(int chunkId) { return this.chunkIdMap.remove(chunkId); } - @VisibleForTesting - // the chunks in the chunkIdMap may already be released so we shouldn't relay - // on this counting for strong correctness. This method is used only in testing. + // the chunks in the chunkIdMap may already be released so we shouldn't relay + // on this counting for strong correctness. This method is used only in testing. 
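The ChunkCreator hunk above drops @VisibleForTesting from its initialize-once singleton members. A minimal sketch of that shape (one static instance, an initialize() that only the first caller wins, and a plain getInstance()) is shown below; the class name and constructor arguments are simplified stand-ins, not the real ChunkCreator signature.

public final class ChunkCreatorLikeSingleton {

  static ChunkCreatorLikeSingleton instance; // package-private so tests can reset or swap it

  private final int chunkSize;

  private ChunkCreatorLikeSingleton(int chunkSize) {
    this.chunkSize = chunkSize;
  }

  public static ChunkCreatorLikeSingleton initialize(int chunkSize) {
    if (instance != null) {
      return instance; // already initialized, e.g. once at region server startup
    }
    instance = new ChunkCreatorLikeSingleton(chunkSize);
    return instance;
  }

  public static ChunkCreatorLikeSingleton getInstance() {
    return instance;
  }

  public int getChunkSize() {
    return chunkSize;
  }
}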
int numberOfMappedChunks() { return this.chunkIdMap.size(); } - @VisibleForTesting void clearChunkIds() { this.chunkIdMap.clear(); } @@ -471,7 +460,6 @@ public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) { } } - @VisibleForTesting static void clearDisableFlag() { chunkPoolDisabled = false; } @@ -507,12 +495,10 @@ private MemStoreChunkPool initializePool(String label, long globalMemStoreSize, return memStoreChunkPool; } - @VisibleForTesting int getMaxCount() { return getMaxCount(ChunkType.DATA_CHUNK); } - @VisibleForTesting int getMaxCount(ChunkType chunkType) { switch (chunkType) { case INDEX_CHUNK: @@ -533,12 +519,10 @@ int getMaxCount(ChunkType chunkType) { return 0; } - @VisibleForTesting int getPoolSize() { return getPoolSize(ChunkType.DATA_CHUNK); } - @VisibleForTesting int getPoolSize(ChunkType chunkType) { switch (chunkType) { case INDEX_CHUNK: @@ -558,7 +542,6 @@ int getPoolSize(ChunkType chunkType) { return 0; } - @VisibleForTesting boolean isChunkInPool(int chunkId) { Chunk c = getChunk(chunkId); if (c==null) { @@ -577,7 +560,6 @@ boolean isChunkInPool(int chunkId) { /* * Only used in testing */ - @VisibleForTesting void clearChunksInPool() { if (dataChunksPool != null) { dataChunksPool.reclaimedChunks.clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index 7516c54e625b..441b18b3302f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY; import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER; + import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; @@ -53,7 +54,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -779,12 +780,10 @@ public void deregisterChildren(ConfigurationManager manager) { // No children to register } - @VisibleForTesting public ThroughputController getCompactionThroughputController() { return compactionThroughputController; } - @VisibleForTesting /** * Shutdown the long compaction thread pool. 
* Should only be used in unit test to prevent long compaction thread pool from stealing job diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java index 2f24135ccc79..2be04c22729f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java @@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A chore service that periodically cleans up the compacted files when there are no active readers @@ -38,7 +37,6 @@ public class CompactedHFilesDischarger extends ScheduledChore { private static final Logger LOG = LoggerFactory.getLogger(CompactedHFilesDischarger.class); private RegionServerServices regionServerServices; // Default is to use executor - @VisibleForTesting private boolean useExecutor = true; /** @@ -59,7 +57,6 @@ public CompactedHFilesDischarger(final int period, final Stoppable stopper, * @param regionServerServices the region server that starts this chore * @param useExecutor true if to use the region server's executor service, false otherwise */ - @VisibleForTesting public CompactedHFilesDischarger(final int period, final Stoppable stopper, final RegionServerServices regionServerServices, boolean useExecutor) { // Need to add the config classes @@ -73,7 +70,6 @@ public CompactedHFilesDischarger(final int period, final Stoppable stopper, * cleanup. Use this method to set no-executor before you call run. * @return The old setting for useExecutor */ - @VisibleForTesting boolean setUseExecutor(final boolean useExecutor) { boolean oldSetting = this.useExecutor; this.useExecutor = useExecutor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 76292736558f..abe9cf8c23cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -18,9 +18,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; -import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -32,13 +29,15 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A memstore implementation which supports in-memory compaction. 
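For context on where the CompactingMemStore above is exercised from the client side, a usage sketch follows. The setInMemoryCompaction() builder call is an assumption about the public client API and should be checked against the HBase version in use; table and family names are placeholders.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  // Request the in-memory compaction behavior that CompactingMemStore implements
  // for one column family of a hypothetical "demo" table.
  static TableDescriptor exampleTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build())
        .build();
  }
}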
@@ -79,7 +78,6 @@ public class CompactingMemStore extends AbstractMemStore { // inWalReplay is true while we are synchronously replaying the edits from WAL private boolean inWalReplay = false; - @VisibleForTesting protected final AtomicBoolean allowCompaction = new AtomicBoolean(true); private boolean compositeSnapshot = true; @@ -128,7 +126,6 @@ public CompactingMemStore(Configuration conf, CellComparator c, (this.compactor == null? "NULL": this.compactor.toString())); } - @VisibleForTesting protected MemStoreCompactor createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy) throws IllegalArgumentIOException { return new MemStoreCompactor(this, compactionPolicy); @@ -334,7 +331,6 @@ protected boolean preUpdate(MutableSegment currentActive, Cell cell, } // the getSegments() method is used for tests only - @VisibleForTesting @Override protected List getSegments() { List pipelineList = pipeline.getSegments(); @@ -367,7 +363,6 @@ public void flattenOneSegment(long requesterVersion, MemStoreCompactionStrategy } // setter is used only for testability - @VisibleForTesting void setIndexType(IndexType type) { indexType = type; // Because this functionality is for testing only and tests are setting in-memory flush size @@ -413,7 +408,6 @@ public List getScanners(long readPt) throws IOException { return list; } - @VisibleForTesting protected List createList(int capacity) { return new ArrayList<>(capacity); } @@ -451,7 +445,6 @@ private boolean checkAndAddToActiveSize(MutableSegment currActive, Cell cellToAd // externally visible only for tests // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock, // otherwise there is a deadlock - @VisibleForTesting void flushInMemory() { MutableSegment currActive = getActive(); if(currActive.setInMemoryFlushed()) { @@ -499,7 +492,6 @@ private ThreadPoolExecutor getPool() { return getRegionServices().getInMemoryCompactionPool(); } - @VisibleForTesting protected boolean shouldFlushInMemory(MutableSegment currActive, Cell cellToAdd, MemStoreSizing memstoreSizing) { long cellSize = MutableSegment.getCellLength(cellToAdd); @@ -596,7 +588,6 @@ public void run() { } } - @VisibleForTesting boolean isMemStoreFlushingInMemory() { return inMemoryCompactionInProgress.get(); } @@ -619,7 +610,6 @@ Cell getNextRow(final Cell cell) { return lowest; } - @VisibleForTesting long getInmemoryFlushSize() { return inmemoryFlushSize; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index 19647faa9de1..53ef82d56950 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The CompositeImmutableSegments is created as a collection of ImmutableSegments and supports @@ -52,7 +51,6 @@ public CompositeImmutableSegment(CellComparator comparator, List getAllSegments() { return new ArrayList<>(segments); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java 
index 5ffd96021fe9..1f22dc4d2b2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.util.Random; import org.apache.hadoop.conf.Configuration; @@ -86,7 +84,7 @@ long getDesiredMaxFileSize() { return desiredMaxFileSize; } - @VisibleForTesting + @InterfaceAudience.Private public boolean positiveJitterRate() { return this.jitterRate > 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java index c77de648f4e9..12896a2d54ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java @@ -38,10 +38,4 @@ public RpcScheduler create(Configuration conf, PriorityFunction priority, Aborta HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); return new FifoRpcScheduler(conf, handlerCount); } - - @Deprecated - @Override - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 5960b8030900..7ce7f0310c7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -93,7 +93,6 @@ public class HMobStore extends HStore { private AtomicLong mobFlushedCellsSize = new AtomicLong(); private AtomicLong mobScanCellsCount = new AtomicLong(); private AtomicLong mobScanCellsSize = new AtomicLong(); - private ColumnFamilyDescriptor family; private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); // When we add a MOB reference cell to the HFile, we will add 2 tags along with it @@ -107,16 +106,15 @@ public class HMobStore extends HStore { public HMobStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { super(region, family, confParam, warmup); - this.family = family; this.mobFileCache = region.getMobFileCache(); this.homePath = MobUtils.getMobHome(conf); this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(), - family.getNameAsString()); + getColumnFamilyName()); List locations = new ArrayList<>(2); locations.add(mobFamilyPath); TableName tn = region.getTableDescriptor().getTableName(); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn) - .getEncodedName(), family.getNameAsString())); + .getEncodedName(), getColumnFamilyName())); map.put(tn, locations); List tags = new ArrayList<>(2); tags.add(MobConstants.MOB_REF_TAG); @@ -209,7 +207,7 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException { MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID() - .toString().replaceAll("-", ""), region.getRegionInfo().getEncodedName()); + 
.toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName()); return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction); } @@ -226,9 +224,11 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) throws IOException { - return MobUtils.createWriter(conf, region.getFilesystem(), family, - new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, cacheConf, - cryptoContext, checksumType, bytesPerChecksum, blocksize, BloomType.NONE, isCompaction); + return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), + new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), + getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), getStoreContext().getBlockSize(), BloomType.NONE, + isCompaction); } /** @@ -245,10 +245,10 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio validateMobFile(sourceFile); LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath); Path parent = dstPath.getParent(); - if (!region.getFilesystem().exists(parent)) { - region.getFilesystem().mkdirs(parent); + if (!getFileSystem().exists(parent)) { + getFileSystem().mkdirs(parent); } - if (!region.getFilesystem().rename(sourceFile, dstPath)) { + if (!getFileSystem().rename(sourceFile, dstPath)) { throw new IOException("Failed rename of " + sourceFile + " to " + dstPath); } } @@ -261,7 +261,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { - storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.cacheConf, + storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, isPrimaryReplicaStore()); storeFile.initReader(); } catch (IOException e) { @@ -352,9 +352,11 @@ public List getLocations(TableName tableName) throws IOException { locations = map.get(tableName); if (locations == null) { locations = new ArrayList<>(2); - locations.add(MobUtils.getMobFamilyPath(conf, tableName, family.getNameAsString())); + locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor() + .getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName, - MobUtils.getMobRegionInfo(tableName).getEncodedName(), family.getNameAsString())); + MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor() + .getNameAsString())); map.put(tableName, locations); } } finally { @@ -388,7 +390,7 @@ private MobCell readCell(List locations, String fileName, Cell search, MobFile file = null; Path path = new Path(location, fileName); try { - file = mobFileCache.openFile(fs, path, cacheConf); + file = mobFileCache.openFile(fs, path, getCacheConfig()); return readPt != -1 ? 
file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index a09151564356..3b32f46ed044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -30,7 +30,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.ParseException; -import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -100,7 +99,6 @@ import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; -import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; @@ -112,7 +110,6 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -132,14 +129,11 @@ import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterWrapper; -import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -148,8 +142,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry; -import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker; @@ -189,7 +181,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -319,7 +310,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Track data size in all memstores private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing(); - @VisibleForTesting RegionServicesForStores regionServicesForStores; // Debug possible data loss due to WAL off @@ 
-397,7 +387,7 @@ public void setRestoredRegion(boolean restoredRegion) { static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L; final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool(); - private final ConcurrentHashMap scannerReadPoints; + final ConcurrentHashMap scannerReadPoints; /** * The sequence ID that was enLongAddered when this region was opened. @@ -581,7 +571,6 @@ public Result getResult() { } /** A result object from prepare flush cache stage */ - @VisibleForTesting static class PrepareFlushResult { final FlushResultImpl result; // indicating a failure result from prepare final TreeMap storeFlushCtxs; @@ -688,7 +677,7 @@ void sawNoSuchFamily() { // Last flush time for each Store. Useful when we are flushing for each column private final ConcurrentMap lastStoreFlushTimeMap = new ConcurrentHashMap<>(); - final RegionServerServices rsServices; + protected RegionServerServices rsServices; private RegionServerAccounting rsAccounting; private long flushCheckInterval; // flushPerChanges is to prevent too many changes in memstore @@ -696,6 +685,10 @@ void sawNoSuchFamily() { private long blockingMemStoreSize; // Used to guard closes final ReentrantReadWriteLock lock; + // Used to track interruptible holders of the region lock. Currently that is only RPC handler + // threads. Boolean value in map determines if lock holder can be interrupted, normally true, + // but may be false when thread is transiting a critical section. + final ConcurrentHashMap regionLockHolders; // Stop updates lock private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock(); @@ -742,7 +735,6 @@ void sawNoSuchFamily() { * @deprecated Use other constructors. */ @Deprecated - @VisibleForTesting public HRegion(final Path tableDir, final WAL wal, final FileSystem fs, final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, final RegionServerServices rsServices) { @@ -788,6 +780,7 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR; this.lock = new ReentrantReadWriteLock(conf.getBoolean(FAIR_REENTRANT_CLOSE_LOCK, DEFAULT_FAIR_REENTRANT_CLOSE_LOCK)); + this.regionLockHolders = new ConcurrentHashMap<>(); this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, DEFAULT_CACHE_FLUSH_INTERVAL); this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES); @@ -903,8 +896,8 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co Pair retainedRWRequestsCnt = rsServices.getRegionServerAccounting() .getRetainedRegionRWRequestsCnt().get(getRegionInfo().getEncodedName()); if (retainedRWRequestsCnt != null) { - this.setReadRequestsCount(retainedRWRequestsCnt.getFirst()); - this.setWriteRequestsCount(retainedRWRequestsCnt.getSecond()); + this.addReadRequestsCount(retainedRWRequestsCnt.getFirst()); + this.addWriteRequestsCount(retainedRWRequestsCnt.getSecond()); // remove them since won't use again rsServices.getRegionServerAccounting().getRetainedRegionRWRequestsCnt() .remove(getRegionInfo().getEncodedName()); @@ -912,17 +905,19 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co } } - void setHTableSpecificConf() { - if (this.htableDescriptor == null) return; + private void setHTableSpecificConf() { + if (this.htableDescriptor == null) { + return; + } long flushSize = this.htableDescriptor.getMemStoreFlushSize(); if (flushSize <= 0) { flushSize = 
conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } this.memstoreFlushSize = flushSize; long mult = conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); this.blockingMemStoreSize = this.memstoreFlushSize * mult; } @@ -946,7 +941,6 @@ public long initialize() throws IOException { * @return What the next sequence (edit) id should be. * @throws IOException e */ - @VisibleForTesting long initialize(final CancelableProgressable reporter) throws IOException { //Refuse to open the region if there is no column family in the table @@ -1174,7 +1168,7 @@ public HStore call() throws IOException { LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { @@ -1198,11 +1192,11 @@ public HStore call() throws IOException { private void initializeWarmup(final CancelableProgressable reporter) throws IOException { MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this); // Initialize all the HStores - status.setStatus("Warming up all the Stores"); + status.setStatus("Warmup all stores of " + this.getRegionInfo().getRegionNameAsString()); try { initializeStores(reporter, status, true); } finally { - status.markComplete("Done warming up."); + status.markComplete("Warmed up " + this.getRegionInfo().getRegionNameAsString()); } } @@ -1225,7 +1219,6 @@ private NavigableMap> getStoreFiles() { return allStoreFiles; } - @VisibleForTesting protected void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException { Map> storeFiles = getStoreFiles(); RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( @@ -1337,7 +1330,7 @@ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration * Increase the size of mem store in this region and the size of global mem * store */ - void incMemStoreSize(MemStoreSize mss) { + private void incMemStoreSize(MemStoreSize mss) { incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), mss.getCellsCount()); } @@ -1357,7 +1350,7 @@ void decrMemStoreSize(MemStoreSize mss) { mss.getCellsCount()); } - void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, + private void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, int cellsCountDelta) { if (this.rsAccounting != null) { rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta); @@ -1509,7 +1502,6 @@ public boolean areWritesEnabled() { } } - @VisibleForTesting public MultiVersionConcurrencyControl getMVCC() { return mvcc; } @@ -1578,6 +1570,17 @@ public Map> close() throws IOException { */ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G + public static final String CLOSE_WAIT_ABORT = "hbase.regionserver.close.wait.abort"; + public static final boolean DEFAULT_CLOSE_WAIT_ABORT = true; + public static final String CLOSE_WAIT_TIME = "hbase.regionserver.close.wait.time.ms"; + public static final long DEFAULT_CLOSE_WAIT_TIME = 60000; // 1 minute + public static final String CLOSE_WAIT_INTERVAL = "hbase.regionserver.close.wait.interval.ms"; + public static final long 
DEFAULT_CLOSE_WAIT_INTERVAL = 10000; // 10 seconds + + public Map> close(boolean abort) throws IOException { + return close(abort, false); + } + /** * Close down this HRegion. Flush the cache unless abort parameter is true, * Shut down each HStore, don't service any more calls. @@ -1586,6 +1589,7 @@ public Map> close() throws IOException { * time-sensitive thread. * * @param abort true if server is aborting (only during testing) + * @param ignoreStatus true if ignore the status (wont be showed on task list) * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of StoreFile objects. Can be null if * we are not to close at this time or we are already closed. @@ -1595,12 +1599,13 @@ public Map> close() throws IOException { * because a Snapshot was not properly persisted. The region is put in closing mode, and the * caller MUST abort after this. */ - public Map> close(boolean abort) throws IOException { + public Map> close(boolean abort, boolean ignoreStatus) + throws IOException { // Only allow one thread to close at a time. Serialize them so dual // threads attempting to close will run up against each other. MonitoredTask status = TaskMonitor.get().createStatus( "Closing region " + this.getRegionInfo().getEncodedName() + - (abort ? " due to abort" : "")); + (abort ? " due to abort" : ""), ignoreStatus); status.enableStatusJournal(true); status.setStatus("Waiting for close lock"); try { @@ -1619,7 +1624,6 @@ public Map> close(boolean abort) throws IOException { /** * Exposed for some very specific unit tests. */ - @VisibleForTesting public void setClosing(boolean closing) { this.closing.set(closing); } @@ -1629,7 +1633,6 @@ public void setClosing(boolean closing) { * Instead of blocking, the {@link HRegion#doClose} will throw exception if you set the timeout. * @param timeoutForWriteLock the second time to wait for the write lock in {@link HRegion#doClose} */ - @VisibleForTesting public void setTimeoutForWriteLock(long timeoutForWriteLock) { assert timeoutForWriteLock >= 0; this.timeoutForWriteLock = timeoutForWriteLock; @@ -1673,22 +1676,103 @@ private Map> doClose(boolean abort, MonitoredTask statu } } - if (timeoutForWriteLock == null - || timeoutForWriteLock == Long.MAX_VALUE) { - // block waiting for the lock for closing - lock.writeLock().lock(); // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine - } else { - try { - boolean succeed = lock.writeLock().tryLock(timeoutForWriteLock, TimeUnit.SECONDS); - if (!succeed) { - throw new IOException("Failed to get write lock when closing region"); + // Set the closing flag + // From this point new arrivals at the region lock will get NSRE. + + this.closing.set(true); + LOG.info("Closing region {}", this); + + // Acquire the close lock + + // The configuration parameter CLOSE_WAIT_ABORT is overloaded to enable both + // the new regionserver abort condition and interrupts for running requests. + // If CLOSE_WAIT_ABORT is not enabled there is no change from earlier behavior, + // we will not attempt to interrupt threads servicing requests nor crash out + // the regionserver if something remains stubborn. 
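The key names and defaults used below are taken directly from the constants added in this hunk (CLOSE_WAIT_ABORT, CLOSE_WAIT_TIME, CLOSE_WAIT_INTERVAL); the specific values chosen are only an illustration of how an operator might loosen the close timeout.

import org.apache.hadoop.conf.Configuration;

public class CloseWaitTuningExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hbase.regionserver.close.wait.abort", true);    // default: true
    conf.setLong("hbase.regionserver.close.wait.time.ms", 120000);   // default: 60000 (1 minute)
    conf.setLong("hbase.regionserver.close.wait.interval.ms", 5000); // default: 10000
    System.out.println("close wait = "
        + conf.getLong("hbase.regionserver.close.wait.time.ms", 60000) + " ms");
  }
}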
+ + final boolean canAbort = conf.getBoolean(CLOSE_WAIT_ABORT, DEFAULT_CLOSE_WAIT_ABORT); + boolean useTimedWait = false; + if (timeoutForWriteLock != null && timeoutForWriteLock != Long.MAX_VALUE) { + // convert legacy use of timeoutForWriteLock in seconds to new use in millis + timeoutForWriteLock = TimeUnit.SECONDS.toMillis(timeoutForWriteLock); + useTimedWait = true; + } else if (canAbort) { + timeoutForWriteLock = conf.getLong(CLOSE_WAIT_TIME, DEFAULT_CLOSE_WAIT_TIME); + useTimedWait = true; + } + if (LOG.isDebugEnabled()) { + LOG.debug((useTimedWait ? "Time limited wait" : "Waiting without time limit") + + " for close lock on " + this); + } + final long closeWaitInterval = conf.getLong(CLOSE_WAIT_INTERVAL, DEFAULT_CLOSE_WAIT_INTERVAL); + long elapsedWaitTime = 0; + if (useTimedWait) { + // Sanity check configuration + long remainingWaitTime = timeoutForWriteLock; + if (remainingWaitTime < closeWaitInterval) { + LOG.warn("Time limit for close wait of " + timeoutForWriteLock + + " ms is less than the configured lock acquisition wait interval " + + closeWaitInterval + " ms, using wait interval as time limit"); + remainingWaitTime = closeWaitInterval; + } + boolean acquired = false; + do { + long start = EnvironmentEdgeManager.currentTime(); + try { + acquired = lock.writeLock().tryLock(Math.min(remainingWaitTime, closeWaitInterval), + TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // Interrupted waiting for close lock. More likely the server is shutting down, not + // normal operation, so aborting upon interrupt while waiting on this lock would not + // provide much value. Throw an IOE (as IIOE) like we would in the case where we + // fail to acquire the lock. + String msg = "Interrupted while waiting for close lock on " + this; + LOG.warn(msg, e); + throw (InterruptedIOException) new InterruptedIOException(msg).initCause(e); + } + long elapsed = EnvironmentEdgeManager.currentTime() - start; + elapsedWaitTime += elapsed; + remainingWaitTime -= elapsed; + if (canAbort && !acquired && remainingWaitTime > 0) { + // Before we loop to wait again, interrupt all region operations that might + // still be in progress, to encourage them to break out of waiting states or + // inner loops, throw an exception to clients, and release the read lock via + // endRegionOperation. + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupting region operations after waiting for close lock for " + + elapsedWaitTime + " ms on " + this + ", " + remainingWaitTime + + " ms remaining"); + } + interruptRegionOperations(); } - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } while (!acquired && remainingWaitTime > 0); + + // If we fail to acquire the lock, trigger an abort if we can; otherwise throw an IOE + // to let the caller know we could not proceed with the close. 
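A standalone sketch of the bounded wait used in the close path above: try the write lock in short slices and, between failed attempts, interrupt in-flight operations so they drop their read locks. Field and parameter names here are illustrative, not the HRegion members.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class BoundedCloseLockExample {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  boolean tryAcquireForClose(long timeoutMs, long intervalMs, Runnable interruptOperations)
      throws InterruptedException {
    long remaining = Math.max(timeoutMs, intervalMs); // never wait less than one interval
    boolean acquired = false;
    while (!acquired && remaining > 0) {
      long start = System.currentTimeMillis();
      acquired =
          lock.writeLock().tryLock(Math.min(remaining, intervalMs), TimeUnit.MILLISECONDS);
      remaining -= System.currentTimeMillis() - start;
      if (!acquired && remaining > 0) {
        interruptOperations.run(); // nudge readers out of waits so the writer can get in
      }
    }
    return acquired; // caller aborts the server or throws if this is still false
  }
}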
+ if (!acquired) { + String msg = "Failed to acquire close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"; + LOG.error(msg); + if (canAbort) { + // If we failed to acquire the write lock, abort the server + rsServices.abort(msg, null); + } + throw new IOException(msg); } + + } else { + + long start = EnvironmentEdgeManager.currentTime(); + lock.writeLock().lock(); + elapsedWaitTime = EnvironmentEdgeManager.currentTime() - start; + } - this.closing.set(true); - LOG.info("Closing region {}", this); + + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"); + } + status.setStatus("Disabling writes for close"); try { if (this.isClosed()) { @@ -1776,7 +1860,7 @@ public Pair> call() throws IOException { familyFiles.addAll(storeFiles.getSecond()); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof IOException) { @@ -1897,7 +1981,7 @@ public boolean waitForFlushes(long timeout) { } } - protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( + private ThreadPoolExecutor getStoreOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.min(numStores, @@ -1906,7 +1990,7 @@ protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( + ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.max(1, @@ -1916,7 +2000,7 @@ protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, + private static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, final String threadNamePrefix) { return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() { @@ -1946,7 +2030,6 @@ public TableDescriptor getTableDescriptor() { return this.htableDescriptor; } - @VisibleForTesting public void setTableDescriptor(TableDescriptor desc) { htableDescriptor = desc; } @@ -1963,7 +2046,6 @@ public BlockCache getBlockCache() { /** * Only used for unit test which doesn't start region server. */ - @VisibleForTesting public void setBlockCache(BlockCache blockCache) { this.blockCache = blockCache; } @@ -1975,7 +2057,6 @@ public MobFileCache getMobFileCache() { /** * Only used for unit test which doesn't start region server. */ - @VisibleForTesting public void setMobFileCache(MobFileCache mobFileCache) { this.mobFileCache = mobFileCache; } @@ -1983,7 +2064,6 @@ public void setMobFileCache(MobFileCache mobFileCache) { /** * @return split policy for this region. 
*/ - @VisibleForTesting RegionSplitPolicy getSplitPolicy() { return this.splitPolicy; } @@ -2027,7 +2107,6 @@ FileSystem getWalFileSystem() throws IOException { * @return the Region directory under WALRootDirectory * @throws IOException if there is an error getting WALRootDir */ - @VisibleForTesting public Path getWALRegionDir() throws IOException { if (regionDir == null) { regionDir = CommonFSUtils.getWALRegionDir(conf, getRegionInfo().getTable(), @@ -2136,7 +2215,6 @@ public void compact(boolean majorCompaction) throws IOException { *

    * It is used by utilities and testing */ - @VisibleForTesting public void compactStores() throws IOException { for (HStore s : stores.values()) { Optional compaction = s.requestCompaction(); @@ -2151,7 +2229,6 @@ public void compactStores() throws IOException { *

    * It is used by utilities and testing */ - @VisibleForTesting void compactStore(byte[] family, ThroughputController throughputController) throws IOException { HStore s = getStore(family); Optional compaction = s.requestCompaction(); @@ -2392,11 +2469,11 @@ enum Result { boolean isCompactionNeeded(); } - public FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, + FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, FlushLifeCycleTracker tracker) throws IOException { - List families = null; + List families = null; if (flushAllStores) { - families = new ArrayList(); + families = new ArrayList<>(); families.addAll(this.getTableDescriptor().getColumnFamilyNames()); } return this.flushcache(families, writeFlushRequestWalMarker, tracker); @@ -2438,11 +2515,13 @@ public FlushResultImpl flushcache(List families, status.setStatus("Acquiring readlock on region"); // block waiting for the lock for flushing cache lock.readLock().lock(); + boolean flushed = true; try { if (this.closed.get()) { String msg = "Skipping flush on " + this + " because closed"; LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } if (coprocessorHost != null) { @@ -2459,15 +2538,11 @@ public FlushResultImpl flushcache(List families, if (!writestate.flushing && writestate.writesEnabled) { this.writestate.flushing = true; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("NOT flushing memstore for region " + this - + ", flushing=" + writestate.flushing + ", writesEnabled=" - + writestate.writesEnabled); - } - String msg = "Not flushing since " - + (writestate.flushing ? "already flushing" - : "writes not enabled"); + String msg = "NOT flushing " + this + " as " + (writestate.flushing ? "already flushing" + : "writes are not enabled"); + LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } } @@ -2505,8 +2580,11 @@ public FlushResultImpl flushcache(List families, } } finally { lock.readLock().unlock(); - LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), - status.prettyPrintJournal()); + if (flushed) { + // Don't log this journal stuff if no flush -- confusing. + LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), + status.prettyPrintJournal()); + } status.cleanup(); } } @@ -2876,7 +2954,7 @@ private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarke @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="Intentional; notify is about completed flush") - protected FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, + FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, PrepareFlushResult prepareResult, Collection storesToFlush) throws IOException { // prepare flush context is carried via PrepareFlushResult TreeMap storeFlushCtxs = prepareResult.storeFlushCtxs; @@ -3031,7 +3109,6 @@ protected FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask sta * @return Next sequence number unassociated with any actual edit. 
* @throws IOException */ - @VisibleForTesting protected long getNextSequenceId(final WAL wal) throws IOException { WriteEntry we = mvcc.begin(); mvcc.completeAndWait(we); @@ -3074,19 +3151,13 @@ private RegionScannerImpl getScanner(Scan scan, List additional } } - protected RegionScanner instantiateRegionScanner(Scan scan, - List additionalScanners) throws IOException { - return instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE, - HConstants.NO_NONCE); - } - protected RegionScannerImpl instantiateRegionScanner(Scan scan, - List additionalScanners, long nonceGroup, long nonce) throws IOException { + List additionalScanners, long nonceGroup, long nonce) throws IOException { if (scan.isReversed()) { if (scan.getFilter() != null) { scan.getFilter().setReversed(true); } - return new ReversedRegionScannerImpl(scan, additionalScanners, this); + return new ReversedRegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } return new RegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } @@ -3094,9 +3165,8 @@ protected RegionScannerImpl instantiateRegionScanner(Scan scan, /** * Prepare a delete for a row mutation processor * @param delete The passed delete is modified by this method. WARNING! - * @throws IOException */ - public void prepareDelete(Delete delete) throws IOException { + private void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert if(delete.getFamilyCellMap().isEmpty()){ for(byte [] family : this.htableDescriptor.getColumnFamilyNames()){ @@ -3120,38 +3190,18 @@ public void delete(Delete delete) throws IOException { startRegionOperation(Operation.DELETE); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(delete); + mutate(delete); } finally { closeRegionOperation(Operation.DELETE); } } - /** - * Row needed by below method. - */ - private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); - - /** - * This is used only by unit tests. Not required to be a public API. - * @param familyMap map of family to edits for the given family. - * @throws IOException - */ - void delete(NavigableMap> familyMap, - Durability durability) throws IOException { - Delete delete = new Delete(FOR_UNIT_TESTS_ONLY, HConstants.LATEST_TIMESTAMP, familyMap); - delete.setDurability(durability); - doBatchMutate(delete); - } - /** * Set up correct timestamps in the KVs in Delete object. - *

    Caller should have the row and region locks. - * @param mutation - * @param familyMap - * @param byteNow - * @throws IOException + *

    + * Caller should have the row and region locks. */ - public void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { for (Map.Entry> e : familyMap.entrySet()) { @@ -3195,7 +3245,7 @@ public void prepareDeleteTimestamps(Mutation mutation, Map> f } } - void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) + private void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) throws IOException { List result = get(get, false); @@ -3223,7 +3273,7 @@ public void put(Put put) throws IOException { startRegionOperation(Operation.PUT); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(put); + mutate(put); } finally { closeRegionOperation(Operation.PUT); } @@ -3270,7 +3320,7 @@ public BatchOperation(final HRegion region, T[] operations) { * Visitor interface for batch operations */ @FunctionalInterface - public interface Visitor { + interface Visitor { /** * @param index operation index * @return If true continue visiting remaining entries, break otherwise @@ -3408,8 +3458,11 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep try { this.checkAndPrepareMutation(mutation, timestamp); - // store the family map reference to allow for mutations - familyCellMaps[index] = mutation.getFamilyCellMap(); + if (mutation instanceof Put || mutation instanceof Delete) { + // store the family map reference to allow for mutations + familyCellMaps[index] = mutation.getFamilyCellMap(); + } + // store durability for the batch (highest durability of all operations in the batch) Durability tmpDur = region.getEffectiveDurability(mutation.getDurability()); if (tmpDur.ordinal() > durability.ordinal()) { @@ -3673,14 +3726,17 @@ protected void applyFamilyMapToMemStore(Map> familyMap, /** - * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most - * of the logic is same. + * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most of + * the logic is same. 
*/ - static class MutationBatchOperation extends BatchOperation { + private static class MutationBatchOperation extends BatchOperation { + private long nonceGroup; + private long nonce; + public MutationBatchOperation(final HRegion region, Mutation[] operations, boolean atomic, - long nonceGroup, long nonce) { + long nonceGroup, long nonce) { super(region, operations); this.atomic = atomic; this.nonceGroup = nonceGroup; @@ -3800,33 +3856,51 @@ public void prepareMiniBatchOperations(MiniBatchOperationInProgress mi Bytes.toBytes(timestamp)); miniBatchOp.incrementNumOfDeletes(); } else if (mutation instanceof Increment || mutation instanceof Append) { + boolean returnResults; + if (mutation instanceof Increment) { + returnResults = ((Increment) mutation).isReturnResults(); + } else { + returnResults = ((Append) mutation).isReturnResults(); + } + // For nonce operations canProceed[index] = startNonceOperation(nonceGroup, nonce); if (!canProceed[index]) { - // convert duplicate increment/append to get - List results = region.get(toGet(mutation), false, nonceGroup, nonce); - retCodeDetails[index] = new OperationStatus(OperationStatusCode.SUCCESS, - Result.create(results)); + Result result; + if (returnResults) { + // convert duplicate increment/append to get + List results = region.get(toGet(mutation), false, nonceGroup, nonce); + result = Result.create(results); + } else { + result = Result.EMPTY_RESULT; + } + retCodeDetails[index] = new OperationStatus(OperationStatusCode.SUCCESS, result); return true; } - boolean returnResults; - if (mutation instanceof Increment) { - returnResults = ((Increment) mutation).isReturnResults(); - miniBatchOp.incrementNumOfIncrements(); - } else { - returnResults = ((Append) mutation).isReturnResults(); - miniBatchOp.incrementNumOfAppends(); + Result result = null; + if (region.coprocessorHost != null) { + if (mutation instanceof Increment) { + result = region.coprocessorHost.preIncrementAfterRowLock((Increment) mutation); + } else { + result = region.coprocessorHost.preAppendAfterRowLock((Append) mutation); + } } - Result result = doCoprocessorPreCallAfterRowLock(mutation); if (result != null) { retCodeDetails[index] = new OperationStatus(OperationStatusCode.SUCCESS, returnResults ? result : Result.EMPTY_RESULT); return true; } + List results = returnResults ? new ArrayList<>(mutation.size()) : null; familyCellMaps[index] = reckonDeltas(mutation, results, timestamp); - this.results[index] = results != null ? Result.create(results): Result.EMPTY_RESULT; + this.results[index] = results != null ? Result.create(results) : Result.EMPTY_RESULT; + + if (mutation instanceof Increment) { + miniBatchOp.incrementNumOfIncrements(); + } else { + miniBatchOp.incrementNumOfAppends(); + } } region.rewriteCellTags(familyCellMaps[index], mutation); @@ -3908,28 +3982,10 @@ private static Get toGet(final Mutation mutation) throws IOException { return get; } - /** - * Do coprocessor pre-increment or pre-append after row lock call. - * @return Result returned out of the coprocessor, which means bypass all further processing - * and return the preferred Result instead, or null which means proceed. 
- */ - private Result doCoprocessorPreCallAfterRowLock(Mutation mutation) throws IOException { - assert mutation instanceof Increment || mutation instanceof Append; - Result result = null; - if (region.coprocessorHost != null) { - if (mutation instanceof Increment) { - result = region.coprocessorHost.preIncrementAfterRowLock((Increment) mutation); - } else { - result = region.coprocessorHost.preAppendAfterRowLock((Append) mutation); - } - } - return result; - } - private Map> reckonDeltas(Mutation mutation, List results, long now) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; - Map> ret = new HashMap<>(); + Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Process a Store/family at a time. for (Map.Entry> entry: mutation.getFamilyCellMap().entrySet()) { final byte[] columnFamilyName = entry.getKey(); @@ -3972,14 +4028,28 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now byte[] columnFamily = store.getColumnFamilyDescriptor().getName(); List> cellPairs = new ArrayList<>(deltas.size()); + // Sort the cells so that they match the order that they appear in the Get results. + // Otherwise, we won't be able to find the existing values if the cells are not specified + // in order by the client since cells are in an array list. + deltas.sort(store.getComparator()); + // Get previous values for all columns in this family. + Get get = new Get(mutation.getRow()); + for (Cell cell: deltas) { + get.addColumn(columnFamily, CellUtil.cloneQualifier(cell)); + } TimeRange tr; if (mutation instanceof Increment) { tr = ((Increment) mutation).getTimeRange(); } else { tr = ((Append) mutation).getTimeRange(); } - List currentValues = get(mutation, store, deltas, tr); + + if (tr != null) { + get.setTimeRange(tr.getMin(), tr.getMax()); + } + + List currentValues = region.get(get, false); // Iterate the input columns and update existing values if they were found, otherwise // add new column initialized to the delta amount @@ -4073,31 +4143,6 @@ private static long getLongValue(final Cell cell) throws DoNotRetryIOException { return PrivateCellUtil.getValueAsLong(cell); } - /** - * Do a specific Get on passed columnFamily and column qualifiers. - * @param mutation Mutation we are doing this Get for. - * @param store Which column family on row (TODO: Go all Gets in one go) - * @param coordinates Cells from mutation used as coordinates applied to Get. - * @return Return list of Cells found. - */ - private List get(Mutation mutation, HStore store, List coordinates, - TimeRange tr) throws IOException { - // Sort the cells so that they match the order that they appear in the Get results. - // Otherwise, we won't be able to find the existing values if the cells are not specified - // in order by the client since cells are in an array list. - // TODO: I don't get why we are sorting. St.Ack 20150107 - sort(coordinates, store.getComparator()); - Get get = new Get(mutation.getRow()); - for (Cell cell: coordinates) { - get.addColumn(store.getColumnFamilyDescriptor().getName(), CellUtil.cloneQualifier(cell)); - } - // Increments carry time range. If an Increment instance, put it on the Get. 
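Context for the HashMap-to-TreeMap switch in reckonDeltas above: byte[] keys rely on identity hashCode/equals in a HashMap, so a content-comparing map is needed for column family keys. A small self-contained illustration (not part of the patch):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteArrayKeyDemo {
      public static void main(String[] args) {
        Map<byte[], String> hashed = new HashMap<>();
        hashed.put(Bytes.toBytes("cf"), "value");
        // A fresh byte[] with the same contents is a different key to a HashMap.
        System.out.println(hashed.get(Bytes.toBytes("cf"))); // null

        Map<byte[], String> ordered = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        ordered.put(Bytes.toBytes("cf"), "value");
        // Contents-based comparison, and iteration follows byte order of the families.
        System.out.println(ordered.get(Bytes.toBytes("cf"))); // value
      }
    }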
- if (tr != null) { - get.setTimeRange(tr.getMin(), tr.getMax()); - } - return region.get(get, false); - } - @Override public List> buildWALEdits(final MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -4272,6 +4317,7 @@ private void callPreMutateCPHook(int index, final WALEdit walEdit, final int[] m } } + // TODO Support Increment/Append operations private void checkAndMergeCPMutations(final MiniBatchOperationInProgress miniBatchOp, final List acquiredRowLocks, final long timestamp) throws IOException { visitBatchOperations(true, nextIndexToProcess + miniBatchOp.size(), (int i) -> { @@ -4325,10 +4371,12 @@ private void mergeFamilyMaps(Map> familyMap, * Batch of mutations for replay. Base class is shared with {@link MutationBatchOperation} as most * of the logic is same. */ - static class ReplayBatchOperation extends BatchOperation { + private static final class ReplayBatchOperation extends BatchOperation { + private long origLogSeqNum = 0; + public ReplayBatchOperation(final HRegion region, MutationReplay[] operations, - long origLogSeqNum) { + long origLogSeqNum) { super(region, operations); this.origLogSeqNum = origLogSeqNum; } @@ -4436,12 +4484,12 @@ public void completeMiniBatchOperations( } } - public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, - long nonce) throws IOException { + private OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, + long nonce) throws IOException { // As it stands, this is used for 3 things - // * batchMutate with single mutation - put/delete/increment/append, separate or from - // checkAndMutate. - // * coprocessor calls (see ex. BulkDeleteEndpoint). + // * batchMutate with single mutation - put/delete/increment/append, separate or from + // checkAndMutate. + // * coprocessor calls (see ex. BulkDeleteEndpoint). // So nonces are not really ever used by HBase. They could be by coprocs, and checkAnd... return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce)); } @@ -4449,8 +4497,12 @@ public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long @Override public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException { // If the mutations has any Increment/Append operations, we need to do batchMutate atomically - boolean atomic = Arrays.stream(mutations) - .anyMatch(m -> m instanceof Increment || m instanceof Append); + boolean atomic = + Arrays.stream(mutations).anyMatch(m -> m instanceof Increment || m instanceof Append); + return batchMutate(mutations, atomic); + } + + OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic) throws IOException { return batchMutate(mutations, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } @@ -4480,25 +4532,23 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI /** * Perform a batch of mutations. - * - * It supports Put, Delete, Increment, Append mutations and will ignore other types passed. + *

    * Operations in a batch are stored with the highest durability specified for all operations in a * batch, except for {@link Durability#SKIP_WAL}. - * - *

    This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with + *

    + * This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with * {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[])} with - * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch - * and mutation batch is very similar, lot of code is shared by providing generic methods in - * base class {@link BatchOperation}. The logic for this method and - * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which - * are overridden by derived classes to implement special behavior. - * + * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch and + * mutation batch is very similar, lot of code is shared by providing generic methods in base + * class {@link BatchOperation}. The logic for this method and + * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which are + * overridden by derived classes to implement special behavior. * @param batchOp contains the list of mutations - * @return an array of OperationStatus which internally contains the - * OperationStatusCode and the exceptionMessage if any. + * @return an array of OperationStatus which internally contains the OperationStatusCode and the + * exceptionMessage if any. * @throws IOException if an IO problem is encountered */ - OperationStatus[] batchMutate(BatchOperation batchOp) throws IOException { + private OperationStatus[] batchMutate(BatchOperation batchOp) throws IOException { boolean initialized = false; batchOp.startRegionOperation(); try { @@ -4542,6 +4592,11 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { MiniBatchOperationInProgress miniBatchOp = null; /** Keep track of the locks we hold so we can release them in finally clause */ List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size()); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with // locked rows @@ -4555,20 +4610,31 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { return; } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount()); locked = true; + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp // We should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer // timestamp + long now = EnvironmentEdgeManager.currentTime(); batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks); // STEP 3. Build WAL edit + List> walEdits = batchOp.buildWALEdits(miniBatchOp); // STEP 4. Append the WALEdits to WAL and sync. + for(Iterator> it = walEdits.iterator(); it.hasNext();) { Pair nonceKeyWALEditPair = it.next(); walEdit = nonceKeyWALEditPair.getSecond(); @@ -4604,6 +4670,8 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { } releaseRowLocks(acquiredRowLocks); + enableInterrupts(); + final int finalLastIndexExclusive = miniBatchOp != null ? 
miniBatchOp.getLastIndexExclusive() : batchOp.size(); final boolean finalSuccess = success; @@ -4634,7 +4702,7 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { * Returns effective durability from the passed durability and * the table descriptor. */ - protected Durability getEffectiveDurability(Durability d) { + private Durability getEffectiveDurability(Durability d) { return d == Durability.USE_DEFAULT ? this.regionDurability : d; } @@ -4821,11 +4889,11 @@ public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws // timestamp from get (see prepareDeleteTimestamps). } // All edits for the given row (across all column families) must happen atomically. - Result r = null; + Result r; if (mutation != null) { - r = doBatchMutate(mutation, true).getResult(); + r = mutate(mutation, true).getResult(); } else { - mutateRow(rowMutations); + r = mutateRow(rowMutations); } this.checkAndMutateChecksPassed.increment(); return new CheckAndMutateResult(true, r); @@ -4883,27 +4951,26 @@ private boolean matches(final CompareOperator op, final int compareResult) { return matches; } - private OperationStatus doBatchMutate(Mutation mutation) throws IOException { - return doBatchMutate(mutation, false); + private OperationStatus mutate(Mutation mutation) throws IOException { + return mutate(mutation, false); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic) throws IOException { - return doBatchMutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); + private OperationStatus mutate(Mutation mutation, boolean atomic) throws IOException { + return mutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic, long nonceGroup, - long nonce) throws IOException { - OperationStatus[] batchMutate = this.batchMutate(new Mutation[]{mutation}, atomic, - nonceGroup, nonce); - if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { - throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { - throw new NoSuchColumnFamilyException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals( - OperationStatusCode.STORE_TOO_BUSY)) { - throw new RegionTooBusyException(batchMutate[0].getExceptionMsg()); + private OperationStatus mutate(Mutation mutation, boolean atomic, long nonceGroup, long nonce) + throws IOException { + OperationStatus[] status = + this.batchMutate(new Mutation[] { mutation }, atomic, nonceGroup, nonce); + if (status[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { + throw new FailedSanityCheckException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { + throw new NoSuchColumnFamilyException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.STORE_TOO_BUSY)) { + throw new RegionTooBusyException(status[0].getExceptionMsg()); } - return batchMutate[0]; + return status[0]; } /** @@ -4962,7 +5029,7 @@ private static void updateCellTimestamps(final Iterable> cellItr, fin /** * Possibly rewrite incoming cell tags. 
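Because checkAndMutate above now returns the Result produced by mutateRow, a conditional RowMutations that carries an Increment or Append can surface the updated cell to the caller. A hedged client-side sketch (assumes the CheckAndMutate builder API of recent clients; row, family and qualifier names are made up):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.CheckAndMutate;
    import org.apache.hadoop.hbase.client.CheckAndMutateResult;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static void conditionalBump(Table table) throws IOException {
      RowMutations rm = new RowMutations(Bytes.toBytes("row1"));
      rm.add(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("active")));
      rm.add(new Increment(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L));

      CheckAndMutateResult res = table.checkAndMutate(
          CheckAndMutate.newBuilder(Bytes.toBytes("row1"))
              .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("pending"))
              .build(rm));
      if (res.isSuccess() && res.getResult() != null) {
        // The merged Result from mutateRow carries the post-increment value of cf:hits.
        long hits = Bytes.toLong(
            res.getResult().getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
      }
    }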
*/ - void rewriteCellTags(Map> familyMap, final Mutation m) { + private void rewriteCellTags(Map> familyMap, final Mutation m) { // Check if we have any work to do and early out otherwise // Update these checks as more logic is added here if (m.getTTL() == Long.MAX_VALUE) { @@ -4984,15 +5051,17 @@ void rewriteCellTags(Map> familyMap, final Mutation m) { } } - /* + /** * Check if resources to support an update. - * - * We throw RegionTooBusyException if above memstore limit - * and expect client to retry using some kind of backoff - */ - void checkResources() throws RegionTooBusyException { + *

    + * We throw RegionTooBusyException if above memstore limit and expect client to retry using some + * kind of backoff + */ + private void checkResources() throws RegionTooBusyException { // If catalog region, do not impose resource constraints or block updates. - if (this.getRegionInfo().isMetaRegion()) return; + if (this.getRegionInfo().isMetaRegion()) { + return; + } MemStoreSize mss = this.memStoreSizing.getMemStoreSize(); if (mss.getHeapSize() + mss.getOffHeapSize() > this.blockingMemStoreSize) { @@ -5017,13 +5086,13 @@ void checkResources() throws RegionTooBusyException { /** * @throws IOException Throws exception if region is in read-only mode. */ - protected void checkReadOnly() throws IOException { + private void checkReadOnly() throws IOException { if (isReadOnly()) { throw new DoNotRetryIOException("region is read only"); } } - protected void checkReadsEnabled() throws IOException { + private void checkReadsEnabled() throws IOException { if (!this.writestate.readsEnabled) { throw new IOException(getRegionInfo().getEncodedName() + ": The region's reads are disabled. Cannot serve the request"); @@ -5032,26 +5101,11 @@ protected void checkReadsEnabled() throws IOException { public void setReadsEnabled(boolean readsEnabled) { if (readsEnabled && !this.writestate.readsEnabled) { - LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); + LOG.info("Enabling reads for {}", getRegionInfo().getEncodedName()); } this.writestate.setReadsEnabled(readsEnabled); } - /** - * Add updates first to the wal and then add values to memstore. - *

    - * Warning: Assumption is caller has lock on passed in row. - * @param edits Cell updates by column - */ - void put(final byte[] row, byte[] family, List edits) throws IOException { - NavigableMap> familyMap; - familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - - familyMap.put(family, edits); - Put p = new Put(row, HConstants.LATEST_TIMESTAMP, familyMap); - doBatchMutate(p); - } - /** * @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be * set; when set we will run operations that make sense in the increment/append scenario @@ -5101,7 +5155,7 @@ private void checkFamily(final byte[] family, Durability durability) } } - void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { + private void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { if (!this.htableDescriptor.hasColumnFamily(family)) { throw new NoSuchColumnFamilyException( "Column family " + Bytes.toString(family) + " does not exist in region " + this @@ -5188,7 +5242,6 @@ private void deleteRecoveredEdits(FileSystem fs, Iterable files) throws IO * @return the sequence id of the last edit added to this region out of the * recovered edits log or minSeqId if nothing added from editlogs. */ - @VisibleForTesting long replayRecoveredEditsIfAny(Map maxSeqIdInStores, final CancelableProgressable reporter, final MonitoredTask status) throws IOException { long minSeqIdForTheRegion = -1; @@ -5254,6 +5307,11 @@ long replayRecoveredEditsIfAny(Map maxSeqIdInStores, recoveredEditsDir); if (files != null) { for (FileStatus file : files) { + // it is safe to trust the zero-length in this case because we've been through rename and + // lease recovery in the above. + if (isZeroLengthThenDelete(fs, file, file.getPath())) { + continue; + } seqId = Math.max(seqId, replayRecoveredEdits(file.getPath(), maxSeqIdInStores, reporter, fs)); } @@ -5649,7 +5707,6 @@ void replayWALFlushMarker(FlushDescriptor flush, long replaySeqId) throws IOExce * the store memstores, only if the memstores do not have a higher seqId from an earlier wal * edit (because the events may be coming out of order). */ - @VisibleForTesting PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { long flushSeqId = flush.getFlushSequenceNumber(); @@ -5760,7 +5817,6 @@ PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOExc return null; } - @VisibleForTesting @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="Intentional; post memstore flush") void replayWALFlushCommitMarker(FlushDescriptor flush) throws IOException { @@ -5960,7 +6016,7 @@ private long loadRecoveredHFilesIfAny(Collection stores) throws IOExcept * Currently, this method is used to drop memstore to prevent memory leak * when replaying recovered.edits while opening region. */ - public MemStoreSize dropMemStoreContents() throws IOException { + private MemStoreSize dropMemStoreContents() throws IOException { MemStoreSizing totalFreedSize = new NonThreadSafeMemStoreSizing(); this.updatesLock.writeLock().lock(); try { @@ -6048,7 +6104,6 @@ private void replayWALFlushCannotFlushMarker(FlushDescriptor flush, long replayS } } - @VisibleForTesting PrepareFlushResult getPrepareFlushResult() { return prepareFlushResult; } @@ -6412,12 +6467,13 @@ private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Ob * @param s Store to add edit too. * @param cell Cell to add. 
*/ - @VisibleForTesting protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccounting) { s.add(cell, memstoreAccounting); } /** + * make sure have been through lease recovery before get file status, so the file length can be + * trusted. * @param p File to check. * @return True if file was zero-length (and if so, we'll delete it in here). * @throws IOException @@ -6581,13 +6637,12 @@ protected RowLock getRowLockInternal(byte[] row, boolean readLock, final RowLock success = true; return result; } catch (InterruptedException ie) { - LOG.warn("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, - getRegionInfo().getRegionNameAsString()); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); + if (LOG.isDebugEnabled()) { + LOG.debug("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, + getRegionInfo().getRegionNameAsString()); + } TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock"); - Thread.currentThread().interrupt(); - throw iie; + throw throwOnInterrupt(ie); } catch (Error error) { // The maximum lock count for read lock is 64K (hardcoded), when this maximum count // is reached, it will throw out an Error. This Error needs to be caught so it can @@ -6614,7 +6669,6 @@ private void releaseRowLocks(List rowLocks) { } } - @VisibleForTesting public int getReadLockCount() { return lock.getReadLockCount(); } @@ -6623,7 +6677,6 @@ public ConcurrentHashMap getLockedRows() { return lockedRows; } - @VisibleForTesting class RowLockContext { private final HashedBytes row; final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(true); @@ -6700,7 +6753,6 @@ public Lock getLock() { return lock; } - @VisibleForTesting public RowLockContext getContext() { return context; } @@ -6972,6 +7024,19 @@ public Map> bulkLoadHFiles(Collection> f } isSuccessful = true; + //request compaction + familyWithFinalPath.keySet().forEach(family -> { + HStore store = getStore(family); + try { + if (this.rsServices != null && store.needsCompaction()) { + this.rsServices.getCompactionRequestor().requestCompaction(this, store, + "bulkload hfiles request compaction", Store.PRIORITY_USER + 1, + CompactionLifeCycleTracker.DUMMY, null); + } + } catch (IOException e) { + LOG.error("bulkload hfiles request compaction error ", e); + } + }); } finally { if (wal != null && !storeFiles.isEmpty()) { // Write a bulk load event for hfiles that are loaded @@ -7013,703 +7078,21 @@ public String toString() { return getRegionInfo().getRegionNameAsString(); } - /** - * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). 
- */ - class RegionScannerImpl - implements RegionScanner, Shipper, org.apache.hadoop.hbase.ipc.RpcCallback { - // Package local for testability - KeyValueHeap storeHeap = null; - /** Heap of key-values that are not essential for the provided filters and are thus read - * on demand, if on-demand column family loading is enabled.*/ - KeyValueHeap joinedHeap = null; - /** - * If the joined heap data gathering is interrupted due to scan limits, this will - * contain the row for which we are populating the values.*/ - protected Cell joinedContinuationRow = null; - private boolean filterClosed = false; - - protected final byte[] stopRow; - protected final boolean includeStopRow; - protected final HRegion region; - protected final CellComparator comparator; - - private final long readPt; - private final long maxResultSize; - private final ScannerContext defaultScannerContext; - private final FilterWrapper filter; - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region) - throws IOException { - this(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, - long nonceGroup, long nonce) throws IOException { - this.region = region; - this.maxResultSize = scan.getMaxResultSize(); - if (scan.hasFilter()) { - this.filter = new FilterWrapper(scan.getFilter()); - } else { - this.filter = null; - } - this.comparator = region.getCellComparator(); - /** - * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default - * scanner context that can be used to enforce the batch limit in the event that a - * ScannerContext is not specified during an invocation of next/nextRaw - */ - defaultScannerContext = ScannerContext.newBuilder() - .setBatchLimit(scan.getBatch()).build(); - this.stopRow = scan.getStopRow(); - this.includeStopRow = scan.includeStopRow(); - - // synchronize on scannerReadPoints so that nobody calculates - // getSmallestReadPoint, before scannerReadPoints is updated. - IsolationLevel isolationLevel = scan.getIsolationLevel(); - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); - synchronized (scannerReadPoints) { - if (mvccReadPoint > 0) { - this.readPt = mvccReadPoint; - } else if (nonce == HConstants.NO_NONCE || rsServices == null - || rsServices.getNonceManager() == null) { - this.readPt = getReadPoint(isolationLevel); - } else { - this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); - } - scannerReadPoints.put(this, this.readPt); - } - initializeScanners(scan, additionalScanners); - } - - protected void initializeScanners(Scan scan, List additionalScanners) - throws IOException { - // Here we separate all scanners into two lists - scanner that provide data required - // by the filter to operate (scanners list) and all others (joinedScanners list). 
- List scanners = new ArrayList<>(scan.getFamilyMap().size()); - List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); - // Store all already instantiated scanners for exception handling - List instantiatedScanners = new ArrayList<>(); - // handle additionalScanners - if (additionalScanners != null && !additionalScanners.isEmpty()) { - scanners.addAll(additionalScanners); - instantiatedScanners.addAll(additionalScanners); - } - - try { - for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { - HStore store = stores.get(entry.getKey()); - KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); - instantiatedScanners.add(scanner); - if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() - || this.filter.isFamilyEssential(entry.getKey())) { - scanners.add(scanner); - } else { - joinedScanners.add(scanner); - } - } - initializeKVHeap(scanners, joinedScanners, region); - } catch (Throwable t) { - throw handleException(instantiatedScanners, t); - } - } - - protected void initializeKVHeap(List scanners, - List joinedScanners, HRegion region) - throws IOException { - this.storeHeap = new KeyValueHeap(scanners, comparator); - if (!joinedScanners.isEmpty()) { - this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); - } - } - - private IOException handleException(List instantiatedScanners, - Throwable t) { - // remove scaner read point before throw the exception - scannerReadPoints.remove(this); - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - } else { - // close all already instantiated scanners before throwing the exception - for (KeyValueScanner scanner : instantiatedScanners) { - scanner.close(); - } - } - return t instanceof IOException ? (IOException) t : new IOException(t); - } - - @Override - public long getMaxResultSize() { - return maxResultSize; - } - - @Override - public long getMvccReadPoint() { - return this.readPt; - } - - @Override - public int getBatch() { - return this.defaultScannerContext.getBatchLimit(); - } - - /** - * Reset both the filter and the old filter. - * - * @throws IOException in case a filter raises an I/O exception. - */ - protected void resetFilters() throws IOException { - if (filter != null) { - filter.reset(); - } - } - - @Override - public boolean next(List outResults) - throws IOException { - // apply the batching limit by default - return next(outResults, defaultScannerContext); - } - - @Override - public synchronized boolean next(List outResults, ScannerContext scannerContext) - throws IOException { - if (this.filterClosed) { - throw new UnknownScannerException("Scanner was closed (timed out?) " + - "after we renewed it. Could be caused by a very slow scanner " + - "or a lengthy garbage collection"); - } - startRegionOperation(Operation.SCAN); - try { - return nextRaw(outResults, scannerContext); - } finally { - closeRegionOperation(Operation.SCAN); - } - } - - @Override - public boolean nextRaw(List outResults) throws IOException { - // Use the RegionScanner's context by default - return nextRaw(outResults, defaultScannerContext); - } - - @Override - public boolean nextRaw(List outResults, ScannerContext scannerContext) - throws IOException { - if (storeHeap == null) { - // scanner is closed - throw new UnknownScannerException("Scanner was closed"); - } - boolean moreValues = false; - if (outResults.isEmpty()) { - // Usually outResults is empty. 
This is true when next is called - // to handle scan or get operation. - moreValues = nextInternal(outResults, scannerContext); - } else { - List tmpList = new ArrayList<>(); - moreValues = nextInternal(tmpList, scannerContext); - outResults.addAll(tmpList); - } - - if (!outResults.isEmpty()) { - readRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateReadRequestCount(); - } - } - if (rsServices != null && rsServices.getMetrics() != null) { - rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); - } - - // If the size limit was reached it means a partial Result is being returned. Returning a - // partial Result means that we should not reset the filters; filters should only be reset in - // between rows - if (!scannerContext.mayHaveMoreCellsInRow()) { - resetFilters(); - } - - if (isFilterDoneInternal()) { - moreValues = false; - } - return moreValues; - } - - /** - * @return true if more cells exist after this batch, false if scanner is done - */ - private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) - throws IOException { - assert joinedContinuationRow != null; - boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, - joinedContinuationRow); - - if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - // We are done with this row, reset the continuation. - joinedContinuationRow = null; - } - // As the data is obtained from two independent heaps, we need to - // ensure that result list is sorted, because Result relies on that. - sort(results, comparator); - return moreValues; - } - - /** - * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is - * reached, or remainingResultSize (if not -1) is reaced - * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. - * @param scannerContext - * @param currentRowCell - * @return state of last call to {@link KeyValueHeap#next()} - */ - private boolean populateResult(List results, KeyValueHeap heap, - ScannerContext scannerContext, Cell currentRowCell) throws IOException { - Cell nextKv; - boolean moreCellsInRow = false; - boolean tmpKeepProgress = scannerContext.getKeepProgress(); - // Scanning between column families and thus the scope is between cells - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - do { - // We want to maintain any progress that is made towards the limits while scanning across - // different column families. To do this, we toggle the keep progress flag on during calls - // to the StoreScanner to ensure that any progress made thus far is not wiped away. - scannerContext.setKeepProgress(true); - heap.next(results, scannerContext); - scannerContext.setKeepProgress(tmpKeepProgress); - - nextKv = heap.peek(); - moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); - if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext); - if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { - return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); - } else if (scannerContext.checkSizeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } else if (scannerContext.checkTimeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? 
NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } - } while (moreCellsInRow); - return nextKv != null; - } - - /** - * Based on the nextKv in the heap, and the current row, decide whether or not there are more - * cells to be read in the heap. If the row of the nextKv in the heap matches the current row - * then there are more cells to be read in the row. - * @param nextKv - * @param currentRowCell - * @return true When there are more cells in the row to be read - */ - private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { - return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); - } - - /* - * @return True if a filter rules the scanner is over, done. - */ - @Override - public synchronized boolean isFilterDone() throws IOException { - return isFilterDoneInternal(); - } - - private boolean isFilterDoneInternal() throws IOException { - return this.filter != null && this.filter.filterAllRemaining(); - } - - private boolean nextInternal(List results, ScannerContext scannerContext) - throws IOException { - if (!results.isEmpty()) { - throw new IllegalArgumentException("First parameter should be an empty list"); - } - if (scannerContext == null) { - throw new IllegalArgumentException("Scanner context cannot be null"); - } - Optional rpcCall = RpcServer.getCurrentCall(); - - // Save the initial progress from the Scanner context in these local variables. The progress - // may need to be reset a few times if rows are being filtered out so we save the initial - // progress. - int initialBatchProgress = scannerContext.getBatchProgress(); - long initialSizeProgress = scannerContext.getDataSizeProgress(); - long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); - - // Used to check time limit - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - - // The loop here is used only when at some point during the next we determine - // that due to effects of filters or otherwise, we have an empty row in the result. - // Then we loop and try again. Otherwise, we must get out on the first iteration via return, - // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, - // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). - while (true) { - // Starting to scan a new row. Reset the scanner progress according to whether or not - // progress should be kept. - if (scannerContext.getKeepProgress()) { - // Progress should be kept. Reset to initial values seen at start of method invocation. - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - if (rpcCall.isPresent()) { - // If a user specifies a too-restrictive or too-slow scanner, the - // client might time out and disconnect while the server side - // is still processing the request. We should abort aggressively - // in that case. - long afterTime = rpcCall.get().disconnectSince(); - if (afterTime >= 0) { - throw new CallerDisconnectedException( - "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + - this + " after " + afterTime + " ms, since " + - "caller disconnected"); - } - } - - // Let's see what we have in the storeHeap. 
- Cell current = this.storeHeap.peek(); - - boolean shouldStop = shouldStop(current); - // When has filter row is true it means that the all the cells for a particular row must be - // read before a filtering decision can be made. This means that filters where hasFilterRow - // run the risk of enLongAddering out of memory errors in the case that they are applied to a - // table that has very large rows. - boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); - - // If filter#hasFilterRow is true, partial results are not allowed since allowing them - // would prevent the filters from being evaluated. Thus, if it is true, change the - // scope of any limits that could potentially create partial results to - // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row - if (hasFilterRow) { - if (LOG.isTraceEnabled()) { - LOG.trace("filter#hasFilterRow is true which prevents partial results from being " - + " formed. Changing scope of limits that may create partials"); - } - scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); - scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); - limitScope = LimitScope.BETWEEN_ROWS; - } - - if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " + - " stop mid-row because of a limit. ScannerContext:" + scannerContext); - } - return true; - } - - // Check if we were getting data from the joinedHeap and hit the limit. - // If not, then it's main path - getting results from storeHeap. - if (joinedContinuationRow == null) { - // First, check if we are at a stop row. If so, there are no more results. - if (shouldStop) { - if (hasFilterRow) { - filter.filterRowCells(results); - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Check if rowkey filter wants to exclude this row. If so, loop to next. - // Technically, if we hit limits before on this row, we don't need this call. - if (filterRowKey(current)) { - incrementCountOfRowsFilteredMetric(scannerContext); - // early check, see HBASE-16296 - if (isFilterDoneInternal()) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - // Typically the count of rows scanned is incremented inside #populateResult. However, - // here we are filtering a row based purely on its row key, preventing us from calling - // #populateResult. Thus, perform the necessary increment here to rows scanned metric - incrementCountOfRowsScannedMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - results.clear(); - - // Read nothing as the rowkey was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - - // Ok, we are good, let's try to get some results from the main heap. - populateResult(results, this.storeHeap, scannerContext, current); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " - + " stop mid-row because of a limit. 
ScannerContext:" + scannerContext); - } - return true; - } - - Cell nextKv = this.storeHeap.peek(); - shouldStop = shouldStop(nextKv); - // save that the row was empty before filters applied to it. - final boolean isEmptyRow = results.isEmpty(); - - // We have the part of the row necessary for filtering (all of it, usually). - // First filter with the filterRow(List). - FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; - if (hasFilterRow) { - ret = filter.filterRowCellsWithRet(results); - - // We don't know how the results have changed after being filtered. Must set progress - // according to contents of results now. - if (scannerContext.getKeepProgress()) { - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - scannerContext.incrementBatchProgress(results.size()); - for (Cell cell : results) { - scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), - cell.heapSize()); - } - } - - if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { - incrementCountOfRowsFilteredMetric(scannerContext); - results.clear(); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // This row was totally filtered out, if this is NOT the last row, - // we should continue on. Otherwise, nothing else to do. - if (!shouldStop) { - // Read nothing as the cells was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Ok, we are done with storeHeap for this row. - // Now we may need to fetch additional, non-essential data into row. - // These values are not needed for filter to work, so we postpone their - // fetch to (possibly) reduce amount of data loads from disk. - if (this.joinedHeap != null) { - boolean mayHaveData = joinedHeapMayHaveData(current); - if (mayHaveData) { - joinedContinuationRow = current; - populateFromJoinedHeap(results, scannerContext); - - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - } - } else { - // Populating from the joined heap was stopped by limits, populate some more. - populateFromJoinedHeap(results, scannerContext); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - // We may have just called populateFromJoinedMap and hit the limits. If that is - // the case, we need to call it again on the next next() invocation. - if (joinedContinuationRow != null) { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - - // Finally, we are done with both joinedHeap and storeHeap. - // Double check to prevent empty rows from appearing in result. It could be - // the case when SingleColumnValueExcludeFilter is used. 
- if (results.isEmpty()) { - incrementCountOfRowsFilteredMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - if (!shouldStop) continue; - } - - if (shouldStop) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } else { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - } - } - - protected void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { - filteredReadRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateFilteredRecords(); - } - - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); - } - - protected void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); - } - - /** - * @param currentRowCell - * @return true when the joined heap may have data for the current row - * @throws IOException - */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) - throws IOException { - Cell nextJoinedKv = joinedHeap.peek(); - boolean matchCurrentRow = - nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); - boolean matchAfterSeek = false; - - // If the next value in the joined heap does not match the current row, try to seek to the - // correct row - if (!matchCurrentRow) { - Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); - boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); - matchAfterSeek = - seekSuccessful && joinedHeap.peek() != null - && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); - } - - return matchCurrentRow || matchAfterSeek; - } - - /** - * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines - * both filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, - * it may not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only - * returns true when filterRow({@code List kvs}) is overridden not the filterRow(). - * Therefore, the filterRow() will be skipped. - */ - private boolean filterRow() throws IOException { - // when hasFilterRow returns true, filter.filterRow() will be called automatically inside - // filterRowCells(List kvs) so we skip that scenario here. 
- return filter != null && (!filter.hasFilterRow()) - && filter.filterRow(); - } - - private boolean filterRowKey(Cell current) throws IOException { - return filter != null && filter.filterRowKey(current); - } - - protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { - assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; - Cell next; - while ((next = this.storeHeap.peek()) != null && - CellUtil.matchingRows(next, curRowCell)) { - this.storeHeap.next(MOCKED_LIST); - } - resetFilters(); - - // Calling the hook in CP which allows it to do a fast forward - return this.region.getCoprocessorHost() == null - || this.region.getCoprocessorHost() - .postScannerFilterRow(this, curRowCell); - } - - protected boolean shouldStop(Cell currentRowCell) { - if (currentRowCell == null) { - return true; - } - if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { - return false; - } - int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); - return c > 0 || (c == 0 && !includeStopRow); - } - - @Override - public synchronized void close() { - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - } - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - // no need to synchronize here. - scannerReadPoints.remove(this); - this.filterClosed = true; - } - - KeyValueHeap getStoreHeapForTesting() { - return storeHeap; - } - - @Override - public synchronized boolean reseek(byte[] row) throws IOException { - if (row == null) { - throw new IllegalArgumentException("Row cannot be null."); - } - boolean result = false; - startRegionOperation(); - Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); - try { - // use request seek to make use of the lazy seek option. See HBASE-5520 - result = this.storeHeap.requestSeek(kv, true, true); - if (this.joinedHeap != null) { - result = this.joinedHeap.requestSeek(kv, true, true) || result; - } - } finally { - closeRegionOperation(); - } - return result; - } - - @Override - public void shipped() throws IOException { - if (storeHeap != null) { - storeHeap.shipped(); - } - if (joinedHeap != null) { - joinedHeap.shipped(); - } - } - - @Override - public void run() throws IOException { - // This is the RPC callback method executed. We do the close in of the scanner in this - // callback - this.close(); - } - } - // Utility methods /** - * A utility method to create new instances of HRegion based on the - * {@link HConstants#REGION_IMPL} configuration property. - * @param tableDir qualified path of directory where region should be located, - * usually the table directory. - * @param wal The WAL is the outbound log for any updates to the HRegion - * The wal file is a logfile from the previous execution that's - * custom-computed for this HRegion. The HRegionServer computes and sorts the - * appropriate wal info for this HRegion. If there is a previous file - * (implying that the HRegion has been written-to before), then read it from - * the supplied path. + * A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL} + * configuration property. + * @param tableDir qualified path of directory where region should be located, usually the table + * directory. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a logfile + * from the previous execution that's custom-computed for this HRegion. 
The HRegionServer + * computes and sorts the appropriate wal info for this HRegion. If there is a previous + * file (implying that the HRegion has been written-to before), then read it from the + * supplied path. * @param fs is the filesystem. * @param conf is global configuration settings. - * @param regionInfo - RegionInfo that describes the region - * is new), then read them from the supplied path. + * @param regionInfo - RegionInfo that describes the region is new), then read them from the + * supplied path. * @param htd the table descriptor * @return the new instance */ @@ -7735,7 +7118,6 @@ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, /** * Convenience method creating new HRegions. Used by createTable. - * * @param info Info for region to create. * @param rootDir Root directory for HBase instance * @param wal shared WAL @@ -7743,14 +7125,30 @@ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, * @return new HRegion */ public static HRegion createHRegion(final RegionInfo info, final Path rootDir, - final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, - final boolean initialize) throws IOException { - LOG.info("creating " + info + ", tableDescriptor=" + - (hTableDescriptor == null ? "null" : hTableDescriptor) + ", regionDir=" + rootDir); + final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, + final boolean initialize) throws IOException { + return createHRegion(info, rootDir, conf, hTableDescriptor, wal, initialize, null); + } + + /** + * Convenience method creating new HRegions. Used by createTable. + * @param info Info for region to create. + * @param rootDir Root directory for HBase instance + * @param wal shared WAL + * @param initialize - true to initialize the region + * @param rsRpcServices An interface we can request flushes against. + * @return new HRegion + */ + public static HRegion createHRegion(final RegionInfo info, final Path rootDir, + final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal, + final boolean initialize, RegionServerServices rsRpcServices) throws IOException { + LOG.info("creating " + info + ", tableDescriptor=" + + (hTableDescriptor == null ? "null" : hTableDescriptor) + ", regionDir=" + rootDir); createRegionDir(conf, info, rootDir); FileSystem fs = rootDir.getFileSystem(conf); Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); - HRegion region = HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, null); + HRegion region = + HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, rsRpcServices); if (initialize) { region.initialize(null); } @@ -7948,7 +7346,6 @@ public static HRegion openHRegionFromTableDir(final Configuration conf, final Fi return r.openHRegion(reporter); } - @VisibleForTesting public NavigableMap getReplicationScope() { return this.replicationScope; } @@ -7974,11 +7371,11 @@ public static Region openHRegion(final Region other, final CancelableProgressabl /** * Open HRegion. + *

    * Calls initialize and sets sequenceId. * @return Returns this */ - protected HRegion openHRegion(final CancelableProgressable reporter) - throws IOException { + private HRegion openHRegion(final CancelableProgressable reporter) throws IOException { try { // Refuse to open the region if we are missing local compression support TableDescriptorChecker.checkCompression(htableDescriptor); @@ -7998,11 +7395,18 @@ protected HRegion openHRegion(final CancelableProgressable reporter) RegionReplicaUtil.isDefaultReplica(getRegionInfo())) { writeRegionOpenMarker(wal, openSeqNum); } - } catch(Throwable t) { + } catch (Throwable t) { // By coprocessor path wrong region will open failed, // MetricsRegionWrapperImpl is already init and not close, // add region close when open failed - this.close(); + try { + // It is not required to write sequence id file when region open is failed. + // Passing true to skip the sequence id file write. + this.close(true); + } catch (Throwable e) { + LOG.warn("Open region: {} failed. Try close region but got exception ", this.getRegionInfo(), + e); + } throw t; } return this; @@ -8038,14 +7442,9 @@ public static void warmupHRegion(final RegionInfo info, throws IOException { Objects.requireNonNull(info, "RegionInfo cannot be null"); - - if (LOG.isDebugEnabled()) { - LOG.debug("HRegion.Warming up region: " + info); - } - + LOG.debug("Warmup {}", info); Path rootDir = CommonFSUtils.getRootDir(conf); Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); - FileSystem fs = null; if (rsServices != null) { fs = rsServices.getFileSystem(); @@ -8053,7 +7452,6 @@ public static void warmupHRegion(final RegionInfo info, if (fs == null) { fs = rootDir.getFileSystem(conf); } - HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null); r.initializeWarmup(reporter); } @@ -8122,7 +7520,7 @@ public List get(Get get, boolean withCoprocessor) throws IOException { return get(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE); } - public List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) + private List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException { List results = new ArrayList<>(); long before = EnvironmentEdgeManager.currentTime(); @@ -8164,11 +7562,30 @@ void metricsUpdateForGet(List results, long before) { } @Override - public void mutateRow(RowMutations rm) throws IOException { - // Don't need nonces here - RowMutations only supports puts and deletes + public Result mutateRow(RowMutations rm) throws IOException { final List m = rm.getMutations(); - batchMutate(m.toArray(new Mutation[m.size()]), true, HConstants.NO_NONCE, - HConstants.NO_NONCE); + OperationStatus[] statuses = batchMutate(m.toArray(new Mutation[0]), true, + HConstants.NO_NONCE, HConstants.NO_NONCE); + + List results = new ArrayList<>(); + for (OperationStatus status : statuses) { + if (status.getResult() != null) { + results.add(status.getResult()); + } + } + + if (results.isEmpty()) { + return null; + } + + // Merge the results of the Increment/Append operations + List cells = new ArrayList<>(); + for (Result result : results) { + if (result.rawCells() != null) { + cells.addAll(Arrays.asList(result.rawCells())); + } + } + return Result.create(cells); } /** @@ -8281,6 +7698,11 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, // when it assigns the edit a sequencedid (A.K.A the mvcc write number). 
WriteEntry writeEntry = null; MemStoreSizing memstoreAccounting = new NonThreadSafeMemStoreSizing(); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { boolean success = false; try { @@ -8296,9 +7718,19 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, prevRowLock = rowLock; } } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + // STEP 3. Region lock lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size()); locked = true; + + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + long now = EnvironmentEdgeManager.currentTime(); // STEP 4. Let the processor scan the rows, generate mutations and add waledits doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); @@ -8364,6 +7796,8 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, } // release locks if some were acquired but another timed out releaseRowLocks(acquiredRowLocks); + + enableInterrupts(); } // 12. Run post-process hook @@ -8426,6 +7860,8 @@ public Void call() throws IOException { rowProcessorExecutor.execute(task); try { task.get(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + throw throwOnInterrupt(ie); } catch (TimeoutException te) { String row = processor.getRowsToLock().isEmpty() ? "" : " on row(s):" + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "..."; @@ -8448,7 +7884,7 @@ public Result append(Append append, long nonceGroup, long nonce) throws IOExcept startRegionOperation(Operation.APPEND); try { // All edits for the given row (across all column families) must happen atomically. - return doBatchMutate(append, true, nonceGroup, nonce).getResult(); + return mutate(append, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.APPEND); } @@ -8465,7 +7901,7 @@ public Result increment(Increment increment, long nonceGroup, long nonce) throws startRegionOperation(Operation.INCREMENT); try { // All edits for the given row (across all column families) must happen atomically. 
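With append() and increment() above now delegating to the shared mutate(mutation, true, nonceGroup, nonce) path, the Result handed back to the client is whatever getResult() yields on the OperationStatus. A short, hedged client-side reminder of that round trip (table and column names are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static byte[] appendEvent(Table table) throws IOException {
      Append app = new Append(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|event"));
      // Server side, mutate(append, true, ...).getResult() is what backs this Result.
      Result r = table.append(app);
      return r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("log"));
    }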
- return doBatchMutate(increment, true, nonceGroup, nonce).getResult(); + return mutate(increment, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.INCREMENT); } @@ -8521,19 +7957,6 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, Listcells using comparator - */ - private static List sort(List cells, final CellComparator comparator) { - cells.sort(comparator); - return cells; - } - public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HRegion.class, false); // woefully out of date - currently missing: @@ -8551,7 +7974,7 @@ private static List sort(List cells, final CellComparator comparator (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing (3 * ClassSize.ATOMIC_LONG) + // numPutsWithoutWAL, dataInMemoryWithoutWAL, // compactionsFailed - (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints + (3 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints, regionLockHolders WriteState.HEAP_SIZE + // writestate ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + // stores (2 * ClassSize.REENTRANT_LOCK) + // lock, updatesLock @@ -8711,7 +8134,6 @@ public RegionCoprocessorHost getCoprocessorHost() { } /** @param coprocessorHost the new coprocessor host */ - @VisibleForTesting public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { this.coprocessorHost = coprocessorHost; } @@ -8723,12 +8145,22 @@ public void startRegionOperation() throws IOException { @Override public void startRegionOperation(Operation op) throws IOException { + boolean isInterruptableOp = false; switch (op) { - case GET: // read operations + case GET: // interruptible read operations case SCAN: + isInterruptableOp = true; checkReadsEnabled(); break; - default: + case INCREMENT: // interruptible write operations + case APPEND: + case PUT: + case DELETE: + case BATCH_MUTATE: + case CHECK_AND_MUTATE: + isInterruptableOp = true; + break; + default: // all others break; } if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION @@ -8741,6 +8173,12 @@ public void startRegionOperation(Operation op) throws IOException { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } lock(lock.readLock()); + // Update regionLockHolders ONLY for any startRegionOperation call that is invoked from + // an RPC handler + Thread thisThread = Thread.currentThread(); + if (isInterruptableOp) { + regionLockHolders.put(thisThread, true); + } if (this.closed.get()) { lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); @@ -8755,6 +8193,11 @@ public void startRegionOperation(Operation op) throws IOException { coprocessorHost.postStartRegionOperation(op); } } catch (Exception e) { + if (isInterruptableOp) { + // would be harmless to remove what we didn't add but we know by 'isInterruptableOp' + // if we added this thread to regionLockHolders + regionLockHolders.remove(thisThread); + } lock.readLock().unlock(); throw new IOException(e); } @@ -8770,6 +8213,8 @@ public void closeRegionOperation(Operation operation) throws IOException { if (operation == Operation.SNAPSHOT) { stores.values().forEach(HStore::postSnapshotOperation); } + Thread thisThread = Thread.currentThread(); + regionLockHolders.remove(thisThread); lock.readLock().unlock(); if (coprocessorHost != null) { coprocessorHost.postCloseRegionOperation(operation); @@ -8785,8 +8230,7 @@ public void closeRegionOperation(Operation operation) throws 
IOException { * @throws RegionTooBusyException if failed to get the lock in time * @throws InterruptedIOException if interrupted while waiting for a lock */ - private void startBulkRegionOperation(boolean writeLockNeeded) - throws NotServingRegionException, RegionTooBusyException, InterruptedIOException { + private void startBulkRegionOperation(boolean writeLockNeeded) throws IOException { if (this.closing.get()) { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } @@ -8797,6 +8241,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) else lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } + regionLockHolders.put(Thread.currentThread(), true); } /** @@ -8804,6 +8249,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) * to the try block of #startRegionOperation */ private void closeBulkRegionOperation(){ + regionLockHolders.remove(Thread.currentThread()); if (lock.writeLock().isHeldByCurrentThread()) lock.writeLock().unlock(); else lock.readLock().unlock(); } @@ -8834,7 +8280,7 @@ private void recordMutationWithoutWal(final Map> familyMap) dataInMemoryWithoutWAL.add(mutationSize); } - private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock) throws IOException { lock(lock, 1); } @@ -8843,8 +8289,7 @@ private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOE * if failed to get the lock in time. Throw InterruptedIOException * if interrupted while waiting for the lock. */ - private void lock(final Lock lock, final int multiplier) - throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock, final int multiplier) throws IOException { try { final long waitTime = Math.min(maxBusyWaitDuration, busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier)); @@ -8862,10 +8307,10 @@ private void lock(final Lock lock, final int multiplier) throw rtbe; } } catch (InterruptedException ie) { - LOG.info("Interrupted while waiting for a lock in region {}", this); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); - throw iie; + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupted while waiting for a lock in region {}", this); + } + throw throwOnInterrupt(ie); } } @@ -8910,32 +8355,6 @@ private boolean shouldSyncWAL() { return regionDurability.ordinal() > Durability.ASYNC_WAL.ordinal(); } - /** - * A mocked list implementation - discards all updates. - */ - private static final List MOCKED_LIST = new AbstractList() { - - @Override - public void add(int index, Cell element) { - // do nothing - } - - @Override - public boolean addAll(int index, Collection c) { - return false; // this list is never changed as a result of an update - } - - @Override - public KeyValue get(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public int size() { - return 0; - } - }; - /** @return the latest sequence number that was read from storage when this region was opened */ public long getOpenSeqNum() { return this.openSeqNum; } @@ -8988,9 +8407,65 @@ public void incrementFlushesQueuedCount() { flushesQueued.increment(); } - @VisibleForTesting - public long getReadPoint() { - return getReadPoint(IsolationLevel.READ_COMMITTED); + /** + * If a handler thread is eligible for interrupt, make it ineligible. Should be paired + * with {@link #enableInterrupts()}.
+ */ + void disableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> false); + } + + /** + * If a handler thread was made ineligible for interrupt via {@link #disableInterrupts()}, + * make it eligible again. No-op if interrupts are already enabled. + */ + void enableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> true); + } + + /** + * Interrupt any region operations that have acquired the region lock via + * {@link #startRegionOperation(org.apache.hadoop.hbase.regionserver.Region.Operation)}, + * or {@link #startBulkRegionOperation(boolean)}. + */ + private void interruptRegionOperations() { + for (Map.Entry entry: regionLockHolders.entrySet()) { + // An entry in this map will have a boolean value indicating if it is currently + // eligible for interrupt; if so, we should interrupt it. + if (entry.getValue().booleanValue()) { + entry.getKey().interrupt(); + } + } + } + + /** + * Check thread interrupt status and throw an exception if interrupted. + * @throws NotServingRegionException if region is closing + * @throws InterruptedIOException if interrupted but region is not closing + */ + // Package scope for tests + void checkInterrupt() throws NotServingRegionException, InterruptedIOException { + if (Thread.interrupted()) { + if (this.closing.get()) { + throw new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing"); + } + throw new InterruptedIOException(); + } + } + + /** + * Throw the correct exception upon interrupt + * @param t cause + */ + // Package scope for tests + IOException throwOnInterrupt(Throwable t) { + if (this.closing.get()) { + return (NotServingRegionException) new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing") + .initCause(t); + } + return (InterruptedIOException) new InterruptedIOException().initCause(t); } /** @@ -9116,7 +8591,7 @@ public void requestFlush(FlushLifeCycleTracker tracker) throws IOException { * features * @param conf region configurations */ - static void decorateRegionConfiguration(Configuration conf) { + private static void decorateRegionConfiguration(Configuration conf) { if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) { String plugins = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,""); String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName(); @@ -9127,13 +8602,11 @@ static void decorateRegionConfiguration(Configuration conf) { } } - @VisibleForTesting - public void setReadRequestsCount(long readRequestsCount) { + public void addReadRequestsCount(long readRequestsCount) { this.readRequestsCount.add(readRequestsCount); } - @VisibleForTesting - public void setWriteRequestsCount(long writeRequestsCount) { + public void addWriteRequestsCount(long writeRequestsCount) { this.writeRequestsCount.add(writeRequestsCount); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index d5ef30ecc79f..73234f161c4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -54,7 +54,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -75,7
+74,7 @@ public class HRegionFileSystem { public static final String REGION_SPLITS_DIR = ".splits"; /** Temporary subdirectory of the region directory used for compaction output. */ - @VisibleForTesting static final String REGION_TEMP_DIR = ".tmp"; + static final String REGION_TEMP_DIR = ".tmp"; private final RegionInfo regionInfo; //regionInfo for interacting with FS (getting encodedName, etc) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index fc0e3d75f592..e40e25158269 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; -import static org.apache.hadoop.hbase.util.DNS.RS_HOSTNAME_KEY; +import static org.apache.hadoop.hbase.util.DNS.UNSAFE_RS_HOSTNAME_KEY; import java.io.IOException; import java.lang.management.MemoryType; @@ -123,6 +123,9 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.mob.MobFileCache; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore; @@ -139,8 +142,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler; import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler; -import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; -import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; @@ -175,7 +176,6 @@ import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -192,7 +192,6 @@ import org.slf4j.LoggerFactory; import sun.misc.Signal; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.cache.Cache; @@ -253,7 +252,7 @@ public class HRegionServer extends Thread implements /** * For testing only! 
Set to true to skip notifying region assignment to master . */ - @VisibleForTesting + @InterfaceAudience.Private @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL") public static boolean TEST_SKIP_REPORTING_TRANSITION = false; @@ -277,6 +276,13 @@ public class HRegionServer extends Thread implements private final Cache executedRegionProcedures = CacheBuilder.newBuilder().expireAfterAccess(600, TimeUnit.SECONDS).build(); + /** + * Used to cache the moved-out regions + */ + private final Cache movedRegionInfoCache = + CacheBuilder.newBuilder().expireAfterWrite(movedRegionCacheExpiredTime(), + TimeUnit.MILLISECONDS).build(); + private MemStoreFlusher cacheFlusher; private HeapMemoryManager hMemManager; @@ -294,6 +300,7 @@ public class HRegionServer extends Thread implements // Replication services. If no replication, this handler will be null. private ReplicationSourceService replicationSourceHandler; private ReplicationSinkService replicationSinkHandler; + private boolean sameReplicationSourceAndSink; // Compactions public CompactSplit compactSplitThread; @@ -311,14 +318,16 @@ public class HRegionServer extends Thread implements /** * Map of encoded region names to the DataNode locations they should be hosted on - * We store the value as InetSocketAddress since this is used only in HDFS + * We store the value as Address since InetSocketAddress is required by the HDFS * API (create() that takes favored nodes as hints for placing file blocks). * We could have used ServerName here as the value class, but we'd need to * convert it to InetSocketAddress at some point before the HDFS API call, and * it seems a bit weird to store ServerName since ServerName refers to RegionServers - * and here we really mean DataNode locations. + * and here we really mean DataNode locations. We don't store it as InetSocketAddress + * here because the conversion on demand from Address to InetSocketAddress will + * guarantee the resolution results will be fresh when we need it. */ - private final Map regionFavoredNodesMap = new ConcurrentHashMap<>(); + private final Map regionFavoredNodesMap = new ConcurrentHashMap<>(); private LeaseManager leaseManager; @@ -426,6 +435,9 @@ public class HRegionServer extends Thread implements private final int shortOperationTimeout; + // Time to pause if master says 'please hold' + private final long retryPauseTime; + private final RegionServerAccounting regionServerAccounting; private SlowLogTableOpsChore slowLogTableOpsChore = null; @@ -459,13 +471,32 @@ public class HRegionServer extends Thread implements protected String useThisHostnameInstead; /** - * HBASE-18226: This config and hbase.regionserver.hostname are mutually exclusive. - * Exception will be thrown if both are used. + * @deprecated since 2.4.0 and will be removed in 4.0.0. + * Use {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. + * @see HBASE-24667 */ + @Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) final static String RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY = "hbase.regionserver.hostname.disable.master.reversedns"; + /** + * HBASE-18226: This config and hbase.unsafe.regionserver.hostname are mutually exclusive. + * Exception will be thrown if both are used.
+ */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) + final static String UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY = + "hbase.unsafe.regionserver.hostname.disable.master.reversedns"; + + /** + * HBASE-24667: This config hbase.regionserver.hostname.disable.master.reversedns will be replaced by + * hbase.unsafe.regionserver.hostname.disable.master.reversedns. Keep the old config keys here for backward + * compatibility. + */ + static { + Configuration.addDeprecation(RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY); + } + /** * This servers startcode. */ @@ -476,11 +507,6 @@ public class HRegionServer extends Thread implements */ protected String clusterId; - /** - * Chore to clean periodically the moved region list - */ - private MovedRegionsCleaner movedRegionsCleaner; - // chore for refreshing store files for secondary regions private StorefileRefresherChore storefileRefresher; @@ -523,7 +549,7 @@ public class HRegionServer extends Thread implements */ protected final ConfigurationManager configurationManager; - @VisibleForTesting + @InterfaceAudience.Private CompactedHFilesDischarger compactedFileDischarger; private volatile ThroughputController flushThroughputController; @@ -592,6 +618,9 @@ public HRegionServer(final Configuration conf) throws IOException { this.shortOperationTimeout = conf.getInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT); + this.retryPauseTime = conf.getLong(HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME, + HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME); + this.abortRequested = new AtomicBoolean(false); this.stopped = false; @@ -688,12 +717,12 @@ private void initNamedQueueRecorder(Configuration conf) { // HMaster should override this method to load the specific config for master protected String getUseThisHostnameInstead(Configuration conf) throws IOException { - String hostname = conf.get(RS_HOSTNAME_KEY); - if (conf.getBoolean(RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) { + String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); + if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) { if (!StringUtils.isBlank(hostname)) { - String msg = RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + RS_HOSTNAME_KEY + - " are mutually exclusive. Do not set " + RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + - " to true while " + RS_HOSTNAME_KEY + " is used"; + String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + UNSAFE_RS_HOSTNAME_KEY + + " are mutually exclusive. Do not set " + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + + " to true while " + UNSAFE_RS_HOSTNAME_KEY + " is used"; throw new IOException(msg); } else { return rpcServices.isa.getHostName(); @@ -728,17 +757,28 @@ private void initializeFileSystem() throws IOException { // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase // checksum verification enabled, then automatically switch off hdfs checksum verification. 
boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); - CommonFSUtils.setFsDefault(this.conf, CommonFSUtils.getWALRootDir(this.conf)); + String walDirUri = CommonFSUtils.getDirUri(this.conf, + new Path(conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)))); + // set WAL's uri + if (walDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, walDirUri); + } + // init the WALFs this.walFs = new HFileSystem(this.conf, useHBaseChecksum); this.walRootDir = CommonFSUtils.getWALRootDir(this.conf); // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else // underlying hadoop hdfs accessors will be going against wrong filesystem // (unless all is set to defaults). - CommonFSUtils.setFsDefault(this.conf, CommonFSUtils.getRootDir(this.conf)); + String rootDirUri = + CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); + if (rootDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, rootDirUri); + } + // init the filesystem this.dataFs = new HFileSystem(this.conf, useHBaseChecksum); this.dataRootDir = CommonFSUtils.getRootDir(this.conf); this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, - !canUpdateTableDescriptor(), cacheTableDescriptor()); + !canUpdateTableDescriptor(), cacheTableDescriptor()); } protected void login(UserProvider user, String host) throws IOException { @@ -1079,10 +1119,6 @@ public void run() { mobFileCache.shutdown(); } - if (movedRegionsCleaner != null) { - movedRegionsCleaner.stop("Region Server stopping"); - } - // Send interrupts to wake up threads if sleeping so they notice shutdown. // TODO: Should we check they are alive? If OOME could have exited already if (this.hMemManager != null) this.hMemManager.stop(); @@ -1215,7 +1251,7 @@ private long getWriteRequestCount() { return writeCount; } - @VisibleForTesting + @InterfaceAudience.Private protected void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException { RegionServerStatusService.BlockingInterface rss = rssStub; @@ -1392,20 +1428,32 @@ private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, lon serverLoad.addUserLoads(createUserLoad(entry.getKey(), entry.getValue())); } } - // for the replicationLoad purpose. 
Only need to get from one executorService - // either source or sink will get the same info - ReplicationSourceService rsources = getReplicationSourceService(); - if (rsources != null) { + if (sameReplicationSourceAndSink && replicationSourceHandler != null) { // always refresh first to get the latest value - ReplicationLoad rLoad = rsources.refreshAndGetReplicationLoad(); + ReplicationLoad rLoad = replicationSourceHandler.refreshAndGetReplicationLoad(); if (rLoad != null) { serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink()); - for (ClusterStatusProtos.ReplicationLoadSource rLS : - rLoad.getReplicationLoadSourceEntries()) { + for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad + .getReplicationLoadSourceEntries()) { serverLoad.addReplLoadSource(rLS); } - + } + } else { + if (replicationSourceHandler != null) { + ReplicationLoad rLoad = replicationSourceHandler.refreshAndGetReplicationLoad(); + if (rLoad != null) { + for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad + .getReplicationLoadSourceEntries()) { + serverLoad.addReplLoadSource(rLS); + } + } + } + if (replicationSinkHandler != null) { + ReplicationLoad rLoad = replicationSinkHandler.refreshAndGetReplicationLoad(); + if (rLoad != null) { + serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink()); + } } } @@ -1747,9 +1795,9 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, .setBlocksLocalWeight(blocksLocalWeight) .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight) .setBlocksTotalWeight(blocksTotalWeight) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState())) .setLastMajorCompactionTs(r.getOldestHfileTs(true)); r.setCompleteSequenceId(regionLoadBldr); - return regionLoadBldr.build(); } @@ -1895,7 +1943,7 @@ private void setupWALAndReplication() throws IOException { boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster && !LoadBalancer.isMasterCanHostUserRegions(conf); WALFactory factory = - new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly); + new WALFactory(conf, serverName.toString(), this, !isMasterNoTableOrSystemTableOnly); if (!isMasterNoTableOrSystemTableOnly) { // TODO Replication make assumptions here based on the default filesystem impl Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -1913,8 +1961,7 @@ private void setupWALAndReplication() throws IOException { throw new IOException("Can not create wal directory " + logDir); } // Instantiate replication if replication enabled. Pass it the log directories. - createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, - factory.getWALProvider()); + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); } this.walFactory = factory; } @@ -1923,8 +1970,7 @@ private void setupWALAndReplication() throws IOException { * Start up replication source and sink handlers. 
*/ private void startReplicationService() throws IOException { - if (this.replicationSourceHandler == this.replicationSinkHandler && - this.replicationSourceHandler != null) { + if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) { this.replicationSourceHandler.startReplicationService(); } else { if (this.replicationSourceHandler != null) { @@ -2051,9 +2097,6 @@ private void startServices() throws IOException { if (this.storefileRefresher != null) { choreService.scheduleChore(storefileRefresher); } - if (this.movedRegionsCleaner != null) { - choreService.scheduleChore(movedRegionsCleaner); - } if (this.fsUtilizationChore != null) { choreService.scheduleChore(fsUtilizationChore); } @@ -2111,9 +2154,6 @@ private void initializeThreads() { slowLogTableOpsChore = new SlowLogTableOpsChore(this, duration, this.namedQueueRecorder); } - // Create the thread to clean the moved regions list - movedRegionsCleaner = MovedRegionsCleaner.create(this); - if (this.nonceManager != null) { // Create the scheduled chore that cleans up nonces. nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this); @@ -2402,10 +2442,8 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co final ReportRegionStateTransitionRequest request = createReportRegionStateTransitionRequest(context); - // Time to pause if master says 'please hold'. Make configurable if needed. - final long initPauseTime = 1000; int tries = 0; - long pauseTime; + long pauseTime = this.retryPauseTime; // Keep looping till we get an error. We want to send reports even though server is going down. // Only go down if clusterConnection is null. It is set to null almost as last thing as the // HRegionServer does down. @@ -2436,9 +2474,9 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co || ioe instanceof CallQueueTooBigException; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(initPauseTime, tries); + pauseTime = ConnectionUtils.getPauseTime(this.retryPauseTime, tries); } else { - pauseTime = initPauseTime; // Reset. + pauseTime = this.retryPauseTime; // Reset. } LOG.info("Failed report transition " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + @@ -2464,9 +2502,9 @@ private void triggerFlushInPrimaryRegion(final HRegion region) { if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) { return; } - if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf) || - !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled( - region.conf)) { + TableName tn = region.getTableDescriptor().getTableName(); + if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf, tn) || + !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf)) { region.setReadsEnabled(true); return; } @@ -2474,10 +2512,13 @@ private void triggerFlushInPrimaryRegion(final HRegion region) { region.setReadsEnabled(false); // disable reads before marking the region as opened. // RegionReplicaFlushHandler might reset this. 
- // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler + // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler if (this.executorService != null) { this.executorService.submit(new RegionReplicaFlushHandler(this, region)); - } + } else { + LOG.info("Executor is null; not running flush of primary region replica for {}", + region.getRegionInfo()); + } } @Override @@ -2485,7 +2526,7 @@ public RpcServerInterface getRpcServer() { return rpcServices.rpcServer; } - @VisibleForTesting + @InterfaceAudience.Private public RSRpcServices getRSRpcServices() { return rpcServices; } @@ -2575,7 +2616,7 @@ public boolean isAborted() { * logs but it does close socket in case want to bring up server on old * hostname+port immediately. */ - @VisibleForTesting + @InterfaceAudience.Private protected void kill() { this.killed = true; abort("Simulated kill"); @@ -2601,6 +2642,11 @@ private void scheduleAbortTimer() { } } + protected final void shutdownChore(ScheduledChore chore) { + if (chore != null) { + chore.shutdown(); + } + } /** * Wait on all threads to finish. Presumption is that all closes and stops * have already been called. @@ -2608,16 +2654,16 @@ private void scheduleAbortTimer() { protected void stopServiceThreads() { // clean up the scheduled chores if (this.choreService != null) { - choreService.cancelChore(nonceManagerChore); - choreService.cancelChore(compactionChecker); - choreService.cancelChore(periodicFlusher); - choreService.cancelChore(healthCheckChore); - choreService.cancelChore(executorStatusChore); - choreService.cancelChore(storefileRefresher); - choreService.cancelChore(movedRegionsCleaner); - choreService.cancelChore(fsUtilizationChore); - choreService.cancelChore(slowLogTableOpsChore); - // clean up the remaining scheduled chores (in case we missed out any) + shutdownChore(nonceManagerChore); + shutdownChore(compactionChecker); + shutdownChore(periodicFlusher); + shutdownChore(healthCheckChore); + shutdownChore(executorStatusChore); + shutdownChore(storefileRefresher); + shutdownChore(fsUtilizationChore); + shutdownChore(slowLogTableOpsChore); + // cancel the remaining scheduled chores (in case we missed out any) + // TODO: cancel will not cleanup the chores, so we need make sure we do not miss any choreService.shutdown(); } @@ -2634,9 +2680,10 @@ protected void stopServiceThreads() { if (this.compactSplitThread != null) { this.compactSplitThread.join(); } - if (this.executorService != null) this.executorService.shutdown(); - if (this.replicationSourceHandler != null && - this.replicationSourceHandler == this.replicationSinkHandler) { + if (this.executorService != null) { + this.executorService.shutdown(); + } + if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) { this.replicationSourceHandler.stopReplicationService(); } else { if (this.replicationSourceHandler != null) { @@ -2684,7 +2731,7 @@ private synchronized ServerName createRegionServerStatusStub() { * @param refresh If true then master address will be read from ZK, otherwise use cached data * @return master + port, or null if server has been stopped */ - @VisibleForTesting + @InterfaceAudience.Private protected synchronized ServerName createRegionServerStatusStub(boolean refresh) { if (rssStub != null) { return masterAddressTracker.getMasterAddress(); @@ -2780,6 +2827,7 @@ private RegionServerStartupResponse reportForDuty() throws IOException { rpcServices.requestCount.reset(); rpcServices.rpcGetRequestCount.reset(); 
rpcServices.rpcScanRequestCount.reset(); + rpcServices.rpcFullScanRequestCount.reset(); rpcServices.rpcMultiRequestCount.reset(); rpcServices.rpcMutateRequestCount.reset(); LOG.info("reportForDuty to master=" + masterServerName + " with port=" @@ -3069,32 +3117,34 @@ public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { * Load the replication executorService objects, if any */ private static void createNewReplicationInstance(Configuration conf, HRegionServer server, - FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException { + FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException { // read in the name of the source replication class from the config file. String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); // read in the name of the sink replication class from the config file. String sinkClassname = conf.get(HConstants.REPLICATION_SINK_SERVICE_CLASSNAME, - HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); + HConstants.REPLICATION_SINK_SERVICE_CLASSNAME_DEFAULT); // If both the sink and the source class names are the same, then instantiate // only one object. if (sourceClassname.equals(sinkClassname)) { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler; + server.sameReplicationSourceAndSink = true; } else { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = newReplicationInstance(sinkClassname, - ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + server.sameReplicationSourceAndSink = false; } } private static T newReplicationInstance(String classname, Class xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir, - Path oldLogDir, WALProvider walProvider) throws IOException { + Path oldLogDir, WALFactory walFactory) throws IOException { final Class clazz; try { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); @@ -3103,7 +3153,7 @@ private static T newReplicationInstance(String cl throw new IOException("Could not find class for " + classname); } T service = ReflectionUtils.newInstance(clazz, conf); - service.initialize(server, walFs, logDir, oldLogDir, walProvider); + service.initialize(server, walFs, logDir, oldLogDir, walFactory); return service; } @@ -3456,11 +3506,11 @@ boolean checkFileSystem() { @Override public void updateRegionFavoredNodesMapping(String encodedRegionName, List favoredNodes) { - InetSocketAddress[] addr = new InetSocketAddress[favoredNodes.size()]; + Address[] addr = new Address[favoredNodes.size()]; // Refer to the comment on the declaration of regionFavoredNodesMap on why - // it is a map of region name to InetSocketAddress[] + // it is a map of region name to Address[] for (int i = 0; i < favoredNodes.size(); i++) { - addr[i] = InetSocketAddress.createUnresolved(favoredNodes.get(i).getHostName(), + addr[i] = 
Address.fromParts(favoredNodes.get(i).getHostName(), favoredNodes.get(i).getPort()); } regionFavoredNodesMap.put(encodedRegionName, addr); @@ -3468,13 +3518,14 @@ public void updateRegionFavoredNodesMapping(String encodedRegionName, /** * Return the favored nodes for a region given its encoded name. Look at the - * comment around {@link #regionFavoredNodesMap} on why it is InetSocketAddress[] - * + * comment around {@link #regionFavoredNodesMap} on why we convert to InetSocketAddress[] + * here. + * @param encodedRegionName * @return array of favored locations */ @Override public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) { - return regionFavoredNodesMap.get(encodedRegionName); + return Address.toSocketAddress(regionFavoredNodesMap.get(encodedRegionName)); } @Override @@ -3485,12 +3536,10 @@ public ServerNonceManager getNonceManager() { private static class MovedRegionInfo { private final ServerName serverName; private final long seqNum; - private final long moveTime; MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - this.moveTime = EnvironmentEdgeManager.currentTime(); } public ServerName getServerName() { @@ -3500,18 +3549,8 @@ public ServerName getServerName() { public long getSeqNum() { return seqNum; } - - long getMoveTime() { - return moveTime; - } } - /** - * This map will contains all the regions that we closed for a move. - * We add the time it was moved as we don't want to keep too old information - */ - private Map movedRegions = new ConcurrentHashMap<>(3000); - /** * We need a timeout. If not there is a risk of giving a wrong information: this would double * the number of network calls instead of reducing them. @@ -3525,86 +3564,23 @@ private void addToMovedRegions(String encodedName, ServerName destination, long } LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" + closeSeqNum); - movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); + movedRegionInfoCache.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); } void removeFromMovedRegions(String encodedName) { - movedRegions.remove(encodedName); + movedRegionInfoCache.invalidate(encodedName); } - private MovedRegionInfo getMovedRegion(final String encodedRegionName) { - MovedRegionInfo dest = movedRegions.get(encodedRegionName); - - long now = EnvironmentEdgeManager.currentTime(); - if (dest != null) { - if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) { - return dest; - } else { - movedRegions.remove(encodedRegionName); - } - } - - return null; + @InterfaceAudience.Private + public MovedRegionInfo getMovedRegion(String encodedRegionName) { + return movedRegionInfoCache.getIfPresent(encodedRegionName); } - /** - * Remove the expired entries from the moved regions list. - */ - protected void cleanMovedRegions() { - final long cutOff = System.currentTimeMillis() - TIMEOUT_REGION_MOVED; - - movedRegions.entrySet().removeIf(e -> e.getValue().getMoveTime() < cutOff); - } - - /* - * Use this to allow tests to override and schedule more frequently. - */ - - protected int movedRegionCleanerPeriod() { + @InterfaceAudience.Private + public int movedRegionCacheExpiredTime() { return TIMEOUT_REGION_MOVED; } - /** - * Creates a Chore thread to clean the moved region cache. 
- */ - protected final static class MovedRegionsCleaner extends ScheduledChore implements Stoppable { - private HRegionServer regionServer; - Stoppable stoppable; - - private MovedRegionsCleaner( - HRegionServer regionServer, Stoppable stoppable){ - super("MovedRegionsCleaner for region " + regionServer, stoppable, - regionServer.movedRegionCleanerPeriod()); - this.regionServer = regionServer; - this.stoppable = stoppable; - } - - static MovedRegionsCleaner create(HRegionServer rs){ - Stoppable stoppable = new Stoppable() { - private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} - }; - - return new MovedRegionsCleaner(rs, stoppable); - } - - @Override - protected void chore() { - regionServer.cleanMovedRegions(); - } - - @Override - public void stop(String why) { - stoppable.stop(why); - } - - @Override - public boolean isStopped() { - return stoppable.isStopped(); - } - } - private String getMyEphemeralNodePath() { return ZNodePaths.joinZNode(this.zooKeeper.getZNodePaths().rsZNode, getServerName().toString()); } @@ -3696,7 +3672,7 @@ public ZKPermissionWatcher getZKPermissionWatcher() { /** * @return : Returns the ConfigurationManager object for testing purposes. */ - @VisibleForTesting + @InterfaceAudience.Private ConfigurationManager getConfigurationManager() { return configurationManager; } @@ -3760,7 +3736,7 @@ MemStoreFlusher getMemStoreFlusher() { * For testing * @return whether all wal roll request finished for this regionserver */ - @VisibleForTesting + @InterfaceAudience.Private public boolean walRollRequestFinished() { return this.walRoller.walRollFinished(); } @@ -3824,6 +3800,9 @@ public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() { @Override public boolean reportFileArchivalForQuotas(TableName tableName, Collection> archivedFiles) { + if (TEST_SKIP_REPORTING_TRANSITION) { + return false; + } RegionServerStatusService.BlockingInterface rss = rssStub; if (rss == null || rsSpaceQuotaManager == null) { // the current server could be stopping. 
@@ -3965,8 +3944,17 @@ public AsyncClusterConnection getAsyncClusterConnection() { return asyncClusterConnection; } - @VisibleForTesting + @InterfaceAudience.Private public CompactedHFilesDischarger getCompactedHFilesDischarger() { return compactedFileDischarger; } + + /** + * Return pause time configured in {@link HConstants#HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME} + * @return pause time + */ + @InterfaceAudience.Private + public long getRetryPauseTime() { + return this.retryPauseTime; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index a05d4a6d31ef..99880efece73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -96,10 +96,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -110,7 +107,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; @@ -119,6 +116,10 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; + /** * A Store holds a column family in a Region. Its a memstore and a set of zero * or more StoreFiles, which stretch backwards over time. @@ -155,11 +156,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected final MemStore memstore; // This stores directory in the filesystem.
- protected final HRegion region; - private final ColumnFamilyDescriptor family; - private final HRegionFileSystem fs; + private final HRegion region; protected Configuration conf; - protected CacheConfig cacheConf; private long lastCompactSize = 0; volatile boolean forceMajor = false; private AtomicLong storeSize = new AtomicLong(); @@ -213,16 +211,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private final Set changedReaderObservers = Collections.newSetFromMap(new ConcurrentHashMap()); - protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; - /** Checksum configuration */ - protected ChecksumType checksumType; - protected int bytesPerChecksum; - - // Comparing KeyValues - protected final CellComparator comparator; - final StoreEngine storeEngine; private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean(); @@ -234,7 +224,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private long blockingFileCount; private int compactionCheckMultiplier; - protected Encryption.Context cryptoContext = Encryption.Context.NONE; private AtomicLong flushedCellsCount = new AtomicLong(); private AtomicLong compactedCellsCount = new AtomicLong(); @@ -244,6 +233,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); + private final StoreContext storeContext; + /** * Constructor * @param family HColumnDescriptor for this column @@ -252,12 +243,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected HStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { - this.fs = region.getRegionFileSystem(); - - // Assemble the store's home directory and Ensure it exists. - fs.createStoreDir(family.getNameAsString()); - this.region = region; - this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. @@ -266,18 +251,22 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, .addBytesMap(region.getTableDescriptor().getValues()) .addStringMap(family.getConfiguration()) .addBytesMap(family.getValues()); - this.blocksize = family.getBlocksize(); + + this.region = region; + this.storeContext = initializeStoreContext(family); + + // Assemble the store's home directory and Ensure it exists. 
+ region.getRegionFileSystem().createStoreDir(family.getNameAsString()); // set block storage policy for store directory String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } - this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim()); + region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim()); this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); - this.comparator = region.getCellComparator(); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); @@ -286,14 +275,11 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, long ttl = determineTTLFromFamily(family); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); + scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator()); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); - // Setting up cache configuration for this family - createCacheConf(family); - this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false); this.blockingFileCount = @@ -306,7 +292,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } - this.storeEngine = createStoreEngine(this, this.conf, this.comparator); + this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator()); List hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and @@ -316,10 +302,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles)); this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles); - // Initialize checksum type from name. The names are CRC32, CRC32C, etc. 
- this.checksumType = getChecksumType(conf); - // Initialize bytes per checksum - this.bytesPerChecksum = getBytesPerChecksum(conf); flushRetriesNumber = conf.getInt( "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER); pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE); @@ -328,7 +310,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber); } - cryptoContext = EncryptionUtil.createEncryptionContext(conf, family); int confPrintThreshold = this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50); @@ -345,6 +326,32 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, cacheOnWriteLogged = false; } + private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { + return new StoreContext.Builder() + .withBlockSize(family.getBlocksize()) + .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withBloomType(family.getBloomFilterType()) + .withCacheConfig(createCacheConf(family)) + .withCellComparator(region.getCellComparator()) + .withColumnFamilyDescriptor(family) + .withCompactedFilesSupplier(this::getCompactedFiles) + .withRegionFileSystem(region.getRegionFileSystem()) + .withFavoredNodesSupplier(this::getFavoredNodes) + .withFamilyStoreDirectoryPath(region.getRegionFileSystem() + .getStoreDir(family.getNameAsString())) + .withRegionCoprocessorHost(region.getCoprocessorHost()) + .build(); + } + + private InetSocketAddress[] getFavoredNodes() { + InetSocketAddress[] favoredNodes = null; + if (region.getRegionServerServices() != null) { + favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( + region.getRegionInfo().getEncodedName()); + } + return favoredNodes; + } + /** * @return MemStore Instance to use in this store. */ @@ -356,7 +363,7 @@ private MemStore getMemstore() { inMemoryCompaction = MemoryCompactionPolicy.valueOf( conf.get("hbase.systemtables.compacting.memstore.type", "NONE")); } else { - inMemoryCompaction = family.getInMemoryCompaction(); + inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction(); } if (inMemoryCompaction == null) { inMemoryCompaction = @@ -366,13 +373,13 @@ private MemStore getMemstore() { switch (inMemoryCompaction) { case NONE: ms = ReflectionUtils.newInstance(DefaultMemStore.class, - new Object[] { conf, this.comparator, + new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores()}); break; default: Class clz = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class); - ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this, + ms = ReflectionUtils.newInstance(clz, new Object[]{conf, getComparator(), this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction}); } return ms; @@ -382,10 +389,12 @@ private MemStore getMemstore() { * Creates the cache config. * @param family The current column family. 
*/ - protected void createCacheConf(final ColumnFamilyDescriptor family) { - this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(), + protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) { + CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(), region.getRegionServicesForStores().getByteBuffAllocator()); - LOG.info("Created cacheConfig: " + this.getCacheConfig() + " for " + this); + LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf, + family.getNameAsString(), region.getRegionInfo().getEncodedName()); + return cacheConf; } /** @@ -398,7 +407,7 @@ protected void createCacheConf(final ColumnFamilyDescriptor family) { */ protected StoreEngine createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { - return StoreEngine.create(store, conf, comparator); + return StoreEngine.create(store, conf, kvComparator); } /** @@ -419,9 +428,13 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { return ttl; } + StoreContext getStoreContext() { + return storeContext; + } + @Override public String getColumnFamilyName() { - return this.family.getNameAsString(); + return this.storeContext.getFamily().getNameAsString(); } @Override @@ -431,11 +444,11 @@ public TableName getTableName() { @Override public FileSystem getFileSystem() { - return this.fs.getFileSystem(); + return storeContext.getRegionFileSystem().getFileSystem(); } public HRegionFileSystem getRegionFileSystem() { - return this.fs; + return storeContext.getRegionFileSystem(); } /* Implementation of StoreConfigInformation */ @@ -472,33 +485,10 @@ public long getBlockingFileCount() { } /* End implementation of StoreConfigInformation */ - /** - * Returns the configured bytesPerChecksum value. - * @param conf The configuration - * @return The bytesPerChecksum that is set in the configuration - */ - public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); - } - - /** - * Returns the configured checksum algorithm. - * @param conf The configuration - * @return The checksum algorithm that is set in the configuration - */ - public static ChecksumType getChecksumType(Configuration conf) { - String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); - if (checksumName == null) { - return ChecksumType.getDefaultChecksumType(); - } else { - return ChecksumType.nameToType(checksumName); - } - } @Override public ColumnFamilyDescriptor getColumnFamilyDescriptor() { - return this.family; + return this.storeContext.getFamily(); } @Override @@ -557,7 +547,7 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) { * from the given directory. */ private List loadStoreFiles(boolean warmup) throws IOException { - Collection files = fs.getStoreFiles(getColumnFamilyName()); + Collection files = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); return openStoreFiles(files, warmup); } @@ -608,7 +598,7 @@ private List openStoreFiles(Collection files, boolean if (ioe != null) { // close StoreFile readers boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? 
getCacheConfig().shouldEvictOnClose(): true; for (HStoreFile file : results) { try { if (file != null) { @@ -636,7 +626,8 @@ private List openStoreFiles(Collection files, boolean results.removeAll(filesToRemove); if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) { LOG.debug("Moving the files {} to archive", filesToRemove); - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); + getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), + filesToRemove); } } @@ -645,7 +636,7 @@ private List openStoreFiles(Collection files, boolean @Override public void refreshStoreFiles() throws IOException { - Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); + Collection newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } @@ -656,7 +647,7 @@ public void refreshStoreFiles() throws IOException { public void refreshStoreFiles(Collection newFiles) throws IOException { List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { - storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); + storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file)); } refreshStoreFilesInternal(storeFiles); } @@ -725,7 +716,6 @@ private void refreshStoreFilesInternal(Collection newFiles) throw completeCompaction(toBeRemovedStoreFiles); } - @VisibleForTesting protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p, isPrimaryReplicaStore()); @@ -734,7 +724,8 @@ protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); - HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf); + HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(), + getCacheConfig()); storeFile.initReader(); return storeFile; } @@ -817,7 +808,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); - reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf); + reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf); Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); @@ -854,7 +845,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { do { Cell cell = scanner.getCell(); if (prevCell != null) { - if (comparator.compareRows(prevCell, cell) > 0) { + if (getComparator().compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" @@ -891,13 +882,13 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { */ public Pair preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException { Path srcPath = new Path(srcPathStr); - return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); + return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); } public Path bulkLoadHFile(byte[] family, String 
srcPathStr, Path dstPath) throws IOException { Path srcPath = new Path(srcPathStr); try { - fs.commitStoreFile(srcPath, dstPath); + getRegionFileSystem().commitStoreFile(srcPath, dstPath); } finally { if (this.getCoprocessorHost() != null) { this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath); @@ -963,8 +954,8 @@ public ImmutableCollection close() throws IOException { storeEngine.getStoreFileManager().clearCompactedFiles(); // clear the compacted files if (CollectionUtils.isNotEmpty(compactedfiles)) { - removeCompactedfiles(compactedfiles, cacheConf != null ? - cacheConf.shouldEvictOnClose() : true); + removeCompactedfiles(compactedfiles, getCacheConfig() != null ? + getCacheConfig().shouldEvictOnClose() : true); } if (!result.isEmpty()) { // initialize the thread pool for closing store files in parallel. @@ -980,7 +971,7 @@ public ImmutableCollection close() throws IOException { @Override public Void call() throws IOException { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; f.closeStoreFile(evictOnClose); return null; } @@ -1091,7 +1082,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { FileSystem srcFs = path.getFileSystem(conf); srcFs.access(path, FsAction.READ_WRITE); try (HFile.Reader reader = - HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) { + HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); Optional lk = reader.getLastKey(); @@ -1103,7 +1094,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { } } - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); HStoreFile sf = createStoreFileAndReader(dstPath); StoreFileReader r = sf.getReader(); this.storeSize.addAndGet(r.length()); @@ -1128,7 +1119,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status) throws IOException { // Write-out finished successfully, move into the right spot - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); status.setStatus("Flushing " + this + ": reopening flushed file"); HStoreFile sf = createStoreFileAndReader(dstPath); @@ -1166,12 +1157,13 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy) throws IOException { // creating new cache config for each new writer + final CacheConfig cacheConf = getCacheConfig(); final CacheConfig writerCacheConf = new CacheConfig(cacheConf); if (isCompaction) { // Don't cache data on write on compactions, unless specifically configured to do so // Cache only when total file size remains lower than configured threshold final boolean cacheCompactedBlocksOnWrite = - cacheConf.shouldCacheCompactedBlocksOnWrite(); + getCacheConfig().shouldCacheCompactedBlocksOnWrite(); // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well @@ -1205,53 +1197,48 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } } } - 
InetSocketAddress[] favoredNodes = null; - if (region.getRegionServerServices() != null) { - favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( - region.getRegionInfo().getEncodedName()); - } + Encryption.Context encryptionContext = storeContext.getEncryptionContext(); HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, - cryptoContext); - Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString()); - StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, - this.getFileSystem()) - .withOutputDir(familyTempDir) - .withBloomType(family.getBloomFilterType()) - .withMaxKeyCount(maxKeyCount) - .withFavoredNodes(favoredNodes) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(shouldDropBehind) - .withCompactedFilesSupplier(this::getCompactedFiles) - .withFileStoragePolicy(fileStoragePolicy); + encryptionContext); + Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); + StoreFileWriter.Builder builder = + new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem()) + .withOutputDir(familyTempDir) + .withBloomType(storeContext.getBloomFilterType()) + .withMaxKeyCount(maxKeyCount) + .withFavoredNodes(storeContext.getFavoredNodes()) + .withFileContext(hFileContext) + .withShouldDropCacheBehind(shouldDropBehind) + .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier()) + .withFileStoragePolicy(fileStoragePolicy); return builder.build(); } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) { + boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } + ColumnFamilyDescriptor family = getColumnFamilyDescriptor(); HFileContext hFileContext = new HFileContextBuilder() - .withIncludesMvcc(includeMVCCReadpoint) - .withIncludesTags(includesTag) - .withCompression(compression) - .withCompressTags(family.isCompressTags()) - .withChecksumType(checksumType) - .withBytesPerCheckSum(bytesPerChecksum) - .withBlockSize(blocksize) - .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) - .withEncryptionContext(cryptoContext) - .withCreateTime(EnvironmentEdgeManager.currentTime()) - .withColumnFamily(family.getName()) - .withTableName(region.getTableDescriptor() - .getTableName().getName()) - .withCellComparator(this.comparator) - .build(); + .withIncludesMvcc(includeMVCCReadpoint) + .withIncludesTags(includesTag) + .withCompression(compression) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()) + .withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(encryptionContext) + .withCreateTime(EnvironmentEdgeManager.currentTime()) + .withColumnFamily(getColumnFamilyDescriptor().getName()) + .withTableName(getTableName().getName()) + .withCellComparator(getComparator()) + .build(); return hFileContext; } - private long getTotalSize(Collection sfs) { return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum(); } @@ -1528,7 +1515,7 @@ public List compact(CompactionContext compaction, // Ready to go. Have list of files to compact. 
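The `createWriterInTmp`/`createFileContext` hunks above now assemble every writer setting from the column family descriptor, the configuration (via `StoreUtils`), or the store context, instead of fields cached on `HStore`. The fluent-builder sketch below mirrors that assembly pattern with invented, simplified types; the real `HFileContextBuilder` carries many more settings.

```java
// Simplified stand-in for the builder chain used above; not the HBase API.
final class FileContextSketch {
  final boolean includesMvcc;
  final int blockSize;
  final String compression;

  private FileContextSketch(Builder b) {
    this.includesMvcc = b.includesMvcc;
    this.blockSize = b.blockSize;
    this.compression = b.compression;
  }

  static final class Builder {
    private boolean includesMvcc;
    private int blockSize = 64 * 1024;   // analogous to the family descriptor's blocksize
    private String compression = "NONE"; // analogous to the default compression algorithm

    Builder withIncludesMvcc(boolean v) { this.includesMvcc = v; return this; }
    Builder withBlockSize(int v) { this.blockSize = v; return this; }
    Builder withCompression(String v) { this.compression = v == null ? "NONE" : v; return this; }
    FileContextSketch build() { return new FileContextSketch(this); }
  }

  public static void main(String[] args) {
    FileContextSketch ctx = new Builder()
        .withIncludesMvcc(true)
        .withBlockSize(128 * 1024)
        .withCompression(null)   // a null algorithm falls back to the default, as in the hunk
        .build();
    System.out.println(ctx.compression + " " + ctx.blockSize + " " + ctx.includesMvcc);
  }
}
```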
LOG.info("Starting compaction of " + filesToCompact + - " into tmpdir=" + fs.getTempDir() + ", totalSize=" + + " into tmpdir=" + getRegionFileSystem().getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1)); return doCompaction(cr, filesToCompact, user, compactionStartTime, @@ -1538,7 +1525,6 @@ public List compact(CompactionContext compaction, } } - @VisibleForTesting protected List doCompaction(CompactionRequestImpl cr, Collection filesToCompact, User user, long compactionStartTime, List newFiles) throws IOException { @@ -1579,7 +1565,7 @@ private void setStoragePolicyFromFileName(List newFiles) throws IOExceptio String prefix = HConstants.STORAGE_POLICY_PREFIX; for (Path newFile : newFiles) { if (newFile.getParent().getName().startsWith(prefix)) { - CommonFSUtils.setStoragePolicy(fs.getFileSystem(), newFile, + CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile, newFile.getParent().getName().substring(prefix.length())); } } @@ -1604,7 +1590,7 @@ private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot - Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); + Path destPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); } @@ -1624,8 +1610,8 @@ private void writeCompactionWalRecord(Collection filesCompacted, newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList()); RegionInfo info = this.region.getRegionInfo(); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info, - family.getName(), inputPaths, outputPaths, - fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString())); + getColumnFamilyDescriptor().getName(), inputPaths, outputPaths, + getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString())); // Fix reaching into Region to get the maxWaitForSeqId. // Does this method belong in Region altogether given it is making so many references up there? 
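The `moveFileIntoPlace` hunk above keeps the same commit sequence while routing it through `getRegionFileSystem()`: validate the freshly written compaction output, then commit it from the temporary directory into the family's store directory (the real method then opens a reader on the committed file). A hedged, self-contained sketch of that flow, with stand-in types rather than the HBase classes:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative only: StoreFileSystem stands in for HRegionFileSystem-style behavior.
final class CompactionCommitSketch {
  interface StoreFileSystem {
    void validate(Path tmpFile);                 // analogous to validateStoreFile
    Path commit(String family, Path tmpFile);    // analogous to commitStoreFile
  }

  static Path moveFileIntoPlace(StoreFileSystem fs, String family, Path newFile) {
    fs.validate(newFile);                        // fail fast on a corrupt compaction output
    return fs.commit(family, newFile);           // rename into the family's store directory
  }

  public static void main(String[] args) {
    StoreFileSystem fs = new StoreFileSystem() {
      @Override public void validate(Path tmpFile) { /* open and sanity-check the file */ }
      @Override public Path commit(String family, Path tmpFile) {
        return Paths.get("/hbase/data/region", family, tmpFile.getFileName().toString());
      }
    };
    System.out.println(moveFileIntoPlace(fs, "cf", Paths.get("/hbase/.tmp/abc123")));
  }
}
```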
// Could be Region#writeCompactionMarker(compactionDescriptor); @@ -1633,7 +1619,6 @@ private void writeCompactionWalRecord(Collection filesCompacted, this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC()); } - @VisibleForTesting void replaceStoreFiles(Collection compactedFiles, Collection result) throws IOException { this.lock.writeLock().lock(); @@ -1753,7 +1738,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick String familyName = this.getColumnFamilyName(); Set inputFiles = new HashSet<>(); for (String compactionInput : compactionInputs) { - Path inputPath = fs.getStoreFilePath(familyName, compactionInput); + Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } @@ -1773,7 +1758,8 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -1794,7 +1780,6 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick * but instead makes a compaction candidate list by itself. * @param N Number of files. */ - @VisibleForTesting public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException { List filesToCompact; boolean isMajor; @@ -2069,7 +2054,6 @@ private void validateStoreFile(Path path) throws IOException { * Update counts. * @param compactedFiles list of files that were compacted */ - @VisibleForTesting protected void completeCompaction(Collection compactedFiles) // Rename this method! TODO. throws IOException { @@ -2095,7 +2079,7 @@ int versionsToReturn(final int wantedVersions) { throw new IllegalArgumentException("Number of versions must be > 0"); } // Make sure we do not return more than maximum versions for this store. - int maxVersions = this.family.getMaxVersions(); + int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); return wantedVersions > maxVersions ? maxVersions: wantedVersions; } @@ -2370,7 +2354,7 @@ public RegionCoprocessorHost getCoprocessorHost() { @Override public RegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return getRegionFileSystem().getRegionInfo(); } @Override @@ -2512,7 +2496,8 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2561,9 +2546,8 @@ public boolean needsCompaction() { * Used for tests. * @return cache configuration for this Store. 
*/ - @VisibleForTesting public CacheConfig getCacheConfig() { - return this.cacheConf; + return storeContext.getCacheConf(); } public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); @@ -2577,12 +2561,12 @@ public CacheConfig getCacheConfig() { @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); - return DEEP_OVERHEAD + memstoreSize.getHeapSize(); + return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); } @Override public CellComparator getComparator() { - return comparator; + return storeContext.getComparator(); } public ScanInfo getScanInfo() { @@ -2641,7 +2625,6 @@ public long getMajorCompactedCellsSize() { * Returns the StoreEngine that is backing this concrete implementation of Store. * @return Returns the {@link StoreEngine} object used internally inside this HStore object. */ - @VisibleForTesting public StoreEngine getStoreEngine() { return this.storeEngine; } @@ -2657,7 +2640,7 @@ protected OffPeakHours getOffPeakHours() { public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) - .addBytesMap(family.getValues()); + .addBytesMap(getColumnFamilyDescriptor().getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } @@ -2789,8 +2772,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem() + .removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index 5e0bf2a43e95..7a7468973af9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -47,8 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -319,7 +319,6 @@ public boolean isCompactedAway() { return compactedAway; } - @VisibleForTesting public int getRefCount() { return fileInfo.refCount.get(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index ea5586c4af9e..342ec18e1ed9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -39,8 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Manages tuning of Heap memory using HeapMemoryTuner. Most part of the heap memory is * split between Memstores and BlockCache. 
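The `onConfigurationChange` hunk above rebuilds the store's configuration as a `CompoundConfiguration` layering the server configuration under the column family's own values. The sketch below restates that layering idea with plain maps; it is not the real `CompoundConfiguration` API, and the key used in `main` is just a familiar example.

```java
import java.util.HashMap;
import java.util.Map;

// Self-contained layering sketch: family-level values override the base configuration.
final class LayeredConfigSketch {
  private final Map<String, String> base = new HashMap<>();
  private final Map<String, String> familyOverrides = new HashMap<>();

  LayeredConfigSketch add(Map<String, String> conf) { base.putAll(conf); return this; }
  LayeredConfigSketch addFamilyValues(Map<String, String> values) {
    familyOverrides.putAll(values);
    return this;
  }

  String get(String key, String defaultValue) {
    if (familyOverrides.containsKey(key)) {
      return familyOverrides.get(key);   // family-level values win
    }
    return base.getOrDefault(key, defaultValue);
  }

  public static void main(String[] args) {
    Map<String, String> serverConf = Map.of("hbase.hstore.blockingStoreFiles", "16");
    Map<String, String> familyValues = Map.of("hbase.hstore.blockingStoreFiles", "64");
    LayeredConfigSketch conf =
        new LayeredConfigSketch().add(serverConf).addFamilyValues(familyValues);
    System.out.println(conf.get("hbase.hstore.blockingStoreFiles", "10")); // 64
  }
}
```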
This manager helps in tuning sizes of both these @@ -106,7 +104,6 @@ public class HeapMemoryManager { private List tuneObservers = new ArrayList<>(); - @VisibleForTesting HeapMemoryManager(BlockCache blockCache, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { Configuration conf = server.getConfiguration(); @@ -219,7 +216,7 @@ public void start(ChoreService service) { public void stop() { // The thread is Daemon. Just interrupting the ongoing process. LOG.info("Stopping"); - this.heapMemTunerChore.cancel(true); + this.heapMemTunerChore.shutdown(true); } public void registerTuneObserver(HeapMemoryTuneObserver observer) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index f28e28e31cc8..2fef5d106f04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.regionserver; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; @@ -29,10 +27,10 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; /** * Implements a heap merge across any number of KeyValueScanners. @@ -419,8 +417,6 @@ public PriorityQueue getHeap() { return this.heap; } - - @VisibleForTesting KeyValueScanner getCurrentForTesting() { return current; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index cbab595517ff..be73bbaf8a3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -27,8 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Runs periodically to determine if the WAL should be rolled. 
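The `HeapMemoryManager.stop()` hunk above replaces `cancel(true)` with `shutdown(true)` on the tuner chore. As a loose analogy only (HBase's `ScheduledChore` is not a `ScheduledExecutorService`), the sketch below shows the corresponding distinction with standard Java scheduling: cancelling one scheduled task versus shutting the whole schedule down and interrupting an in-flight run.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

final class PeriodicTunerShutdownSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
      Thread t = new Thread(r, "heap-tuner-sketch");
      t.setDaemon(true); // the real tuner thread is a daemon as well
      return t;
    });
    ScheduledFuture<?> tuner = scheduler.scheduleAtFixedRate(
        () -> System.out.println("tune heap"), 0, 50, TimeUnit.MILLISECONDS);

    Thread.sleep(120);
    tuner.cancel(true);        // stop this task only; the scheduler keeps running
    scheduler.shutdownNow();   // stop everything and interrupt any running iteration
    scheduler.awaitTermination(1, TimeUnit.SECONDS);
  }
}
```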
* @@ -39,7 +37,6 @@ * TODO: change to a pool of threads */ @InterfaceAudience.Private -@VisibleForTesting public class LogRoller extends AbstractWALRoller { private static final Logger LOG = LoggerFactory.getLogger(LogRoller.class); @@ -64,7 +61,6 @@ protected void scheduleFlush(String encodedRegionName, List families) { requester.requestFlush(r, families, FlushLifeCycleTracker.DUMMY); } - @VisibleForTesting Map getWalNeedsRoll() { return this.wals; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index 2dafceee2e8d..a0313444a53e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -32,8 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * The ongoing MemStore Compaction manager, dispatches a solo running compaction and interrupts * the compaction if requested. The compaction is interrupted and stopped by CompactingMemStore, @@ -242,7 +240,6 @@ private ImmutableSegment createSubstitution(MemStoreCompactionStrategy.Action ac return result; } - @VisibleForTesting void initiateCompactionStrategy(MemoryCompactionPolicy compType, Configuration configuration, String cfName) throws IllegalArgumentIOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java index 7951c72b0da4..f5fccf4d2a8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java @@ -27,7 +27,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; @@ -36,8 +35,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** * A memstore-local allocation buffer. *

    @@ -73,7 +73,6 @@ public class MemStoreLABImpl implements MemStoreLAB { private ReentrantLock lock = new ReentrantLock(); // A set of chunks contained by this memstore LAB - @VisibleForTesting Set chunks = new ConcurrentSkipListSet(); private final int dataChunkSize; private final int maxAlloc; @@ -270,7 +269,6 @@ public void close() { } } - @VisibleForTesting int getOpenScannerCount() { return this.openScannerCount.get(); } @@ -397,12 +395,10 @@ public boolean isOffHeap() { return this.chunkCreator.isOffheap(); } - @VisibleForTesting Chunk getCurrentChunk() { return currChunk.get(); } - @VisibleForTesting BlockingQueue getPooledChunks() { BlockingQueue pooledChunks = new LinkedBlockingQueue<>(); for (Integer id : this.chunks) { @@ -414,7 +410,7 @@ BlockingQueue getPooledChunks() { return pooledChunks; } - @VisibleForTesting Integer getNumOfChunksReturnedToPool() { + Integer getNumOfChunksReturnedToPool() { int i = 0; for (Integer id : this.chunks) { if (chunkCreator.isChunkInPool(id)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 715da6c47bd8..86b97a2afb9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -27,8 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** *

    * This class is for maintaining the various regionserver statistics @@ -44,6 +42,7 @@ public class MetricsRegionServer { "hbase.regionserver.enable.table.latencies"; public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true; + public static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time"; private final MetricsRegionServerSource serverSource; private final MetricsRegionServerWrapper regionServerWrapper; private RegionServerTableMetrics tableMetrics; @@ -55,6 +54,8 @@ public class MetricsRegionServer { private Timer bulkLoadTimer; private Meter serverReadQueryMeter; private Meter serverWriteQueryMeter; + protected long slowMetricTime; + protected static final int DEFAULT_SLOW_METRIC_TIME = 1000; // milliseconds public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf, MetricsTable metricsTable) { @@ -70,6 +71,7 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi // create and use metrics from the new hbase-metrics based registry. bulkLoadTimer = metricRegistry.timer("Bulkload"); + slowMetricTime = conf.getLong(SLOW_METRIC_TIME, DEFAULT_SLOW_METRIC_TIME); quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class); serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond"); serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond"); @@ -95,12 +97,10 @@ static RegionServerTableMetrics createTableMetrics(Configuration conf) { return null; } - @VisibleForTesting public MetricsRegionServerSource getMetricsSource() { return serverSource; } - @VisibleForTesting public MetricsUserAggregate getMetricsUserAggregate() { return userAggregate; } @@ -113,9 +113,6 @@ public void updatePutBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePutBatch(tn, t); } - if (t > 1000) { - serverSource.incrSlowPut(); - } serverSource.updatePutBatch(t); } @@ -123,6 +120,9 @@ public void updatePut(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePut(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowPut(); + } serverSource.updatePut(t); userAggregate.updatePut(t); } @@ -131,6 +131,9 @@ public void updateDelete(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDelete(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowDelete(); + } serverSource.updateDelete(t); userAggregate.updateDelete(t); } @@ -139,21 +142,27 @@ public void updateDeleteBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDeleteBatch(tn, t); } - if (t > 1000) { - serverSource.incrSlowDelete(); - } serverSource.updateDeleteBatch(t); } - public void updateCheckAndDelete(long t) { + public void updateCheckAndDelete(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndDelete(tn, t); + } serverSource.updateCheckAndDelete(t); } - public void updateCheckAndPut(long t) { + public void updateCheckAndPut(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndPut(tn, t); + } serverSource.updateCheckAndPut(t); } - public void updateCheckAndMutate(long t) { + public void updateCheckAndMutate(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndMutate(tn, t); + } serverSource.updateCheckAndMutate(t); } @@ -161,7 +170,7 @@ public void updateGet(TableName tn, long t) { if (tableMetrics != null && tn != null) { 
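The `MetricsRegionServer` hunks here make the hard-coded 1000 ms slow-operation cutoff configurable via `hbase.ipc.slow.metric.time` and move the slow counters onto the single-operation paths (put/delete) instead of the batch paths. A minimal sketch of that behavior, with a stand-in counter rather than the real metrics source:

```java
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

// Illustrative only: reads the threshold the way the patch does and bumps a slow-put counter.
final class SlowMetricSketch {
  static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time";
  static final long DEFAULT_SLOW_METRIC_TIME = 1000L; // milliseconds

  private final long slowMetricTime;
  private final LongAdder slowPuts = new LongAdder();

  SlowMetricSketch(Map<String, String> conf) {
    String configured = conf.get(SLOW_METRIC_TIME);
    this.slowMetricTime =
        configured == null ? DEFAULT_SLOW_METRIC_TIME : Long.parseLong(configured);
  }

  void updatePut(long elapsedMillis) {
    if (elapsedMillis > slowMetricTime) {
      slowPuts.increment(); // counted per individual put, not per batch
    }
  }

  public static void main(String[] args) {
    SlowMetricSketch metrics = new SlowMetricSketch(Map.of(SLOW_METRIC_TIME, "250"));
    metrics.updatePut(300);
    metrics.updatePut(100);
    System.out.println(metrics.slowPuts.sum()); // 1
  }
}
```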
tableMetrics.updateGet(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowGet(); } serverSource.updateGet(t); @@ -172,7 +181,7 @@ public void updateIncrement(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateIncrement(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowIncrement(); } serverSource.updateIncrement(t); @@ -183,7 +192,7 @@ public void updateAppend(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateAppend(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowAppend(); } serverSource.updateAppend(t); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index c4328c410da4..5a358bc0d8ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -498,6 +498,11 @@ public long getRpcScanRequestsCount() { return regionServer.rpcServices.rpcScanRequestCount.sum(); } + @Override + public long getRpcFullScanRequestsCount() { + return regionServer.rpcServices.rpcFullScanRequestCount.sum(); + } + @Override public long getRpcMultiRequestsCount() { return regionServer.rpcServices.rpcMultiRequestCount.sum(); @@ -856,7 +861,7 @@ synchronized public void run() { numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) + (metaProvider == null ? 0 : metaProvider.getNumLogFiles()); walFileSize = (provider == null ? 0 : provider.getLogFileSize()) + - (provider == null ? 0 : provider.getLogFileSize()); + (metaProvider == null ? 0 : metaProvider.getLogFileSize()); // Copy over computed values so that no thread sees half computed values. numStores = tempNumStores; numStoreFiles = tempNumStoreFiles; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index 7b5c6ef9701d..77130b8da4cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -94,7 +94,7 @@ public void run() { (long) store.getAvgStoreFileAge().getAsDouble() * store.getStorefilesCount(); } mt.storeCount += 1; - tempKey = tbl.getNameAsString() + UNDERSCORE + familyName; + tempKey = tbl.getNameAsString() + HASH + familyName; Long tempVal = mt.perStoreMemstoreOnlyReadCount.get(tempKey); if (tempVal == null) { tempVal = 0L; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java index ae5b6ec3c8ea..ac6b5dc382e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java @@ -124,6 +124,7 @@ private int getAbsoluteIndex(int index) { * in the same batch. These mutations are applied to the WAL and applied to the memstore as well. * The timestamp of the cells in the given Mutations MUST be obtained from the original mutation. 
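The `MetricsRegionServerWrapperImpl` hunk above fixes a copy-paste bug: the WAL file size should sum the regular provider and the meta provider, not read the regular provider twice. A self-contained restatement with suppliers standing in for the WAL providers:

```java
import java.util.function.LongSupplier;

final class WalSizeSketch {
  static long totalWalFileSize(LongSupplier provider, LongSupplier metaProvider) {
    long providerSize = provider == null ? 0 : provider.getAsLong();
    long metaProviderSize = metaProvider == null ? 0 : metaProvider.getAsLong();
    return providerSize + metaProviderSize;
  }

  public static void main(String[] args) {
    // Before the fix the second term also read the regular provider,
    // yielding 2048 here instead of 1536.
    System.out.println(totalWalFileSize(() -> 1024, () -> 512)); // 1536
  }
}
```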
* Note: The durability from CP will be replaced by the durability of corresponding mutation. + * Note: Currently only supports Put and Delete operations. * @param index the index that corresponds to the original mutation index in the batch * @param newOperations the Mutations to add */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java index 2b350e686f9a..d821eecf6f3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java @@ -18,19 +18,16 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; -import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.ToStringHelper; - import java.util.LinkedList; import java.util.concurrent.atomic.AtomicLong; - +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.ToStringHelper; /** * Manages the read/write consistency. This provides an interface for readers to determine what @@ -251,7 +248,6 @@ void waitForRead(WriteEntry e) { } } - @VisibleForTesting @Override public String toString() { ToStringHelper helper = MoreObjects.toStringHelper(this).add("readPoint", readPoint) @@ -266,7 +262,6 @@ public long getReadPoint() { return readPoint.get(); } - @VisibleForTesting public long getWritePoint() { return writePoint.get(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index 2c1e859fbd4b..6e813433fac6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -28,10 +28,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.ClassSize; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; /** * A mutable segment in memstore, specifically the active segment. 
@@ -124,7 +122,6 @@ public boolean setInMemoryFlushed() { * Returns the first cell in the segment * @return the first cell in the segment */ - @VisibleForTesting Cell first() { return this.getCellSet().first(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index be64966570f1..587919dac6d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -107,8 +107,8 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueuePayload; -import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; import org.apache.hadoop.hbase.net.Address; @@ -120,7 +120,6 @@ import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.LeaseManager.Lease; import org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException; import org.apache.hadoop.hbase.regionserver.Region.Operation; @@ -140,7 +139,6 @@ import org.apache.hadoop.hbase.security.access.NoopAccessChecker; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DNS; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -157,7 +155,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -248,6 +245,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -271,7 +269,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.BlockingInterface, ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver { - protected static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); + private static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); /** RPC scheduler to use for the region server. 
*/ public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS = @@ -314,6 +312,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Request counter for rpc scan final LongAdder rpcScanRequestCount = new LongAdder(); + + // Request counter for scans that might end up in full scans + final LongAdder rpcFullScanRequestCount = new LongAdder(); // Request counter for rpc multi final LongAdder rpcMultiRequestCount = new LongAdder(); @@ -325,7 +326,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final RpcServerInterface rpcServer; final InetSocketAddress isa; - @VisibleForTesting protected final HRegionServer regionServer; private final long maxScannerResultSize; @@ -459,15 +459,18 @@ private static final class RegionScannerHolder { private final RpcCallback shippedCallback; private byte[] rowOfLastPartialResult; private boolean needCursor; + private boolean fullRegionScan; public RegionScannerHolder(String scannerName, RegionScanner s, HRegion r, - RpcCallback closeCallBack, RpcCallback shippedCallback, boolean needCursor) { + RpcCallback closeCallBack, RpcCallback shippedCallback, boolean needCursor, + boolean fullRegionScan) { this.scannerName = scannerName; this.s = s; this.r = r; this.closeCallBack = closeCallBack; this.shippedCallback = shippedCallback; this.needCursor = needCursor; + this.fullRegionScan = fullRegionScan; } public long getNextCallSeq() { @@ -620,8 +623,22 @@ private CheckAndMutateResult checkAndMutate(HRegion region, List Row.COMPARATOR.compare(v1, v2)); } - OperationStatus[] codes = region.batchMutate(mArray, atomic, HConstants.NO_NONCE, - HConstants.NO_NONCE); + OperationStatus[] codes = region.batchMutate(mArray, atomic); + + // When atomic is true, it indicates that the mutateRow API or the batch API with + // RowMutations is called. In this case, we need to merge the results of the + // Increment/Append operations if the mutations include those operations, and set the merged + // result to the first element of the ResultOrException list + if (atomic) { + List resultOrExceptions = new ArrayList<>(); + List results = new ArrayList<>(); + for (i = 0; i < codes.length; i++) { + if (codes[i].getResult() != null) { + results.add(codes[i].getResult()); + } + if (i != 0) { + resultOrExceptions.add(getResultOrException( + ClientProtos.Result.getDefaultInstance(), i)); + } + } + + if (results.isEmpty()) { + builder.addResultOrException(getResultOrException( + ClientProtos.Result.getDefaultInstance(), 0)); + } else { + // Merge the results of the Increment/Append operations + List cellList = new ArrayList<>(); + for (Result result : results) { + if (result.rawCells() != null) { + cellList.addAll(Arrays.asList(result.rawCells())); + } + } + Result result = Result.create(cellList); + + // Set the merged result of the Increment/Append operations to the first element of the + // ResultOrException list + builder.addResultOrException(getResultOrException(ProtobufUtil.toResult(result), 0)); + } + + builder.addAllResultOrException(resultOrExceptions); + return; + } + for (i = 0; i < codes.length; i++) { Mutation currentMutation = mArray[i]; ClientProtos.Action currentAction = mutationActionMap.get(currentMutation); - int index = currentAction.hasIndex() || !atomic ? currentAction.getIndex() : i; - Exception e = null; + int index = currentAction.hasIndex() ? 
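The atomic-batch hunk above (for `mutateRow` / `RowMutations`) collects the non-null per-operation results, merges their cells, and reports the merged result once, in the first slot of the response. A toy sketch of the merging step; "cells" are plain strings here so the example stays self-contained, and this is not the real protobuf response assembly.

```java
import java.util.ArrayList;
import java.util.List;

final class AtomicResultMergeSketch {
  static List<String> mergeResults(List<List<String>> perOperationResults) {
    List<String> mergedCells = new ArrayList<>();
    for (List<String> result : perOperationResults) {
      if (result != null) {
        mergedCells.addAll(result); // only Increment/Append produce non-null results
      }
    }
    return mergedCells;
  }

  public static void main(String[] args) {
    List<List<String>> results = new ArrayList<>();
    results.add(null);                // a Put in the batch: nothing to report
    results.add(List.of("cf:a=1"));   // an Increment result
    results.add(List.of("cf:b=x"));   // an Append result
    System.out.println(mergeResults(results)); // [cf:a=1, cf:b=x]
  }
}
```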
currentAction.getIndex() : i; + Exception e; switch (codes[i].getOperationStatusCode()) { case BAD_FAMILY: e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg()); @@ -1112,34 +1183,9 @@ private void closeAllScanners() { } } - // Exposed for testing - interface LogDelegate { - void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold); - } - - private static LogDelegate DEFAULT_LOG_DELEGATE = new LogDelegate() { - @Override - public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) { - if (LOG.isWarnEnabled()) { - LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold - + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: " - + RpcServer.getRequestUserName().orElse(null) + "/" - + RpcServer.getRemoteAddress().orElse(null) - + " first region in multi=" + firstRegionName); - } - } - }; - - private final LogDelegate ld; - - public RSRpcServices(final HRegionServer rs) throws IOException { - this(rs, DEFAULT_LOG_DELEGATE); - } - // Directly invoked only for testing - RSRpcServices(final HRegionServer rs, final LogDelegate ld) throws IOException { + public RSRpcServices(final HRegionServer rs) throws IOException { final Configuration conf = rs.getConfiguration(); - this.ld = ld; regionServer = rs; rowSizeWarnThreshold = conf.getInt( HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); @@ -1255,7 +1301,6 @@ protected void requirePermission(String request, Permission.Action perm) throws } } - @VisibleForTesting public int getScannersCount() { return scanners.size(); } @@ -1351,7 +1396,7 @@ Object addSize(RpcCallContext context, Result r, Object lastBlock) { } private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Shipper shipper, - HRegion r, boolean needCursor) throws LeaseStillHeldException { + HRegion r, boolean needCursor, boolean fullRegionScan) throws LeaseStillHeldException { Lease lease = regionServer.getLeaseManager().createLease( scannerName, this.scannerLeaseTimeoutPeriod, new ScannerListener(scannerName)); RpcCallback shippedCallback = new RegionScannerShippedCallBack(scannerName, shipper, lease); @@ -1361,14 +1406,30 @@ private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Ship } else { closeCallback = new RegionScannerCloseCallBack(s); } + RegionScannerHolder rsh = - new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback, needCursor); + new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback, + needCursor, fullRegionScan); RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh); assert existing == null : "scannerId must be unique within regionserver's whole lifecycle! 
" + scannerName; return rsh; } + private boolean isFullRegionScan(Scan scan, HRegion region) { + // If the scan start row equals or less than the start key of the region + // and stop row greater than equals end key (if stop row present) + // or if the stop row is empty + // account this as a full region scan + if (Bytes.compareTo(scan.getStartRow(), region.getRegionInfo().getStartKey()) <= 0 + && (Bytes.compareTo(scan.getStopRow(), region.getRegionInfo().getEndKey()) >= 0 && + !Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW) + || Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) { + return true; + } + return false; + } + /** * Find the HRegion based on a region specifier * @@ -1377,7 +1438,6 @@ private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Ship * @throws IOException if the specifier is not null, * but failed to find the region */ - @VisibleForTesting public HRegion getRegion( final RegionSpecifier regionSpecifier) throws IOException { return regionServer.getRegion(regionSpecifier.getValue().toByteArray()); @@ -1404,12 +1464,10 @@ private List getRegions(final List regionSpecifiers, return regions; } - @VisibleForTesting public PriorityFunction getPriority() { return priority; } - @VisibleForTesting public Configuration getConfiguration() { return regionServer.getConfiguration(); } @@ -2069,48 +2127,34 @@ public OpenRegionResponse openRegion(final RpcController controller, } /** - * Wamrmup a region on this server. - * - * This method should only be called by Master. It synchrnously opens the region and + * Warmup a region on this server. + * This method should only be called by Master. It synchronously opens the region and * closes the region bringing the most important pages in cache. - *

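The `isFullRegionScan` helper added above counts a scan as a full-region scan when it starts at or before the region's start key and either has no stop row or stops at or after the region's end key. The sketch below restates that boundary check in isolation, using `Arrays.compareUnsigned` in place of HBase's `Bytes.compareTo`:

```java
import java.util.Arrays;

final class FullRegionScanCheck {
  private static final byte[] EMPTY = new byte[0];

  static boolean isFullRegionScan(byte[] scanStart, byte[] scanStop,
      byte[] regionStart, byte[] regionEnd) {
    boolean startsAtOrBeforeRegion = Arrays.compareUnsigned(scanStart, regionStart) <= 0;
    boolean stopsAtOrAfterRegion =
        Arrays.compareUnsigned(scanStop, regionEnd) >= 0 && !Arrays.equals(regionEnd, EMPTY);
    boolean noStopRow = Arrays.equals(scanStop, EMPTY);
    return startsAtOrBeforeRegion && (stopsAtOrAfterRegion || noStopRow);
  }

  public static void main(String[] args) {
    byte[] regionStart = "b".getBytes();
    byte[] regionEnd = "m".getBytes();
    // Unbounded scan: counted as a full region scan.
    System.out.println(isFullRegionScan(EMPTY, EMPTY, regionStart, regionEnd));              // true
    // Scan limited to a sub-range of the region: not a full region scan.
    System.out.println(
        isFullRegionScan("c".getBytes(), "f".getBytes(), regionStart, regionEnd));           // false
  }
}
```

As the later hunk shows, the flag is only set for non-system tables and feeds the new `rpcFullScanRequestCount` metric rather than changing scan behavior.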
    - * - * @param controller the RPC controller - * @param request the request - * @throws ServiceException */ @Override public WarmupRegionResponse warmupRegion(final RpcController controller, final WarmupRegionRequest request) throws ServiceException { - final RegionInfo region = ProtobufUtil.toRegionInfo(request.getRegionInfo()); - TableDescriptor htd; WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance(); - try { checkOpen(); String encodedName = region.getEncodedName(); byte[] encodedNameBytes = region.getEncodedNameAsBytes(); final HRegion onlineRegion = regionServer.getRegion(encodedName); - if (onlineRegion != null) { - LOG.info("Region already online. Skipping warming up " + region); + LOG.info("{} is online; skipping warmup", region); return response; } - - htd = regionServer.tableDescriptors.get(region.getTable()); - + TableDescriptor htd = regionServer.tableDescriptors.get(region.getTable()); if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) { - LOG.info("Region is in transition. Skipping warmup " + region); + LOG.info("{} is in transition; skipping warmup", region); return response; } - - LOG.info("Warming up region " + region.getRegionNameAsString()); + LOG.info("Warmup {}", region.getRegionNameAsString()); HRegion.warmupHRegion(region, htd, regionServer.getWAL(region), regionServer.getConfiguration(), regionServer, null); - } catch (IOException ie) { - LOG.error("Failed warming up region " + region.getRegionNameAsString(), ie); + LOG.error("Failed warmup of {}", region.getRegionNameAsString(), ie); throw new ServiceException(ie); } @@ -2299,6 +2343,7 @@ public RollWALWriterResponse rollWALWriter(final RpcController controller, @QosPriority(priority=HConstants.ADMIN_QOS) public StopServerResponse stopServer(final RpcController controller, final StopServerRequest request) throws ServiceException { + rpcPreCheck("stopServer"); requestCount.increment(); String reason = request.getReason(); regionServer.stop(reason); @@ -2308,6 +2353,7 @@ public StopServerResponse stopServer(final RpcController controller, @Override public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, UpdateFavoredNodesRequest request) throws ServiceException { + rpcPreCheck("updateFavoredNodes"); List openInfoList = request.getUpdateInfoList(); UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder(); for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) { @@ -2627,12 +2673,15 @@ private void checkBatchSizeAndLogLargeSize(MultiRequest request) throws ServiceE sum += regionAction.getActionCount(); } if (sum > rowSizeWarnThreshold) { - ld.logBatchWarning(firstRegionName, sum, rowSizeWarnThreshold); + LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold + + ") (HBASE-18023)." 
+ " Requested Number of Rows: " + sum + " Client: " + + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null) + " first region in multi=" + firstRegionName); if (rejectRowsWithSizeOverThreshold) { throw new ServiceException( - "Rejecting large batch operation for current batch with firstRegionName: " - + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " - + rowSizeWarnThreshold); + "Rejecting large batch operation for current batch with firstRegionName: " + + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " + + rowSizeWarnThreshold); } } } @@ -2821,6 +2870,8 @@ cellScanner, new DoNotRetryIOException(region.getRegionInfo() regionActionResultBuilder.setProcessed(result.isSuccess()); for (int i = 0; i < regionAction.getActionCount(); i++) { if (i == 0 && result.getResult() != null) { + // Set the result of the Increment/Append operations to the first element of the + // ResultOrException list resultOrExceptionOrBuilder.setIndex(i); regionActionResultBuilder.addResultOrException(resultOrExceptionOrBuilder .setResult(ProtobufUtil.toResult(result.getResult())).build()); @@ -2851,7 +2902,7 @@ cellScanner, new DoNotRetryIOException(region.getRegionInfo() cellScanner, spaceQuotaEnforcement); regionActionResultBuilder.setProcessed(true); // We no longer use MultiResponse#processed. Instead, we use - // RegionActionResult#condition. This is for backward compatibility for old clients. + // RegionActionResult#processed. This is for backward compatibility for old clients. responseBuilder.setProcessed(true); } catch (IOException e) { rpcServer.getMetrics().exception(e); @@ -3076,15 +3127,18 @@ private CheckAndMutateResult checkAndMutate(HRegion region, OperationQuota quota MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); - metricsRegionServer.updateCheckAndMutate(after - before); + metricsRegionServer.updateCheckAndMutate( + region.getRegionInfo().getTable(), after - before); MutationType type = mutation.getMutateType(); switch (type) { case PUT: - metricsRegionServer.updateCheckAndPut(after - before); + metricsRegionServer.updateCheckAndPut( + region.getRegionInfo().getTable(), after - before); break; case DELETE: - metricsRegionServer.updateCheckAndDelete(after - before); + metricsRegionServer.updateCheckAndDelete( + region.getRegionInfo().getTable(), after - before); break; default: break; @@ -3183,7 +3237,12 @@ private RegionScannerHolder newRegionScanner(ScanRequest request, ScanResponse.B builder.setMvccReadPoint(scanner.getMvccReadPoint()); builder.setTtl(scannerLeaseTimeoutPeriod); String scannerName = String.valueOf(scannerId); - return addScanner(scannerName, scanner, shipper, region, scan.isNeedCursorResult()); + + boolean fullRegionScan = !region.getRegionInfo().getTable().isSystemTable() && + isFullRegionScan(scan, region); + + return addScanner(scannerName, scanner, shipper, region, scan.isNeedCursorResult(), + fullRegionScan); } private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh) @@ -3497,6 +3556,9 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } throw new ServiceException(e); } + if (rsh.fullRegionScan) { + rpcFullScanRequestCount.increment(); + } HRegion region = rsh.r; String scannerName = rsh.scannerName; LeaseManager.Lease lease; @@ -3712,6 +3774,7 @@ public UpdateConfigurationResponse updateConfiguration( 
RpcController controller, UpdateConfigurationRequest request) throws ServiceException { try { + requirePermission("updateConfiguration", Permission.Action.ADMIN); this.regionServer.updateConfiguration(); } catch (Exception e) { throw new ServiceException(e); @@ -3744,7 +3807,8 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots( @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, - ClearRegionBlockCacheRequest request) { + ClearRegionBlockCacheRequest request) throws ServiceException { + rpcPreCheck("clearRegionBlockCache"); ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder(); CacheEvictionStatsBuilder stats = CacheEvictionStats.builder(); @@ -3851,19 +3915,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, } } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getSlowLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - private List getSlowLogPayloads(SlowLogResponseRequest request, NamedQueueRecorder namedQueueRecorder) { if (namedQueueRecorder == null) { @@ -3881,23 +3932,11 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, return slowLogPayloads; } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getLargeLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, - final ClearSlowLogResponseRequest request) { + final ClearSlowLogResponseRequest request) throws ServiceException { + rpcPreCheck("clearSlowLogsResponses"); final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) @@ -3911,6 +3950,7 @@ public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controll } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public HBaseProtos.LogEntry getLogEntries(RpcController controller, HBaseProtos.LogRequest request) throws ServiceException { try { @@ -3940,7 +3980,6 @@ public HBaseProtos.LogEntry getLogEntries(RpcController controller, throw new ServiceException("Invalid request params"); } - @VisibleForTesting public RpcScheduler getRpcScheduler() { return rpcServer.getScheduler(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 79df0013e087..1457cda1fcae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -199,7 +199,8 @@ public interface Region extends ConfigurationObserver { 
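The `checkBatchSizeAndLogLargeSize` hunk a little above inlines the old logging delegate: sum the row counts across all region actions, warn once the configurable threshold is crossed, and optionally reject the whole batch. The sketch below restates that guard in isolation; the exception type is generic here, whereas the server wraps it as a `ServiceException`.

```java
import java.util.List;

final class LargeBatchGuardSketch {
  static void checkBatchSize(List<Integer> actionCountsPerRegionAction,
      int rowSizeWarnThreshold, boolean rejectRowsWithSizeOverThreshold) {
    int sum = actionCountsPerRegionAction.stream().mapToInt(Integer::intValue).sum();
    if (sum > rowSizeWarnThreshold) {
      System.err.println("Large batch operation detected: rows=" + sum
          + " threshold=" + rowSizeWarnThreshold);
      if (rejectRowsWithSizeOverThreshold) {
        throw new IllegalStateException("Rejecting large batch operation: rows=" + sum
            + " threshold=" + rowSizeWarnThreshold);
      }
    }
  }

  public static void main(String[] args) {
    checkBatchSize(List.of(2000, 3500), 5000, false); // warns only
    try {
      checkBatchSize(List.of(2000, 3500), 5000, true); // warns and rejects
    } catch (IllegalStateException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}
```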
*/ enum Operation { ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE, - REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH + REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH, + CHECK_AND_MUTATE } /** @@ -292,8 +293,9 @@ public interface RowLock { /** * Perform a batch of mutations. *

    - * Note this supports only Put, Delete, Increment and Append mutations and will ignore other - * types passed. + * Please do not operate on a same column of a single row in a batch, we will not consider the + * previous operation in the same batch when performing the operations in the batch. + * * @param mutations the list of mutations * @return an array of OperationStatus which internally contains the * OperationStatusCode and the exceptionMessage if any. @@ -530,13 +532,14 @@ boolean checkAndRowMutate(byte [] row, Filter filter, TimeRange timeRange, Result increment(Increment increment) throws IOException; /** - * Performs multiple mutations atomically on a single row. Currently - * {@link Put} and {@link Delete} are supported. + * Performs multiple mutations atomically on a single row. * * @param mutations object that specifies the set of mutations to perform atomically + * @return results of Increment/Append operations. If no Increment/Append operations, it returns + * null * @throws IOException */ - void mutateRow(RowMutations mutations) throws IOException; + Result mutateRow(RowMutations mutations) throws IOException; /** * Perform atomic mutations within the region. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 5ebf7e1c1590..7ed23f695ecd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -79,7 +79,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.AbstractReferenceMap; @@ -102,6 +101,13 @@ public class RegionCoprocessorHost // optimization: no need to call postScannerFilterRow, if no coprocessor implements it private final boolean hasCustomPostScannerFilterRow; + /* + * Whether any configured CPs override postScannerFilterRow hook + */ + public boolean hasCustomPostScannerFilterRow() { + return hasCustomPostScannerFilterRow; + } + /** * * Encapsulation of the environment of each coprocessor @@ -275,11 +281,10 @@ public RegionCoprocessorHost(final HRegion region, out: for (RegionCoprocessorEnvironment env: coprocEnvironments) { if (env.getInstance() instanceof RegionObserver) { Class clazz = env.getInstance().getClass(); - for(;;) { - if (clazz == null) { - // we must have directly implemented RegionObserver - hasCustomPostScannerFilterRow = true; - break out; + for (;;) { + if (clazz == Object.class) { + // we dont need to look postScannerFilterRow into Object class + break; // break the inner loop } try { clazz.getDeclaredMethod("postScannerFilterRow", ObserverContext.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java new file mode 100644 index 000000000000..5d81687cbf45 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -0,0 +1,782 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.IsolationLevel; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.FilterWrapper; +import org.apache.hadoop.hbase.filter.IncompatibleFilterException; +import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcCallback; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.Region.Operation; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). + */ +@InterfaceAudience.Private +class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { + + private static final Logger LOG = LoggerFactory.getLogger(RegionScannerImpl.class); + + // Package local for testability + KeyValueHeap storeHeap = null; + + /** + * Heap of key-values that are not essential for the provided filters and are thus read on demand, + * if on-demand column family loading is enabled. + */ + KeyValueHeap joinedHeap = null; + + /** + * If the joined heap data gathering is interrupted due to scan limits, this will contain the row + * for which we are populating the values. 
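The field javadoc above describes the two heaps the extracted `RegionScannerImpl` maintains: the store heap for column families the filter needs on every row, and the joined heap for families loaded lazily when on-demand column family loading is enabled. A toy illustration of deciding which heap a family's scanner feeds; the predicate stands in for the filter's essential-family test and none of these are HBase types.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class TwoHeapSplitSketch {
  static void split(List<String> families, Predicate<String> isFamilyEssential,
      boolean loadColumnFamiliesOnDemand, List<String> primary, List<String> joined) {
    for (String family : families) {
      if (!loadColumnFamiliesOnDemand || isFamilyEssential.test(family)) {
        primary.add(family);   // needed to evaluate the filter on every row
      } else {
        joined.add(family);    // fetched lazily, only for rows that pass the filter
      }
    }
  }

  public static void main(String[] args) {
    List<String> primary = new ArrayList<>();
    List<String> joined = new ArrayList<>();
    split(List.of("meta", "payload"), "meta"::equals, true, primary, joined);
    System.out.println("primary=" + primary + " joined=" + joined); // primary=[meta] joined=[payload]
  }
}
```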
+ */ + protected Cell joinedContinuationRow = null; + private boolean filterClosed = false; + + protected final byte[] stopRow; + protected final boolean includeStopRow; + protected final HRegion region; + protected final CellComparator comparator; + + private final ConcurrentHashMap scannerReadPoints; + + private final long readPt; + private final long maxResultSize; + private final ScannerContext defaultScannerContext; + private final FilterWrapper filter; + + private RegionServerServices rsServices; + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + private static boolean hasNonce(HRegion region, long nonce) { + RegionServerServices rsServices = region.getRegionServerServices(); + return nonce != HConstants.NO_NONCE && rsServices != null && + rsServices.getNonceManager() != null; + } + + RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, + long nonceGroup, long nonce) throws IOException { + this.region = region; + this.maxResultSize = scan.getMaxResultSize(); + if (scan.hasFilter()) { + this.filter = new FilterWrapper(scan.getFilter()); + } else { + this.filter = null; + } + this.comparator = region.getCellComparator(); + /** + * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default + * scanner context that can be used to enforce the batch limit in the event that a + * ScannerContext is not specified during an invocation of next/nextRaw + */ + defaultScannerContext = ScannerContext.newBuilder().setBatchLimit(scan.getBatch()).build(); + this.stopRow = scan.getStopRow(); + this.includeStopRow = scan.includeStopRow(); + + // synchronize on scannerReadPoints so that nobody calculates + // getSmallestReadPoint, before scannerReadPoints is updated. + IsolationLevel isolationLevel = scan.getIsolationLevel(); + long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); + this.scannerReadPoints = region.scannerReadPoints; + this.rsServices = region.getRegionServerServices(); + synchronized (scannerReadPoints) { + if (mvccReadPoint > 0) { + this.readPt = mvccReadPoint; + } else if (hasNonce(region, nonce)) { + this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); + } else { + this.readPt = region.getReadPoint(isolationLevel); + } + scannerReadPoints.put(this, this.readPt); + } + initializeScanners(scan, additionalScanners); + } + + private void initializeScanners(Scan scan, List additionalScanners) + throws IOException { + // Here we separate all scanners into two lists - scanner that provide data required + // by the filter to operate (scanners list) and all others (joinedScanners list). 
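Whether a store scanner ends up in the joined (lazy) list depends on `Filter#isFamilyEssential` together with the on-demand flag the client sets on the `Scan`. A hedged client-side sketch of requesting this behaviour; the family and qualifier names are made up for illustration.

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OnDemandColumnFamilyScan {
  static Scan buildScan() {
    byte[] essentialCf = Bytes.toBytes("meta");    // hypothetical family the filter inspects
    byte[] lazyCf = Bytes.toBytes("payload");      // hypothetical large family loaded on demand
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        essentialCf, Bytes.toBytes("state"), CompareOperator.EQUAL, Bytes.toBytes("ACTIVE"));
    filter.setFilterIfMissing(true);
    Scan scan = new Scan()
        .addFamily(essentialCf)
        .addFamily(lazyCf)
        // Non-essential families go to the joinedScanners list on the server and are
        // only fetched for rows that pass the filter.
        .setLoadColumnFamiliesOnDemand(true);
    scan.setFilter(filter);
    return scan;
  }
}
```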
+ List scanners = new ArrayList<>(scan.getFamilyMap().size()); + List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); + // Store all already instantiated scanners for exception handling + List instantiatedScanners = new ArrayList<>(); + // handle additionalScanners + if (additionalScanners != null && !additionalScanners.isEmpty()) { + scanners.addAll(additionalScanners); + instantiatedScanners.addAll(additionalScanners); + } + + try { + for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { + HStore store = region.getStore(entry.getKey()); + KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); + instantiatedScanners.add(scanner); + if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() || + this.filter.isFamilyEssential(entry.getKey())) { + scanners.add(scanner); + } else { + joinedScanners.add(scanner); + } + } + initializeKVHeap(scanners, joinedScanners, region); + } catch (Throwable t) { + throw handleException(instantiatedScanners, t); + } + } + + protected void initializeKVHeap(List scanners, + List joinedScanners, HRegion region) throws IOException { + this.storeHeap = new KeyValueHeap(scanners, comparator); + if (!joinedScanners.isEmpty()) { + this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); + } + } + + private IOException handleException(List instantiatedScanners, Throwable t) { + // remove scaner read point before throw the exception + scannerReadPoints.remove(this); + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + } else { + // close all already instantiated scanners before throwing the exception + for (KeyValueScanner scanner : instantiatedScanners) { + scanner.close(); + } + } + return t instanceof IOException ? (IOException) t : new IOException(t); + } + + @Override + public long getMaxResultSize() { + return maxResultSize; + } + + @Override + public long getMvccReadPoint() { + return this.readPt; + } + + @Override + public int getBatch() { + return this.defaultScannerContext.getBatchLimit(); + } + + /** + * Reset both the filter and the old filter. + * @throws IOException in case a filter raises an I/O exception. + */ + protected final void resetFilters() throws IOException { + if (filter != null) { + filter.reset(); + } + } + + @Override + public boolean next(List outResults) throws IOException { + // apply the batching limit by default + return next(outResults, defaultScannerContext); + } + + @Override + public synchronized boolean next(List outResults, ScannerContext scannerContext) + throws IOException { + if (this.filterClosed) { + throw new UnknownScannerException("Scanner was closed (timed out?) " + + "after we renewed it. Could be caused by a very slow scanner " + + "or a lengthy garbage collection"); + } + region.startRegionOperation(Operation.SCAN); + try { + return nextRaw(outResults, scannerContext); + } finally { + region.closeRegionOperation(Operation.SCAN); + } + } + + @Override + public boolean nextRaw(List outResults) throws IOException { + // Use the RegionScanner's context by default + return nextRaw(outResults, defaultScannerContext); + } + + @Override + public boolean nextRaw(List outResults, ScannerContext scannerContext) throws IOException { + if (storeHeap == null) { + // scanner is closed + throw new UnknownScannerException("Scanner was closed"); + } + boolean moreValues = false; + if (outResults.isEmpty()) { + // Usually outResults is empty. 
This is true when next is called + // to handle scan or get operation. + moreValues = nextInternal(outResults, scannerContext); + } else { + List tmpList = new ArrayList<>(); + moreValues = nextInternal(tmpList, scannerContext); + outResults.addAll(tmpList); + } + + if (!outResults.isEmpty()) { + region.addReadRequestsCount(1); + if (region.getMetrics() != null) { + region.getMetrics().updateReadRequestCount(); + } + } + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); + } + + // If the size limit was reached it means a partial Result is being returned. Returning a + // partial Result means that we should not reset the filters; filters should only be reset in + // between rows + if (!scannerContext.mayHaveMoreCellsInRow()) { + resetFilters(); + } + + if (isFilterDoneInternal()) { + moreValues = false; + } + return moreValues; + } + + /** + * @return true if more cells exist after this batch, false if scanner is done + */ + private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) + throws IOException { + assert joinedContinuationRow != null; + boolean moreValues = + populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); + + if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + // We are done with this row, reset the continuation. + joinedContinuationRow = null; + } + // As the data is obtained from two independent heaps, we need to + // ensure that result list is sorted, because Result relies on that. + results.sort(comparator); + return moreValues; + } + + /** + * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is + * reached, or remainingResultSize (if not -1) is reaced + * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. + * @return state of last call to {@link KeyValueHeap#next()} + */ + private boolean populateResult(List results, KeyValueHeap heap, + ScannerContext scannerContext, Cell currentRowCell) throws IOException { + Cell nextKv; + boolean moreCellsInRow = false; + boolean tmpKeepProgress = scannerContext.getKeepProgress(); + // Scanning between column families and thus the scope is between cells + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + do { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // We want to maintain any progress that is made towards the limits while scanning across + // different column families. To do this, we toggle the keep progress flag on during calls + // to the StoreScanner to ensure that any progress made thus far is not wiped away. + scannerContext.setKeepProgress(true); + heap.next(results, scannerContext); + scannerContext.setKeepProgress(tmpKeepProgress); + + nextKv = heap.peek(); + moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); + if (!moreCellsInRow) { + incrementCountOfRowsScannedMetric(scannerContext); + } + if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { + return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); + } else if (scannerContext.checkSizeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ? 
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } else if (scannerContext.checkTimeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } + } while (moreCellsInRow); + return nextKv != null; + } + + /** + * Based on the nextKv in the heap, and the current row, decide whether or not there are more + * cells to be read in the heap. If the row of the nextKv in the heap matches the current row then + * there are more cells to be read in the row. + * @return true When there are more cells in the row to be read + */ + private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { + return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); + } + + /** + * @return True if a filter rules the scanner is over, done. + */ + @Override + public synchronized boolean isFilterDone() throws IOException { + return isFilterDoneInternal(); + } + + private boolean isFilterDoneInternal() throws IOException { + return this.filter != null && this.filter.filterAllRemaining(); + } + + private void checkClientDisconnect(Optional rpcCall) throws CallerDisconnectedException { + if (rpcCall.isPresent()) { + // If a user specifies a too-restrictive or too-slow scanner, the + // client might time out and disconnect while the server side + // is still processing the request. We should abort aggressively + // in that case. + long afterTime = rpcCall.get().disconnectSince(); + if (afterTime >= 0) { + throw new CallerDisconnectedException( + "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + this + + " after " + afterTime + " ms, since " + "caller disconnected"); + } + } + } + + private void resetProgress(ScannerContext scannerContext, int initialBatchProgress, + long initialSizeProgress, long initialHeapSizeProgress) { + // Starting to scan a new row. Reset the scanner progress according to whether or not + // progress should be kept. + if (scannerContext.getKeepProgress()) { + // Progress should be kept. Reset to initial values seen at start of method invocation. + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + } + + private boolean nextInternal(List results, ScannerContext scannerContext) + throws IOException { + Preconditions.checkArgument(results.isEmpty(), "First parameter should be an empty list"); + Preconditions.checkArgument(scannerContext != null, "Scanner context cannot be null"); + Optional rpcCall = RpcServer.getCurrentCall(); + + // Save the initial progress from the Scanner context in these local variables. The progress + // may need to be reset a few times if rows are being filtered out so we save the initial + // progress. + int initialBatchProgress = scannerContext.getBatchProgress(); + long initialSizeProgress = scannerContext.getDataSizeProgress(); + long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); + + // Used to check time limit + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + + // The loop here is used only when at some point during the next we determine + // that due to effects of filters or otherwise, we have an empty row in the result. + // Then we loop and try again. 
Otherwise, we must get out on the first iteration via return, + // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, + // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). + while (true) { + resetProgress(scannerContext, initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + checkClientDisconnect(rpcCall); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // Let's see what we have in the storeHeap. + Cell current = this.storeHeap.peek(); + + boolean shouldStop = shouldStop(current); + // When has filter row is true it means that the all the cells for a particular row must be + // read before a filtering decision can be made. This means that filters where hasFilterRow + // run the risk of enLongAddering out of memory errors in the case that they are applied to a + // table that has very large rows. + boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); + + // If filter#hasFilterRow is true, partial results are not allowed since allowing them + // would prevent the filters from being evaluated. Thus, if it is true, change the + // scope of any limits that could potentially create partial results to + // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row + if (hasFilterRow) { + if (LOG.isTraceEnabled()) { + LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + + " formed. Changing scope of limits that may create partials"); + } + scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); + scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); + limitScope = LimitScope.BETWEEN_ROWS; + } + + if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check if we were getting data from the joinedHeap and hit the limit. + // If not, then it's main path - getting results from storeHeap. + if (joinedContinuationRow == null) { + // First, check if we are at a stop row. If so, there are no more results. + if (shouldStop) { + if (hasFilterRow) { + filter.filterRowCells(results); + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Check if rowkey filter wants to exclude this row. If so, loop to next. + // Technically, if we hit limits before on this row, we don't need this call. + if (filterRowKey(current)) { + incrementCountOfRowsFilteredMetric(scannerContext); + // early check, see HBASE-16296 + if (isFilterDoneInternal()) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + // Typically the count of rows scanned is incremented inside #populateResult. However, + // here we are filtering a row based purely on its row key, preventing us from calling + // #populateResult. 
Thus, perform the necessary increment here to rows scanned metric + incrementCountOfRowsScannedMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + results.clear(); + + // Read nothing as the rowkey was filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + + // Ok, we are good, let's try to get some results from the main heap. + populateResult(results, this.storeHeap, scannerContext, current); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + Cell nextKv = this.storeHeap.peek(); + shouldStop = shouldStop(nextKv); + // save that the row was empty before filters applied to it. + final boolean isEmptyRow = results.isEmpty(); + + // We have the part of the row necessary for filtering (all of it, usually). + // First filter with the filterRow(List). + FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; + if (hasFilterRow) { + ret = filter.filterRowCellsWithRet(results); + + // We don't know how the results have changed after being filtered. Must set progress + // according to contents of results now. + if (scannerContext.getKeepProgress()) { + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + scannerContext.incrementBatchProgress(results.size()); + for (Cell cell : results) { + scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), + cell.heapSize()); + } + } + + if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { + incrementCountOfRowsFilteredMetric(scannerContext); + results.clear(); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // This row was totally filtered out, if this is NOT the last row, + // we should continue on. Otherwise, nothing else to do. + if (!shouldStop) { + // Read nothing as the cells was filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Ok, we are done with storeHeap for this row. + // Now we may need to fetch additional, non-essential data into row. + // These values are not needed for filter to work, so we postpone their + // fetch to (possibly) reduce amount of data loads from disk. + if (this.joinedHeap != null) { + boolean mayHaveData = joinedHeapMayHaveData(current); + if (mayHaveData) { + joinedContinuationRow = current; + populateFromJoinedHeap(results, scannerContext); + + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + } + } else { + // Populating from the joined heap was stopped by limits, populate some more. 
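The size/time limit checks in this loop can stop a scan mid-row, which clients observe as partial `Result`s. A hedged sketch of reassembling rows on the client, using the public `Scan#setAllowPartialResults` and `Result#mayHaveMoreCellsInRow` APIs; the 2 MB result-size cap is an arbitrary example value.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class PartialResultScanExample {
  /** Streams a scan and rebuilds complete rows from partial Results. */
  static void scanWithPartials(Table table, Scan scan) throws IOException {
    scan.setAllowPartialResults(true);
    scan.setMaxResultSize(2L * 1024 * 1024);  // example per-RPC size cap
    List<Cell> currentRow = new ArrayList<>();
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result partial : scanner) {
        if (partial.listCells() != null) {
          currentRow.addAll(partial.listCells());
        }
        if (!partial.mayHaveMoreCellsInRow()) {
          // Row is complete; hand it off and start collecting the next one.
          handleRow(Result.create(currentRow));
          currentRow = new ArrayList<>();
        }
      }
    }
  }

  static void handleRow(Result row) {
    System.out.println("row: " + row);
  }
}
```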
+ populateFromJoinedHeap(results, scannerContext); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + // We may have just called populateFromJoinedMap and hit the limits. If that is + // the case, we need to call it again on the next next() invocation. + if (joinedContinuationRow != null) { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + + // Finally, we are done with both joinedHeap and storeHeap. + // Double check to prevent empty rows from appearing in result. It could be + // the case when SingleColumnValueExcludeFilter is used. + if (results.isEmpty()) { + incrementCountOfRowsFilteredMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + if (!shouldStop) { + continue; + } + } + + if (shouldStop) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } else { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + } + } + + private void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { + region.filteredReadRequestsCount.increment(); + if (region.getMetrics() != null) { + region.getMetrics().updateFilteredRecords(); + } + + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); + } + + private void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); + } + + /** + * @return true when the joined heap may have data for the current row + */ + private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { + Cell nextJoinedKv = joinedHeap.peek(); + boolean matchCurrentRow = + nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); + boolean matchAfterSeek = false; + + // If the next value in the joined heap does not match the current row, try to seek to the + // correct row + if (!matchCurrentRow) { + Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); + boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); + matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && + CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); + } + + return matchCurrentRow || matchAfterSeek; + } + + /** + * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines both + * filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, it may + * not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only returns true + * when filterRow({@code List kvs}) is overridden not the filterRow(). Therefore, the + * filterRow() will be skipped. + */ + private boolean filterRow() throws IOException { + // when hasFilterRow returns true, filter.filterRow() will be called automatically inside + // filterRowCells(List kvs) so we skip that scenario here. + return filter != null && (!filter.hasFilterRow()) && filter.filterRow(); + } + + private boolean filterRowKey(Cell current) throws IOException { + return filter != null && filter.filterRowKey(current); + } + + /** + * A mocked list implementation - discards all updates. 
+ */ + private static final List MOCKED_LIST = new AbstractList() { + + @Override + public void add(int index, Cell element) { + // do nothing + } + + @Override + public boolean addAll(int index, Collection c) { + return false; // this list is never changed as a result of an update + } + + @Override + public KeyValue get(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + return 0; + } + }; + + protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { + assert this.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; + Cell next; + while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + this.storeHeap.next(MOCKED_LIST); + } + resetFilters(); + + // Calling the hook in CP which allows it to do a fast forward + return this.region.getCoprocessorHost() == null || + this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell); + } + + protected boolean shouldStop(Cell currentRowCell) { + if (currentRowCell == null) { + return true; + } + if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { + return false; + } + int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); + return c > 0 || (c == 0 && !includeStopRow); + } + + @Override + public synchronized void close() { + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + } + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + // no need to synchronize here. + scannerReadPoints.remove(this); + this.filterClosed = true; + } + + @Override + public synchronized boolean reseek(byte[] row) throws IOException { + if (row == null) { + throw new IllegalArgumentException("Row cannot be null."); + } + boolean result = false; + region.startRegionOperation(); + Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); + try { + // use request seek to make use of the lazy seek option. See HBASE-5520 + result = this.storeHeap.requestSeek(kv, true, true); + if (this.joinedHeap != null) { + result = this.joinedHeap.requestSeek(kv, true, true) || result; + } + } finally { + region.closeRegionOperation(); + } + return result; + } + + @Override + public void shipped() throws IOException { + if (storeHeap != null) { + storeHeap.shipped(); + } + if (joinedHeap != null) { + joinedHeap.shipped(); + } + } + + @Override + public void run() throws IOException { + // This is the RPC callback method executed. 
We do the close in of the scanner in this + // callback + this.close(); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 9e395d4f5cc5..fce8df172643 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -317,4 +317,4 @@ boolean reportFileArchivalForQuotas( * @return {@link ZKPermissionWatcher} */ ZKPermissionWatcher getZKPermissionWatcher(); -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java index ec6c0493bb75..812ae45e8840 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java @@ -65,6 +65,18 @@ public void updateDeleteBatch(TableName table, long time) { latencies.updateDeleteBatch(table.getNameAsString(), time); } + public void updateCheckAndDelete(TableName table, long time) { + latencies.updateCheckAndDelete(table.getNameAsString(), time); + } + + public void updateCheckAndPut(TableName table, long time) { + latencies.updateCheckAndPut(table.getNameAsString(), time); + } + + public void updateCheckAndMutate(TableName table, long time) { + latencies.updateCheckAndMutate(table.getNameAsString(), time); + } + public void updateScanTime(TableName table, long time) { latencies.updateScanTime(table.getNameAsString(), time); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index 36392d7ef73f..06795a58545e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -113,7 +112,6 @@ public int getNumStores() { return region.getTableDescriptor().getColumnFamilyCount(); } - @VisibleForTesting long getMemStoreSize() { return region.getMemStoreDataSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 981f090534a3..63e050a710ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -41,9 +41,6 @@ class RemoteProcedureResultReporter extends Thread { private static final Logger LOG = LoggerFactory.getLogger(RemoteProcedureResultReporter.class); - // Time to pause if master says 'please hold'. Make configurable if needed. 
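The new per-table latency methods above (`updateCheckAndPut`, `updateCheckAndDelete`, `updateCheckAndMutate`) time the server-side check-and-mutate path. For reference, a hedged sketch of the client-side operation being measured, using the `CheckAndMutateBuilder` form of the API; the row, family, and values are hypothetical.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateExample {
  /** Applies the Put only if the current value of cf:state equals "PAID". */
  static boolean markShipped(Table table) throws IOException {
    byte[] row = Bytes.toBytes("order-42");  // hypothetical row key
    byte[] cf = Bytes.toBytes("cf");         // hypothetical column family
    Put put = new Put(row).addColumn(cf, Bytes.toBytes("state"), Bytes.toBytes("SHIPPED"));
    return table.checkAndMutate(row, cf)
        .qualifier(Bytes.toBytes("state"))
        .ifEquals(Bytes.toBytes("PAID"))
        .thenPut(put);
  }
}
```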
- private static final int INIT_PAUSE_TIME_MS = 1000; - private static final int MAX_BATCH = 100; private final HRegionServer server; @@ -98,9 +95,9 @@ public void run() { long pauseTime; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(INIT_PAUSE_TIME_MS, tries); + pauseTime = ConnectionUtils.getPauseTime(server.getRetryPauseTime(), tries); } else { - pauseTime = INIT_PAUSE_TIME_MS; // Reset. + pauseTime = server.getRetryPauseTime(); // Reset. } LOG.info("Failed procedure report " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + (pause ? " after " + pauseTime + "ms delay (Master is coming online...)." diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index e9bbaea8ae46..33b3321755fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,14 +32,11 @@ */ @InterfaceAudience.Private public interface ReplicationService { - /** * Initializes the replication service object. - * @param walProvider can be null if not initialized inside a live region server environment, for - * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) + throws IOException; /** * Start replication services. 
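The `RemoteProcedureResultReporter` hunk earlier replaces the hard-coded `INIT_PAUSE_TIME_MS` with a pause time read from server configuration and fed into `ConnectionUtils.getPauseTime`. A hedged, self-contained sketch of that backoff pattern (the multiplier table and jitter factor here are illustrative, not the exact values HBase ships):

```java
import java.util.concurrent.ThreadLocalRandom;

public class RetryBackoffSketch {
  // Illustrative multiplier table in the spirit of HConstants.RETRY_BACKOFF.
  private static final int[] BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100};

  /** Pause for the given retry attempt, scaled from a configurable base pause plus small jitter. */
  static long getPauseTime(long basePauseMs, int tries) {
    int index = Math.min(tries, BACKOFF.length - 1);
    long normal = basePauseMs * BACKOFF[index];
    long jitter = (long) (normal * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normal + jitter;
  }

  public static void main(String[] args) {
    long basePauseMs = 1000L;  // previously hard-coded; now supplied by the region server config
    for (int tries = 0; tries < 5; tries++) {
      System.out.printf("retry #%d -> pause ~%d ms%n", tries, getPauseTime(basePauseMs, tries));
    }
  }
}
```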
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index 3ca064f05101..d1995f237d2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -37,15 +36,9 @@ @InterfaceAudience.Private class ReversedRegionScannerImpl extends RegionScannerImpl { - /** - * @param scan - * @param additionalScanners - * @param region - * @throws IOException - */ - ReversedRegionScannerImpl(Scan scan, List additionalScanners, HRegion region) - throws IOException { - region.super(scan, additionalScanners, region); + ReversedRegionScannerImpl(Scan scan, List additionalScanners, HRegion region, + long nonceGroup, long nonce) throws IOException { + super(scan, additionalScanners, region, nonceGroup, nonce); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java index dbd393db9884..d1d1cfc52942 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java @@ -35,11 +35,4 @@ public interface RpcSchedulerFactory { * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}. */ RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server); - - /** - * @deprecated since 1.0.0. - * @see HBASE-12028 - */ - @Deprecated - RpcScheduler create(Configuration conf, PriorityFunction priority); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java index 7d6161849ade..4e2066601c5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -26,15 +25,13 @@ import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; /** * Immutable information for scans over a store. */ // Has to be public for PartitionedMobCompactor to access; ditto on tests making use of a few of // the accessors below. Shutdown access. 
TODO -@VisibleForTesting @InterfaceAudience.Private public class ScanInfo { private byte[] family; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java index 15d87101a8f2..e8a9154d4107 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java @@ -57,8 +57,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest; @@ -166,12 +164,10 @@ public void cleanupBulkLoad(final HRegion region, final CleanupBulkLoadRequest r private Consumer fsCreatedListener; - @VisibleForTesting void setFsCreatedListener(Consumer fsCreatedListener) { this.fsCreatedListener = fsCreatedListener; } - private void incrementUgiReference(UserGroupInformation ugi) { // if we haven't seen this ugi before, make a new counter ugiReferenceCounter.compute(ugi, (key, value) -> { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index b1c92a4e8d5b..b0763aa3835a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This is an abstraction of a segment maintained in a memstore, e.g., the active @@ -181,7 +180,6 @@ public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) { /** * Get cell length after serialized in {@link KeyValue} */ - @VisibleForTesting static int getCellLength(Cell cell) { return cell.getSerializedSize(); } @@ -414,7 +412,6 @@ protected SortedSet tailSet(Cell firstCell) { return getCellSet().tailSet(firstCell); } - @VisibleForTesting MemStoreLAB getMemStoreLAB() { return memStoreLAB; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index 0b1d251d9898..e0ea974b1945 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -27,13 +27,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.NonceKey; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.NonceKey; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implementation of nonce manager that stores nonces in a 
hash map and cleans them up after @@ -134,7 +132,6 @@ public ServerNonceManager(Configuration conf) { } } - @VisibleForTesting public void setConflictWaitIterationMs(int conflictWaitIterationMs) { this.conflictWaitIterationMs = conflictWaitIterationMs; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java index 22a9da548d6f..06b004321c55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java @@ -32,16 +32,6 @@ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { - /** - * @deprecated since 1.0.0. - * @see HBASE-12028 - */ - @Override - @Deprecated - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } - @Override public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) { int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 90e705c71867..ed3a73fbcfaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -48,7 +48,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This worker is spawned in every regionserver, including master. The Worker waits for log @@ -289,7 +288,6 @@ enum Status { * Returns the number of tasks processed by coordination. * This method is used by tests only */ - @VisibleForTesting public int getTaskReadySeq() { return coordination.getTaskReadySeq(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java new file mode 100644 index 000000000000..26233505db73 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.function.Supplier; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This carries the immutable information and references on some of the meta data about the HStore. + * This meta data can be used across the HFileWriter/Readers and other HStore consumers without the + * need of passing around the complete store. + */ +@InterfaceAudience.Private +public final class StoreContext implements HeapSize { + public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); + + private final int blockSize; + private final Encryption.Context encryptionContext; + private final CacheConfig cacheConf; + private final HRegionFileSystem regionFileSystem; + private final CellComparator comparator; + private final BloomType bloomFilterType; + private final Supplier> compactedFilesSupplier; + private final Supplier favoredNodesSupplier; + private final ColumnFamilyDescriptor family; + private final Path familyStoreDirectoryPath; + private final RegionCoprocessorHost coprocessorHost; + + private StoreContext(Builder builder) { + this.blockSize = builder.blockSize; + this.encryptionContext = builder.encryptionContext; + this.cacheConf = builder.cacheConf; + this.regionFileSystem = builder.regionFileSystem; + this.comparator = builder.comparator; + this.bloomFilterType = builder.bloomFilterType; + this.compactedFilesSupplier = builder.compactedFilesSupplier; + this.favoredNodesSupplier = builder.favoredNodesSupplier; + this.family = builder.family; + this.familyStoreDirectoryPath = builder.familyStoreDirectoryPath; + this.coprocessorHost = builder.coprocessorHost; + } + + public int getBlockSize() { + return blockSize; + } + + public Encryption.Context getEncryptionContext() { + return encryptionContext; + } + + public CacheConfig getCacheConf() { + return cacheConf; + } + + public HRegionFileSystem getRegionFileSystem() { + return regionFileSystem; + } + + public CellComparator getComparator() { + return comparator; + } + + public BloomType getBloomFilterType() { + return bloomFilterType; + } + + public Supplier> getCompactedFilesSupplier() { + return compactedFilesSupplier; + } + + public InetSocketAddress[] getFavoredNodes() { + return favoredNodesSupplier.get(); + } + + public ColumnFamilyDescriptor getFamily() { + return family; + } + + public Path getFamilyStoreDirectoryPath() { + return familyStoreDirectoryPath; + } + + public RegionCoprocessorHost getCoprocessorHost() { + return coprocessorHost; + } + + public static Builder getBuilder() { + return new Builder(); + } + + @Override + public long heapSize() { + return FIXED_OVERHEAD; + } + + public static class Builder { + private int blockSize; + private Encryption.Context encryptionContext; + private CacheConfig cacheConf; + private HRegionFileSystem regionFileSystem; + private CellComparator comparator; + private BloomType bloomFilterType; + private Supplier> compactedFilesSupplier; + private Supplier favoredNodesSupplier; + private ColumnFamilyDescriptor family; + private Path familyStoreDirectoryPath; + private RegionCoprocessorHost 
coprocessorHost; + + public Builder withBlockSize(int blockSize) { + this.blockSize = blockSize; + return this; + } + + public Builder withEncryptionContext(Encryption.Context encryptionContext) { + this.encryptionContext = encryptionContext; + return this; + } + + public Builder withCacheConfig(CacheConfig cacheConf) { + this.cacheConf = cacheConf; + return this; + } + + public Builder withRegionFileSystem(HRegionFileSystem regionFileSystem) { + this.regionFileSystem = regionFileSystem; + return this; + } + + public Builder withCellComparator(CellComparator comparator) { + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomFilterType) { + this.bloomFilterType = bloomFilterType; + return this; + } + + public Builder withCompactedFilesSupplier(Supplier> + compactedFilesSupplier) { + this.compactedFilesSupplier = compactedFilesSupplier; + return this; + } + + public Builder withFavoredNodesSupplier(Supplier favoredNodesSupplier) { + this.favoredNodesSupplier = favoredNodesSupplier; + return this; + } + + public Builder withColumnFamilyDescriptor(ColumnFamilyDescriptor family) { + this.family = family; + return this; + } + + public Builder withFamilyStoreDirectoryPath(Path familyStoreDirectoryPath) { + this.familyStoreDirectoryPath = familyStoreDirectoryPath; + return this; + } + + public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { + this.coprocessorHost = coprocessorHost; + return this; + } + + public StoreContext build() { + return new StoreContext(this); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index f92a4d386598..7550511a356e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -54,7 +54,6 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Reader for a StoreFile. 
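The new `StoreContext` above is an internal (`InterfaceAudience.Private`) value object assembled through its builder. A hedged sketch of the builder shape, populating only a few of the knobs shown in this patch; the family name and block size are placeholders.

```java
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreContext;

public class StoreContextSketch {
  // The remaining with* setters (cache config, region file system, favored nodes, ...)
  // follow the same fluent pattern.
  static StoreContext minimalContext() {
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.of("cf");  // placeholder family
    return StoreContext.getBuilder()
        .withBlockSize(64 * 1024)
        .withBloomType(BloomType.ROW)
        .withCellComparator(CellComparator.getInstance())
        .withColumnFamilyDescriptor(family)
        .build();
  }
}
```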
@@ -115,7 +114,7 @@ public boolean isPrimaryReplicaReader() { /** * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS */ - @VisibleForTesting + @InterfaceAudience.Private StoreFileReader() { this.refCount = new AtomicInteger(0); this.reader = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 8c48b124645c..1744a7f06ab5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -25,7 +25,6 @@ import java.util.NavigableSet; import java.util.concurrent.CountDownLatch; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -46,11 +45,9 @@ import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; @@ -93,9 +90,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner private final int minVersions; private final long maxRowSize; private final long cellsPerHeartbeatCheck; - @VisibleForTesting long memstoreOnlyReads; - @VisibleForTesting long mixedReads; // 1) Collects all the KVHeap that are eagerly getting closed during the @@ -155,7 +150,6 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Since CompactingMemstore is now default, we get three memstore scanners from a flush private final List memStoreScannersAfterFlush = new ArrayList<>(3); // The current list of scanners - @VisibleForTesting final List currentScanners = new ArrayList<>(); // flush update lock private final ReentrantLock flushLock = new ReentrantLock(); @@ -344,7 +338,6 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, } // Used to instantiate a scanner for user scan in test - @VisibleForTesting StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, List scanners, ScanType scanType) throws IOException { // 0 is passed as readpoint because the test bypasses Store @@ -361,7 +354,6 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, } // Used to instantiate a scanner for user scan in test - @VisibleForTesting StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store @@ -373,7 +365,6 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, } // Used to instantiate a scanner for compaction in test - @VisibleForTesting StoreScanner(ScanInfo scanInfo, int maxVersions, ScanType scanType, List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store @@ -384,7 +375,6 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, seekAllScanner(scanInfo, scanners); } - @VisibleForTesting boolean isScanUsePread() { return this.scanUsePread; } @@ -427,7 +417,6 @@ protected void seekScanners(List scanners, } } - @VisibleForTesting protected void resetKVHeap(List scanners, CellComparator comparator) throws IOException { // Combine all 
seeked scanners with a heap @@ -444,7 +433,6 @@ protected KeyValueHeap newKVHeap(List scanners, *

    * Will be overridden by testcase so declared as protected. */ - @VisibleForTesting protected List selectScannersFrom(HStore store, List allScanners) { boolean memOnly; @@ -870,7 +858,6 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException { * @param cell current cell * @return true means skip to next row, false means not */ - @VisibleForTesting protected boolean trySkipToNextRow(Cell cell) throws IOException { Cell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison @@ -896,7 +883,6 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { * @param cell current cell * @return true means skip to next column, false means not */ - @VisibleForTesting protected boolean trySkipToNextColumn(Cell cell) throws IOException { Cell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison @@ -1096,7 +1082,6 @@ public boolean reseek(Cell kv) throws IOException { return heap.reseek(kv); } - @VisibleForTesting void trySwitchToStreamRead() { if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null || bytesRead < preadMaxBytes) { @@ -1208,7 +1193,6 @@ private void parallelSeek(final List * Used in testing. * @return all scanners in no particular order */ - @VisibleForTesting List getAllScannersForTesting() { List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 0e4f6c2bb8a4..ac5955feca7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -24,9 +24,13 @@ import java.util.OptionalInt; import java.util.OptionalLong; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,4 +140,25 @@ static Optional getSplitPoint(Collection storefiles, return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) : Optional.empty(); } + + /** + * Returns the configured checksum algorithm. + * @param conf The configuration + * @return The checksum algorithm that is set in the configuration + */ + public static ChecksumType getChecksumType(Configuration conf) { + return ChecksumType.nameToType( + conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName())); + } + + /** + * Returns the configured bytesPerChecksum value. 
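The `StoreUtils` additions here (`getChecksumType` above and `getBytesPerChecksum`, whose body follows) centralize reading the checksum settings from configuration. A hedged sketch of exercising them; overriding to CRC32C and 8 KB chunks is just an example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.StoreUtils;
import org.apache.hadoop.hbase.util.ChecksumType;

public class ChecksumConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Override the defaults using the same keys the helpers read.
    conf.set(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.CRC32C.getName());
    conf.setInt(HConstants.BYTES_PER_CHECKSUM, 8 * 1024);
    System.out.println("checksum type      = " + StoreUtils.getChecksumType(conf));
    System.out.println("bytes per checksum = " + StoreUtils.getBytesPerChecksum(conf));
  }
}
```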
+ * @param conf The configuration + * @return The bytesPerChecksum that is set in the configuration + */ + public static int getBytesPerChecksum(Configuration conf) { + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, + HFile.DEFAULT_BYTES_PER_CHECKSUM); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 84c623c6c832..beed41fae099 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -48,7 +48,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * Stripe implementation of StoreFileManager. + * Stripe implementation of {@link StoreFileManager}. * Not thread safe - relies on external locking (in HStore). Collections that this class * returns are immutable or unique to the call, so they should be safe. * Stripe store splits the key space of the region into non-overlapping stripes, as well as @@ -56,9 +56,10 @@ * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, * that can thus be added to stripes. * When scan or get happens, it only has to read the files from the corresponding stripes. - * See StripeCompationPolicy on how the stripes are determined; this class doesn't care. + * See {@link StripeCompactionPolicy} on how the stripes are determined; this class doesn't care. * - * This class should work together with StripeCompactionPolicy and StripeCompactor. + * This class should work together with {@link StripeCompactionPolicy} and + * {@link org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor}. * With regard to how they work, we make at least the following (reasonable) assumptions: * - Compaction produces one file per new stripe (if any); that is easy to change. * - Compaction has one contiguous set of stripes both in and out, except if L0 is involved. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 7beec5e2e8ac..1560aef5f6b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -33,7 +33,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Stripe implementation of StoreFlusher. 
Flushes files either into L0 file w/o metadata, or @@ -119,7 +118,6 @@ public StripeFlushRequest(CellComparator comparator) { this.comparator = comparator; } - @VisibleForTesting public StripeMultiFileWriter createWriter() throws IOException { StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(comparator, 1, Long.MAX_VALUE, OPEN_KEY, OPEN_KEY); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 18175648f305..fdf9db273a69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -24,14 +24,13 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -240,10 +239,9 @@ TimeRange toTimeRange() { if (max == INITIAL_MAX_TIMESTAMP) { max = TimeRange.INITIAL_MAX_TIMESTAMP; } - return new TimeRange(min, max); + return TimeRange.between(min, max); } - @VisibleForTesting //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class NonSyncTimeRangeTracker extends TimeRangeTracker { private long minimumTimestamp = INITIAL_MIN_TIMESTAMP; @@ -301,7 +299,6 @@ public long getMax() { } } - @VisibleForTesting //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class SyncTimeRangeTracker extends TimeRangeTracker { private final AtomicLong minimumTimestamp = new AtomicLong(INITIAL_MIN_TIMESTAMP); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index 2a684da01311..d5be356f93f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -21,8 +21,6 @@ import java.util.List; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * A list of segment managers coupled with the version of the memstore (version at the time it was * created). 
@@ -65,7 +63,6 @@ public int getNumOfSegments() { } // Estimates fraction of unique keys - @VisibleForTesting double getEstimatedUniquesFrac() { int segmentCells = 0; int maxCells = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index dbc5b1fea1b1..75966b9e7467 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -180,7 +180,9 @@ public class CompactionConfiguration { @Override public String toString() { return String.format( - "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;" + "size [minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + " major period %d, major jitter %f, min locality to compact %f;" + " tiered compaction: max_age %d, incoming window min %d," + " compaction policy for tiered window %s, single output for minor %b," diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java index b68363498e4c..1f5a8208f0a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private public class CurrentHourProvider { @@ -38,7 +37,6 @@ private static final class Tick { } } - @VisibleForTesting static Tick nextTick() { Calendar calendar = new GregorianCalendar(); calendar.setTimeInMillis(EnvironmentEdgeManager.currentTime()); @@ -54,7 +52,6 @@ private static void moveToNextHour(Calendar calendar) { calendar.set(Calendar.MILLISECOND, 0); } - @VisibleForTesting static volatile Tick tick = nextTick(); public static int getCurrentHour() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index 1cc7dda0948a..f60e97db4836 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -40,7 +39,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import 
org.apache.hbase.thirdparty.com.google.common.collect.PeekingIterator; @@ -98,7 +97,7 @@ public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation sto * Heuristics for guessing whether we need minor compaction. */ @Override - @VisibleForTesting + @InterfaceAudience.Private public boolean needsCompaction(Collection storeFiles, List filesCompacting) { ArrayList candidates = new ArrayList<>(storeFiles); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 737f1653bc94..5d9819ca56ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -20,8 +20,11 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -31,6 +34,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices.PostOpenDeployContext; import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext; import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -92,7 +96,7 @@ public void process() throws IOException { String regionName = regionInfo.getRegionNameAsString(); Region onlineRegion = rs.getRegion(encodedName); if (onlineRegion != null) { - LOG.warn("Received OPEN for the region:{}, which is already online", regionName); + LOG.warn("Received OPEN for {} which is already online", regionName); // Just follow the old behavior, do we need to call reportRegionStateTransition? Maybe not? // For normal case, it could happen that the rpc call to schedule this handler is succeeded, // but before returning to master the connection is broken. And when master tries again, we @@ -104,7 +108,7 @@ public void process() throws IOException { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. - LOG.info("Receiving OPEN for the region:{}, which we are already trying to OPEN" + + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + " - ignoring this new request for this region.", regionName); } else { // The region is closing. This is possible as we will update the region state to CLOSED when @@ -113,7 +117,7 @@ public void process() throws IOException { // closing process. 
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); LOG.info( - "Receiving OPEN for the region:{}, which we are trying to close, try again after {}ms", + "Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -129,8 +133,16 @@ public void process() throws IOException { } // pass null for the last parameter, which used to be a CancelableProgressable, as now the // opening can not be interrupted by a close request any more. - region = HRegion.openHRegion(regionInfo, htd, rs.getWAL(regionInfo), rs.getConfiguration(), - rs, null); + Configuration conf = rs.getConfiguration(); + TableName tn = htd.getTableName(); + if (ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(conf, tn)) { + if (RegionReplicaUtil.isDefaultReplica(this.regionInfo.getReplicaId())) { + // Add the hbase:meta replication source on replica zero/default. + rs.getReplicationSourceService().getReplicationManager(). + addCatalogReplicationSource(this.regionInfo); + } + } + region = HRegion.openHRegion(regionInfo, htd, rs.getWAL(regionInfo), conf, rs, null); } catch (IOException e) { cleanUpAndReportFailure(e); return; @@ -145,11 +157,10 @@ public void process() throws IOException { Boolean current = rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes()); if (current == null) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was NOT in transition. Region={}", - regionName); + LOG.error("Bad state: we've just opened {} which was NOT in transition", regionName); } else if (!current) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was closing. Region={}", regionName); + LOG.error("Bad state: we've just opened {} which was closing", regionName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index cc798cc2443f..829d0bf01578 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; /** - * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in + * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that that is still not flushed from primary. We do not want to allow secondary + * it's memstore that are still not flushed from primary. We do not want to allow secondary * region's seqId to go back in time, when this secondary region is opened elsewhere after a * crash or region move. We will trigger a flush cache in the primary region replica and wait * for observing a complete flush cycle before marking the region readsEnabled. 
This handler does @@ -50,7 +50,6 @@ */ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { - private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaFlushHandler.class); private final AsyncClusterConnection connection; @@ -73,7 +72,7 @@ protected void handleException(Throwable t) { if (t instanceof InterruptedIOException || t instanceof InterruptedException) { LOG.error("Caught throwable while processing event " + eventType, t); } else if (t instanceof RuntimeException) { - server.abort("ServerAborting because a runtime exception was thrown", t); + server.abort("Server aborting", t); } else { // something fishy since we cannot flush the primary region until all retries (retries from // rpc times 35 trigger). We cannot close the region since there is no such mechanism to @@ -101,9 +100,9 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " - + region.getRegionInfo().getEncodedName() + " to trigger a flush"); + LOG.debug("RPC'ing to primary " + ServerRegionReplicaUtil. + getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionNameAsString() + + " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); } while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) { @@ -142,11 +141,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // then we have to wait for seeing the flush entry. All reads will be rejected until we see // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered a flush of primary region replica " + + LOG.debug("Triggered flush of primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and blocking reads until observing a full flush cycle"); + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; @@ -154,12 +153,10 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { if (response.hasWroteFlushWalMarker()) { if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + - "region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and " + "blocking reads until observing a flush marker"); + LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()). 
+ getRegionNameAsString() + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until observing a flush marker"); } region.setReadsEnabled(true); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 8b275d0e6ed1..0d02f30e5ab7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,19 +86,18 @@ public void process() throws IOException { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for the region: {}, which we are already " + - "trying to OPEN. try again after {}ms", encodedName, backoff); + LOG.warn("Received CLOSE for {} which we are already " + + "trying to OPEN; try again after {}ms", encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for the region: {}, which we are already trying to CLOSE," + + LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", encodedName); } return; } HRegion region = rs.getRegion(encodedName); if (region == null) { - LOG.debug( - "Received CLOSE for a region {} which is not online, and we're not opening/closing.", + LOG.debug("Received CLOSE for {} which is not ONLINE and we're not opening/closing.", encodedName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; @@ -114,11 +115,21 @@ public void process() throws IOException { if (region.close(abort) == null) { // XXX: Is this still possible? The old comment says about split, but now split is done at // master side, so... 
- LOG.warn("Can't close region {}, was already closed during close()", regionName); + LOG.warn("Can't close {}, already closed during close()", regionName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; } + rs.removeRegion(region, destination); + if (ServerRegionReplicaUtil.isMetaRegionReplicaReplicationEnabled(rs.getConfiguration(), + region.getTableDescriptor().getTableName())) { + if (RegionReplicaUtil.isDefaultReplica(region.getRegionInfo().getReplicaId())) { + // If hbase:meta read replicas enabled, remove replication source for hbase:meta Regions. + // See assign region handler where we add the replication source on open. + rs.getReplicationSourceService().getReplicationManager(). + removeCatalogReplicationSource(region.getRegionInfo()); + } + } if (!rs.reportRegionStateTransition( new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo()))) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index cc92003315f2..a01d118718d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -150,7 +150,7 @@ public void stop(boolean force) throws IOException { * the snapshot verification step. * * @param snapshot - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index ee6db3110bb8..b907aa640093 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -34,8 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * StoreHotnessProtector is designed to help limit the concurrency of puts with dense columns, it * does best-effort to avoid exhausting all RS's handlers. 
When a lot of clients write requests with @@ -184,7 +182,6 @@ public boolean isEnable() { return this.parallelPutToStoreThreadLimit > 0; } - @VisibleForTesting Map getPreparePutToStoreMap() { return preparePutToStoreMap; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index d2c624ab446c..af5bfd5b03c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -41,18 +41,20 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; - import org.apache.commons.lang3.mutable.MutableLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -85,7 +87,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. Only one @@ -185,6 +187,8 @@ public abstract class AbstractFSWAL implements WAL { */ protected final Configuration conf; + protected final Abortable abortable; + /** Listeners that are called on WAL events. 
*/ protected final List listeners = new CopyOnWriteArrayList<>(); @@ -329,6 +333,11 @@ public WalProps(Map encodedName2HighestSequenceId, long logSize) { protected final AtomicBoolean rollRequested = new AtomicBoolean(false); + private final ExecutorService logArchiveExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("WAL-Archiver-%d").build()); + + private final int archiveRetries; + public long getFilenum() { return this.filenum.get(); } @@ -380,10 +389,19 @@ protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String lo final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) throws FailedLogCloseException, IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + + protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) + throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); this.conf = conf; + this.abortable = abortable; if (!fs.exists(walDir) && !fs.mkdirs(walDir)) { throw new IOException("Unable to mkdir " + walDir); @@ -482,6 +500,8 @@ protected SyncFuture initialValue() { this.walTooOldNs = TimeUnit.SECONDS.toNanos(conf.getInt( SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC); + archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); + } /** @@ -585,7 +605,6 @@ private Path getNewPath() throws IOException { return newPath; } - @VisibleForTesting Path getOldPath() { long currentFilenum = this.filenum.get(); Path oldPath = null; @@ -715,11 +734,39 @@ private void cleanOldLogs() throws IOException { regionsBlockingThisWal.clear(); } } + if (logsToArchive != null) { - for (Pair logAndSize : logsToArchive) { - this.totalLogSize.addAndGet(-logAndSize.getSecond()); - archiveLogFile(logAndSize.getFirst()); - this.walFile2Props.remove(logAndSize.getFirst()); + final List> localLogsToArchive = logsToArchive; + // make it async + for (Pair log : localLogsToArchive) { + logArchiveExecutor.execute(() -> { + archive(log); + }); + this.walFile2Props.remove(log.getFirst()); + } + } + } + + protected void archive(final Pair log) { + int retry = 1; + while (true) { + try { + archiveLogFile(log.getFirst()); + totalLogSize.addAndGet(-log.getSecond()); + // successful + break; + } catch (Throwable e) { + if (retry > archiveRetries) { + LOG.error("Failed log archiving for the log {},", log.getFirst(), e); + if (this.abortable != null) { + this.abortable.abort("Failed log archiving", e); + break; + } + } else { + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, + e); + } + retry++; } } } @@ -732,7 +779,7 @@ public static Path getWALArchivePath(Path archiveDir, Path p) { return new Path(archiveDir, p.getName()); } - private void archiveLogFile(final Path p) throws IOException { + protected void archiveLogFile(final Path p) throws IOException { Path newPath = getWALArchivePath(this.walArchiveDir, p); // Tell our listeners that a log is going to be archived. 
if (!this.listeners.isEmpty()) { @@ -783,7 +830,6 @@ protected final void logRollAndSetupWalProps(Path oldPath, Path newPath, long ol * @return the passed in newPath * @throws IOException if there is a problem flushing or closing the underlying FS */ - @VisibleForTesting Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException { try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) { doReplaceWriter(oldPath, newPath, nextWriter); @@ -887,7 +933,6 @@ public void requestLogRoll() { * Get the backing files associated with this WAL. * @return may be null if there are no files. */ - @VisibleForTesting FileStatus[] getFiles() throws IOException { return CommonFSUtils.listStatus(fs, walDir, ourFiles); } @@ -907,6 +952,9 @@ public void shutdown() throws IOException { rollWriterLock.lock(); try { doShutdown(); + if (logArchiveExecutor != null) { + logArchiveExecutor.shutdownNow(); + } } finally { rollWriterLock.unlock(); } @@ -983,7 +1031,6 @@ boolean isUnflushedEntries() { /** * Exposed for testing only. Use to tricks like halt the ring buffer appending. */ - @VisibleForTesting protected void atHeadOfRingBufferEventHandlerAppend() { // Noop } @@ -1238,13 +1285,11 @@ public void checkLogLowReplication(long checkInterval) { /** * This method gets the pipeline for the current WAL. */ - @VisibleForTesting abstract DatanodeInfo[] getPipeline(); /** * This method gets the datanode replication count for the current WAL. */ - @VisibleForTesting abstract int getLogReplication(); private static void split(final Configuration conf, final Path p) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index a40e50335d99..342446098be8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -44,9 +44,11 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; @@ -60,7 +62,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -68,6 +69,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; + /** * An asynchronous implementation of FSWAL. *

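// A minimal sketch (hypothetical names, not HBase classes) of the retry-then-abort
// archiving pattern that the AbstractFSWAL hunks above introduce: archiving moves off
// the roll path onto a single-threaded daemon executor, retries a configurable number
// of times ("hbase.regionserver.walroll.archive.retries", default 0), and only then
// asks the newly passed-in Abortable to abort the server.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class ArchiveRetrySketch {
  interface Abortable { void abort(String why, Throwable cause); }

  private final ExecutorService archiver = Executors.newSingleThreadExecutor(r -> {
    Thread t = new Thread(r, "WAL-Archiver");
    t.setDaemon(true);
    return t;
  });
  private final int retries;         // mirrors hbase.regionserver.walroll.archive.retries
  private final Abortable abortable; // may be null, e.g. for standalone tools

  ArchiveRetrySketch(int retries, Abortable abortable) {
    this.retries = retries;
    this.abortable = abortable;
  }

  // Submit one archive task; it retries on failure and aborts only after the
  // configured number of retries is exhausted.
  void submit(Runnable archiveOneLog) {
    archiver.execute(() -> {
      for (int attempt = 1;; attempt++) {
        try {
          archiveOneLog.run(); // e.g. rename the WAL file into the archive dir
          return;              // success
        } catch (RuntimeException e) {
          if (attempt > retries) {
            if (abortable != null) {
              abortable.abort("Failed log archiving", e);
            }
            return;
          }
          // otherwise log and retry
        }
      }
    });
  }
}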
    @@ -206,7 +208,16 @@ public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, + eventLoopGroup, channelClass); + } + + public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass) throws FailedLogCloseException, IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; Supplier hasConsumerTask; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java index bf5b96dfce1e..1279c2f31e83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java @@ -29,7 +29,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -61,7 +60,6 @@ public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path rem } // will be overridden in testcase - @VisibleForTesting protected AsyncWriter createCombinedAsyncWriter(AsyncWriter localWriter, AsyncWriter remoteWriter) { return CombinedAsyncWriter.create(remoteWriter, localWriter); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 001be00d8a11..e2320dbf3c8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -63,7 +64,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -168,7 +168,7 @@ public class FSHLog extends AbstractFSWAL { private final int waitOnShutdownInSeconds; private final ExecutorService closeExecutor = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); /** * Exception handler to pass the disruptor ringbuffer. 
Same as native implementation only it logs @@ -202,17 +202,29 @@ public void handleOnShutdownException(Throwable ex) { * @param logDir dir where wals are stored * @param conf configuration to use */ - @VisibleForTesting public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf) throws IOException { this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } + public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, + final Configuration conf) throws IOException { + this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, + null); + } + + public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + /** * Create an edit log at the given dir location. You should never have to load an * existing log. If there is a log at startup, it should have already been processed and deleted * by the time the WAL object is started up. * @param fs filesystem handle + * @param abortable Abortable - the server here * @param rootDir path to where logs and oldlogs * @param logDir dir where wals are stored * @param archiveDir dir where wals are archived @@ -226,10 +238,12 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C * @param suffix will be url encoded. null is treated as empty. non-empty must start with * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} */ - public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); @@ -265,7 +279,6 @@ public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, * removed. * @return null if underlying stream is not ready. */ - @VisibleForTesting OutputStream getOutputStream() { FSDataOutputStream fsdos = this.hdfs_out; return fsdos != null ? fsdos.getWrappedStream() : null; @@ -303,14 +316,12 @@ protected Writer createWriterInstance(final Path path) throws IOException { * Used to manufacture race condition reliably. For testing only. 
* @see #beforeWaitOnSafePoint() */ - @VisibleForTesting protected void afterCreatingZigZagLatch() { } /** * @see #afterCreatingZigZagLatch() */ - @VisibleForTesting protected void beforeWaitOnSafePoint() { } @@ -759,7 +770,6 @@ protected boolean doCheckLogLowReplication() { return logRollNeeded; } - @VisibleForTesting protected long getSequenceOnRingBuffer() { return this.disruptor.getRingBuffer().next(); } @@ -769,7 +779,6 @@ private SyncFuture publishSyncOnRingBuffer(boolean forceSync) { return publishSyncOnRingBuffer(sequence, forceSync); } - @VisibleForTesting protected SyncFuture publishSyncOnRingBuffer(long sequence, boolean forceSync) { // here we use ring buffer sequence as transaction id SyncFuture syncFuture = getSyncFuture(sequence, forceSync); @@ -796,7 +805,6 @@ private void publishSyncThenBlockOnCompletion(TraceScope scope, boolean forceSyn * patch. */ @Override - @VisibleForTesting int getLogReplication() { try { // in standalone mode, it will return 0 @@ -837,7 +845,6 @@ public void sync(long txid, boolean forceSync) throws IOException { } } - @VisibleForTesting boolean isLowReplicationRollEnabled() { return lowReplicationRollEnabled; } @@ -1193,12 +1200,10 @@ DatanodeInfo[] getPipeline() { return new DatanodeInfo[0]; } - @VisibleForTesting Writer getWriter() { return this.writer; } - @VisibleForTesting void setWriter(Writer writer) { this.writer = writer; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 44c96dee7619..ca51ec0c5684 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** @@ -91,7 +90,6 @@ class FSWALEntry extends Entry { } } - @VisibleForTesting static Set collectFamilies(List cells) { if (CollectionUtils.isEmpty(cells)) { return Collections.emptySet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java index b2af4a80ad3b..f23dae215707 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java @@ -19,17 +19,16 @@ package org.apache.hadoop.hbase.regionserver.wal; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class used to push numbers about the WAL into the metrics subsystem. 
This will take a @@ -45,7 +44,6 @@ public MetricsWAL() { this(CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); } - @VisibleForTesting MetricsWAL(MetricsWALSource s) { this.source = s; } @@ -58,9 +56,10 @@ public void postSync(final long timeInNanos, final int handlerSyncs) { @Override public void postAppend(final long size, final long time, final WALKey logkey, final WALEdit logEdit) throws IOException { - source.incrementAppendCount(); + TableName tableName = logkey.getTableName(); + source.incrementAppendCount(tableName); source.incrementAppendTime(time); - source.incrementAppendSize(size); + source.incrementAppendSize(tableName, size); source.incrementWrittenBytes(size); if (time > 1000) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 6f537df94900..0967c101ce58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -412,14 +412,14 @@ protected boolean readNext(Entry entry) throws IOException { + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); throw eof; } - // If stuck at the same place and we got and exception, lets go back at the beginning. + // If stuck at the same place and we got an exception, lets go back at the beginning. if (inputStream.getPos() == originalPosition) { if (resetPosition) { LOG.warn("Encountered a malformed edit, seeking to the beginning of the WAL since " + "current position and original position match at {}", originalPosition); seekOnFs(0); } else { - LOG.debug("Reached the end of file at position {}", originalPosition); + LOG.debug("EOF at position {}", originalPosition); } } else { // Else restore our position to original location in hope that next time through we will diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 7146ca743926..6be95391819b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -37,8 +37,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Accounting of sequence ids per region and then by column family. So we can keep our accounting * current, call startCacheFlush and then finishedCacheFlush or abortCacheFlush so this instance can @@ -240,7 +238,6 @@ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId, } } - @VisibleForTesting ConcurrentMap getOrCreateLowestSequenceIds(byte[] encodedRegionName) { // Intentionally, this access is done outside of this.regionSequenceIdLock. Done per append. 
return computeIfAbsent(this.lowestUnflushedSequenceIds, encodedRegionName, @@ -253,7 +250,11 @@ ConcurrentMap getOrCreateLowestSequenceIds(byte[] enco */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Long sid: sequenceids.values()) { + for (Map.Entry entry : sequenceids.entrySet()){ + if (entry.getKey().toString().equals("METAFAMILY")){ + continue; + } + Long sid = entry.getValue(); if (lowest == HConstants.NO_SEQNUM || sid.longValue() < lowest) { lowest = sid.longValue(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java index 19fd0c77e7e4..492364780718 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WAL; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A {@link ChainWALEntryFilter} for providing more flexible options @@ -55,7 +54,7 @@ public WAL.Entry filter(WAL.Entry entry) { * * @param filterEmptyEntry flag */ - @VisibleForTesting + @InterfaceAudience.Private public void setFilterEmptyEntry(final boolean filterEmptyEntry) { this.filterEmptyEntry = filterEmptyEntry; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 3cde0d5113a0..86786856f214 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -22,15 +22,22 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; - +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; +import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.AuthFailedException; import org.apache.zookeeper.KeeperException.ConnectionLossException; @@ -38,6 +45,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + /** * A {@link BaseReplicationEndpoint} for replication endpoints whose * target cluster is an HBase cluster. 
@@ -49,13 +58,70 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class); private ZKWatcher zkw = null; + private final Object zkwLock = new Object(); + + protected Configuration conf; - private List regionServers = new ArrayList<>(0); - private long lastRegionServerUpdate; + private AsyncClusterConnection conn; - protected synchronized void disconnect() { - if (zkw != null) { - zkw.close(); + /** + * Default maximum number of times a replication sink can be reported as bad before + * it will no longer be provided as a sink for replication without the pool of + * replication sinks being refreshed. + */ + public static final int DEFAULT_BAD_SINK_THRESHOLD = 3; + + /** + * Default ratio of the total number of peer cluster region servers to consider + * replicating to. + */ + public static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; + + // Ratio of total number of potential peer region servers to be used + private float ratio; + + // Maximum number of times a sink can be reported as bad before the pool of + // replication sinks is refreshed + private int badSinkThreshold; + // Count of "bad replication sink" reports per peer sink + private Map badReportCounts; + + private List sinkServers = new ArrayList<>(0); + + /* + * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different + * Connection implementations, or initialize it in a different way, so defining createConnection + * as protected for possible overridings. + */ + protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { + return ClusterConnectionFactory.createAsyncClusterConnection(conf, + null, User.getCurrent()); + } + + @Override + public void init(Context context) throws IOException { + super.init(context); + this.conf = HBaseConfiguration.create(ctx.getConfiguration()); + this.ratio = + ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); + this.badSinkThreshold = + ctx.getConfiguration().getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); + this.badReportCounts = Maps.newHashMap(); + } + + protected void disconnect() { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + } + if (this.conn != null) { + try { + this.conn.close(); + this.conn = null; + } catch (IOException e) { + LOG.warn("{} Failed to close the connection", ctx.getPeerId()); + } } } @@ -63,15 +129,15 @@ protected synchronized void disconnect() { * A private method used to re-establish a zookeeper session with a peer cluster. 
* @param ke */ - protected void reconnect(KeeperException ke) { + private void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException) { String clusterKey = ctx.getPeerConfig().getClusterKey(); - LOG.warn("Lost the ZooKeeper connection for peer " + clusterKey, ke); + LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); try { reloadZkWatcher(); } catch (IOException io) { - LOG.warn("Creation of ZookeeperWatcher failed for peer " + clusterKey, io); + LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io); } } } @@ -90,6 +156,7 @@ public void stop() { protected void doStart() { try { reloadZkWatcher(); + connectPeerCluster(); notifyStarted(); } catch (IOException e) { notifyFailed(e); @@ -107,33 +174,40 @@ protected void doStop() { // limit connections when multiple replication sources try to connect to // the peer cluster. If the peer cluster is down we can get out of control // over time. - public synchronized UUID getPeerUUID() { + public UUID getPeerUUID() { UUID peerUUID = null; try { - peerUUID = ZKClusterId.getUUIDForCluster(zkw); + synchronized (zkwLock) { + peerUUID = ZKClusterId.getUUIDForCluster(zkw); + } } catch (KeeperException ke) { reconnect(ke); } return peerUUID; } - /** - * Get the ZK connection to this peer - * @return zk connection - */ - protected synchronized ZKWatcher getZkw() { - return zkw; - } - /** * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ - synchronized void reloadZkWatcher() throws IOException { - if (zkw != null) zkw.close(); - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); - getZkw().registerListener(new PeerRegionServerListener(this)); + private void reloadZkWatcher() throws IOException { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + zkw = new ZKWatcher(ctx.getConfiguration(), + "connection to cluster: " + ctx.getPeerId(), this); + zkw.registerListener(new PeerRegionServerListener(this)); + } + } + + private void connectPeerCluster() throws IOException { + try { + conn = createConnection(this.conf); + } catch (IOException ioe) { + LOG.warn("{} Failed to create connection for peer cluster", ctx.getPeerId(), ioe); + throw ioe; + } } @Override @@ -150,13 +224,21 @@ public boolean isAborted() { /** * Get the list of all the region servers from the specified peer - * @param zkw zk connection to use + * * @return list of region server addresses or an empty list if the slave is unavailable */ - protected static List fetchSlavesAddresses(ZKWatcher zkw) - throws KeeperException { - List children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, - zkw.getZNodePaths().rsZNode); + protected List fetchSlavesAddresses() { + List children = null; + try { + synchronized (zkwLock) { + children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + } + } catch (KeeperException ke) { + if (LOG.isDebugEnabled()) { + LOG.debug("Fetch slaves addresses failed", ke); + } + reconnect(ke); + } if (children == null) { return Collections.emptyList(); } @@ -167,43 +249,69 @@ protected static List fetchSlavesAddresses(ZKWatcher zkw) return addresses; } + protected synchronized void chooseSinks() { + List slaveAddresses = fetchSlavesAddresses(); + if (slaveAddresses.isEmpty()) { + LOG.warn("No sinks available at peer. 
Will not be able to replicate"); + } + Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); + int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); + this.sinkServers = slaveAddresses.subList(0, numSinks); + badReportCounts.clear(); + } + + protected synchronized int getNumSinks() { + return sinkServers.size(); + } + /** - * Get a list of all the addresses of all the available region servers - * for this peer cluster, or an empty list if no region servers available at peer cluster. - * @return list of addresses + * Get a randomly-chosen replication sink to replicate to. + * @return a replication sink to replicate to */ - // Synchronize peer cluster connection attempts to avoid races and rate - // limit connections when multiple replication sources try to connect to - // the peer cluster. If the peer cluster is down we can get out of control - // over time. - public synchronized List getRegionServers() { - try { - setRegionServers(fetchSlavesAddresses(this.getZkw())); - } catch (KeeperException ke) { - if (LOG.isDebugEnabled()) { - LOG.debug("Fetch slaves addresses failed", ke); - } - reconnect(ke); + protected synchronized SinkPeer getReplicationSink() throws IOException { + if (sinkServers.isEmpty()) { + LOG.info("Current list of sinks is out of date or empty, updating"); + chooseSinks(); } - return regionServers; + if (sinkServers.isEmpty()) { + throw new IOException("No replication sinks are available"); + } + ServerName serverName = + sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); + return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); } /** - * Set the list of region servers for that peer - * @param regionServers list of addresses for the region servers + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it + * failed). If a single SinkPeer is reported as bad more than + * replication.bad.sink.threshold times, it will be removed + * from the pool of potential replication targets. + * + * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ - public synchronized void setRegionServers(List regionServers) { - this.regionServers = regionServers; - lastRegionServerUpdate = System.currentTimeMillis(); + protected synchronized void reportBadSink(SinkPeer sinkPeer) { + ServerName serverName = sinkPeer.getServerName(); + int badReportCount = badReportCounts.compute(serverName, (k, v) -> v == null ? 1 : v + 1); + if (badReportCount > badSinkThreshold) { + this.sinkServers.remove(serverName); + if (sinkServers.isEmpty()) { + chooseSinks(); + } + } } /** - * Get the timestamp at which the last change occurred to the list of region servers to replicate - * to. - * @return The System.currentTimeMillis at the last time the list of peer region servers changed. + * Report that a {@code SinkPeer} successfully replicated a chunk of data. 
+ * + * @param sinkPeer + * The SinkPeer that had a failed replication attempt on it */ - public long getLastRegionServerUpdate() { - return lastRegionServerUpdate; + protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) { + badReportCounts.remove(sinkPeer.getServerName()); + } + + List getSinkServers() { + return sinkServers; } /** @@ -214,22 +322,39 @@ public static class PeerRegionServerListener extends ZKListener { private final HBaseReplicationEndpoint replicationEndpoint; private final String regionServerListNode; - public PeerRegionServerListener(HBaseReplicationEndpoint replicationPeer) { - super(replicationPeer.getZkw()); - this.replicationEndpoint = replicationPeer; - this.regionServerListNode = replicationEndpoint.getZkw().getZNodePaths().rsZNode; + public PeerRegionServerListener(HBaseReplicationEndpoint endpoint) { + super(endpoint.zkw); + this.replicationEndpoint = endpoint; + this.regionServerListNode = endpoint.zkw.getZNodePaths().rsZNode; } @Override public synchronized void nodeChildrenChanged(String path) { if (path.equals(regionServerListNode)) { - try { - LOG.info("Detected change to peer region servers, fetching updated list"); - replicationEndpoint.setRegionServers(fetchSlavesAddresses(replicationEndpoint.getZkw())); - } catch (KeeperException e) { - LOG.error("Error reading slave addresses", e); - } + LOG.info("Detected change to peer region servers, fetching updated list"); + replicationEndpoint.chooseSinks(); } } } + + /** + * Wraps a replication region server sink to provide the ability to identify it. + */ + public static class SinkPeer { + private ServerName serverName; + private AsyncRegionServerAdmin regionServer; + + public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) { + this.serverName = serverName; + this.regionServer = regionServer; + } + + ServerName getServerName() { + return serverName; + } + + public AsyncRegionServerAdmin getRegionServer() { + return regionServer; + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java index 123a036ca0fd..81be5a3e3a00 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java @@ -49,8 +49,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Helper class for storing replication barriers in family 'rep_barrier' of meta table. *

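// A minimal sketch (hypothetical names, not HBase classes) of the sink-pool
// bookkeeping that the HBaseReplicationEndpoint rewrite above introduces: shuffle the
// peer's region servers, keep ceil(n * ratio) of them as sinks
// ("replication.source.ratio", default 0.5), count bad reports per sink, and drop a
// sink once it exceeds "replication.bad.sink.threshold" (default 3), refreshing the
// whole pool if it runs dry.
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

class SinkPoolSketch {
  private final float ratio;
  private final int badSinkThreshold;
  private final Map<String, Integer> badReportCounts = new HashMap<>();
  private List<String> sinkServers = new ArrayList<>();

  SinkPoolSketch(float ratio, int badSinkThreshold) {
    this.ratio = ratio;
    this.badSinkThreshold = badSinkThreshold;
  }

  // Re-sample the sink pool from the current list of peer region servers.
  synchronized void chooseSinks(List<String> peerServers) {
    List<String> shuffled = new ArrayList<>(peerServers);
    Collections.shuffle(shuffled, ThreadLocalRandom.current());
    int numSinks = (int) Math.ceil(shuffled.size() * ratio);
    sinkServers = new ArrayList<>(shuffled.subList(0, numSinks));
    badReportCounts.clear();
  }

  // Pick a random sink to replicate to; callers refresh the pool first if it is empty.
  synchronized String getReplicationSink() {
    if (sinkServers.isEmpty()) {
      throw new IllegalStateException("No replication sinks are available");
    }
    return sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size()));
  }

  // Record a failed replication attempt; evict the sink once past the threshold.
  synchronized void reportBadSink(String sink, List<String> peerServers) {
    int count = badReportCounts.merge(sink, 1, Integer::sum);
    if (count > badSinkThreshold) {
      sinkServers.remove(sink);
      if (sinkServers.isEmpty()) {
        chooseSinks(peerServers);
      }
    }
  }

  // A successful replication clears the sink's bad-report count.
  synchronized void reportSinkSuccess(String sink) {
    badReportCounts.remove(sink);
  }
}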
    @@ -59,7 +57,6 @@ @InterfaceAudience.Private public final class ReplicationBarrierFamilyFormat { - @VisibleForTesting public static final byte[] REPLICATION_PARENT_QUALIFIER = Bytes.toBytes("parent"); private static final byte ESCAPE_BYTE = (byte) 0xFF; @@ -85,7 +82,6 @@ private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName } } - @VisibleForTesting public static byte[] getParentsBytes(List parents) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); Iterator iter = parents.iterator(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java new file mode 100644 index 000000000000..edd567914dc7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSink; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; + +@InterfaceAudience.Private +public class ReplicationSinkServiceImpl implements ReplicationSinkService { + private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkServiceImpl.class); + + private Configuration conf; + + private Server server; + + private ReplicationSink replicationSink; + + // ReplicationLoad to access replication metrics + private ReplicationLoad replicationLoad; + + private int statsPeriodInSecond; + + @Override + public void replicateLogEntries(List entries, CellScanner cells, + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { + this.replicationSink.replicateEntries(entries, cells, replicationClusterId, + sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath); + } + + @Override + public void initialize(Server server, FileSystem 
fs, Path logdir, Path oldLogDir, + WALFactory walFactory) throws IOException { + this.server = server; + this.conf = server.getConfiguration(); + this.statsPeriodInSecond = + this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.replicationLoad = new ReplicationLoad(); + } + + @Override + public void startReplicationService() throws IOException { + this.replicationSink = new ReplicationSink(this.conf); + this.server.getChoreService().scheduleChore( + new ReplicationStatisticsChore("ReplicationSinkStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + } + + @Override + public void stopReplicationService() { + if (this.replicationSink != null) { + this.replicationSink.stopReplicationSinkServices(); + } + } + + @Override + public ReplicationLoad refreshAndGetReplicationLoad() { + if (replicationLoad == null) { + return null; + } + // always build for latest data + replicationLoad.buildReplicationLoad(Collections.emptyList(), replicationSink.getSinkMetrics()); + return replicationLoad; + } + + private final class ReplicationStatisticsChore extends ScheduledChore { + + ReplicationStatisticsChore(String name, Stoppable stopper, int period) { + super(name, stopper, period); + } + + @Override + protected void chore() { + printStats(replicationSink.getStats()); + } + + private void printStats(String stats) { + if (!stats.isEmpty()) { + LOG.info(stats); + } + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java index 3271696e1103..f06b29ccdeff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -34,7 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Predicate; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; @@ -110,7 +109,7 @@ public void setConf(Configuration config) { } } - @VisibleForTesting + @InterfaceAudience.Private public void setConf(Configuration conf, ZKWatcher zk) { super.setConf(conf); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 4e0be94a7564..a7821f1894a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -19,22 +19,23 @@ import java.io.IOException; import java.util.Collections; +import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Predicate; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; @@ -45,7 +46,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { private static final Logger LOG = LoggerFactory.getLogger(ReplicationLogCleaner.class); - private ZKWatcher zkw; + private ZKWatcher zkw = null; + private boolean shareZK = false; private ReplicationQueueStorage queueStorage; private boolean stopped = false; private Set wals; @@ -94,18 +96,26 @@ public boolean apply(FileStatus file) { } @Override - public void setConf(Configuration config) { - // Make my own Configuration. Then I'll have my own connection to zk that - // I can close myself when comes time. - Configuration conf = new Configuration(config); + public void init(Map params) { + super.init(params); try { - setConf(conf, new ZKWatcher(conf, "replicationLogCleaner", null)); + if (MapUtils.isNotEmpty(params)) { + Object master = params.get(HMaster.MASTER); + if (master != null && master instanceof HMaster) { + zkw = ((HMaster) master).getZooKeeper(); + shareZK = true; + } + } + if (zkw == null) { + zkw = new ZKWatcher(getConf(), "replicationLogCleaner", null); + } + this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); } catch (IOException e) { LOG.error("Error while configuring " + this.getClass().getName(), e); } } - @VisibleForTesting + @InterfaceAudience.Private public void setConf(Configuration conf, ZKWatcher zk) { super.setConf(conf); try { @@ -115,7 +125,8 @@ public void setConf(Configuration conf, ZKWatcher zk) { LOG.error("Error while configuring " + this.getClass().getName(), e); } } - @VisibleForTesting + + @InterfaceAudience.Private public void setConf(Configuration conf, ZKWatcher zk, ReplicationQueueStorage replicationQueueStorage) { super.setConf(conf); @@ -127,7 +138,7 @@ public void setConf(Configuration conf, ZKWatcher zk, public void stop(String why) { if (this.stopped) return; this.stopped = true; - if (this.zkw != null) { + if (!shareZK && this.zkw != null) { LOG.info("Stopping " + this.zkw); this.zkw.close(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSource.java new file mode 100644 index 000000000000..8cb7860e73f9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSource.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.util.Collections; +import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * ReplicationSource that reads catalog WAL files -- e.g. hbase:meta WAL files -- and lets through + * all WALEdits from these WALs. This ReplicationSource is NOT created via + * {@link ReplicationSourceFactory}. + */ +@InterfaceAudience.Private +class CatalogReplicationSource extends ReplicationSource { + CatalogReplicationSource() { + // Filters in hbase:meta WAL files and allows all edits, including 'meta' edits (these are + // filtered out in the 'super' class default implementation). + super(p -> AbstractFSWALProvider.isMetaFile(p), Collections.emptyList()); + } + + @Override + public void logPositionAndCleanOldLogs(WALEntryBatch entryBatch) { + // Noop. This CatalogReplicationSource implementation does not persist state to backing storage + // nor does it keep its WALs in a general map up in ReplicationSourceManager -- + // CatalogReplicationSource is used by the Catalog Read Replica feature which resets every time + // the WAL source process crashes. Skip calling through to the default implementation. + // See "4.1 Skip maintaining zookeeper replication queue (offsets/WALs)" in the + // design doc attached to HBASE-18070 'Enable memstore replication for meta replica' + // for background on why there is no need to keep WAL state. + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java new file mode 100644 index 000000000000..cb00ac2990db --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication.regionserver; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerImpl; +import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * The 'peer' used internally by Catalog Region Replicas Replication Source. + * The Replication system has 'peer' baked into its core so though we do not need 'peering', we + * need a 'peer' and its configuration else the replication system breaks at a few locales. + * Set "hbase.region.replica.catalog.replication" if you want to change the configured endpoint. + */ +@InterfaceAudience.Private +class CatalogReplicationSourcePeer extends ReplicationPeerImpl { + /** + * @param clusterKey Usually the UUID from zk passed in by caller as a String. + */ + CatalogReplicationSourcePeer(Configuration configuration, String clusterKey, String peerId) { + super(configuration, ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER + "_catalog", + ReplicationPeerConfig.newBuilder(). + setClusterKey(clusterKey). + setReplicationEndpointImpl( + configuration.get("hbase.region.replica.catalog.replication", + RegionReplicaReplicationEndpoint.class.getName())). + setBandwidth(0). // '0' means no bandwidth. + setSerial(false). + build(), + true, SyncReplicationState.NONE, SyncReplicationState.NONE); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index cc0d9bbaa2e7..92c57a89d6be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -308,7 +308,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); replicationTracker = ReplicationFactory.getReplicationTracker(zkw, new WarnOnlyAbortable(), new WarnOnlyStoppable()); - Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); + Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); // Loops each peer on each RS and dumps the queues List regionservers = queueStorage.getListOfReplicators(); @@ -317,7 +317,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, } for (ServerName regionserver : regionservers) { List queueIds = queueStorage.getAllQueues(regionserver); - if (!liveRegionServers.contains(regionserver.getServerName())) { + if (!liveRegionServers.contains(regionserver)) { deadRegionServers.add(regionserver.getServerName()); } for (String queueId : queueIds) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 816345f629d3..c77f74fe5c17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -39,29 +39,22 @@ import 
java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Threads; @@ -72,7 +65,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -100,8 +92,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY = "hbase.replication.drop.on.deleted.columnfamily"; - private AsyncClusterConnection conn; - private Configuration conf; // How long should we sleep for each retry private long sleepForRetries; // Maximum number of retries before taking bold actions @@ -114,8 +104,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private int replicationRpcLimit; //Metrics for this source private MetricsSource metrics; - // Handles connecting to peer region servers - private ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; private String replicationClusterId = ""; private ThreadPoolExecutor exec; @@ -130,29 +118,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi //Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * Connection implementations, or initialize it in a different way, so defining createConnection - * as protected for possible overridings. - */ - protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, - null, User.getCurrent()); - } - - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * ReplicationSinkManager implementations, or initialize it in a different way, - * so defining createReplicationSinkManager as protected for possible overridings. 
- */ - protected ReplicationSinkManager createReplicationSinkManager(AsyncClusterConnection conn) { - return new ReplicationSinkManager(conn, this, this.conf); - } - @Override public void init(Context context) throws IOException { super.init(context); - this.conf = HBaseConfiguration.create(ctx.getConfiguration()); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", @@ -164,15 +132,9 @@ public void init(Context context) throws IOException { DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - // TODO: This connection is replication specific or we should make it particular to - // replication and make replication specific settings such as compression or codec to use - // passing Cells. - this.conn = createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); - // ReplicationQueueInfo parses the peerId out of the znode for us - this.replicationSinkMgr = createReplicationSinkManager(conn); // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); @@ -211,14 +173,11 @@ private void decorateConf() { } private void connectToPeers() { - getRegionServers(); - int sleepMultiplier = 1; - // Connect to peer cluster first, unless we have to stop - while (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { - replicationSinkMgr.chooseSinks(); - if (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { + while (this.isRunning() && getNumSinks() == 0) { + chooseSinks(); + if (this.isRunning() && getNumSinks() == 0) { if (sleepForRetries("Waiting for peers", sleepMultiplier)) { sleepMultiplier++; } @@ -232,7 +191,7 @@ private void connectToPeers() { * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ - protected boolean sleepForRetries(String msg, int sleepMultiplier) { + private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { LOG.trace("{} {}, sleeping {} times {}", @@ -240,8 +199,9 @@ protected boolean sleepForRetries(String msg, int sleepMultiplier) { } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); if (LOG.isDebugEnabled()) { - LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); + LOG.debug("{} {} Interrupted while sleeping between retries", msg, logPeerId()); } } return sleepMultiplier < maxRetriesMultiplier; @@ -253,7 +213,7 @@ private int getEstimatedEntrySize(Entry e) { } private List> createParallelBatches(final List entries) { - int numSinks = Math.max(replicationSinkMgr.getNumSinks(), 1); + int numSinks = Math.max(getNumSinks(), 1); int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks); List> entryLists = Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); @@ -301,7 +261,6 @@ private List> createBatches(final List entries) { /** * Check if there's an {@link TableNotFoundException} in the caused by stacktrace. 
*/ - @VisibleForTesting public static boolean isTableNotFoundException(Throwable io) { if (io instanceof RemoteException) { io = ((RemoteException) io).unwrapRemoteException(); @@ -320,7 +279,6 @@ public static boolean isTableNotFoundException(Throwable io) { /** * Check if there's an {@link NoSuchColumnFamilyException} in the caused by stacktrace. */ - @VisibleForTesting public static boolean isNoSuchColumnFamilyException(Throwable io) { if (io instanceof RemoteException) { io = ((RemoteException) io).unwrapRemoteException(); @@ -336,7 +294,6 @@ public static boolean isNoSuchColumnFamilyException(Throwable io) { return false; } - @VisibleForTesting List> filterNotExistTableEdits(final List> oldEntryList) { List> entryList = new ArrayList<>(); Map existMap = new HashMap<>(); @@ -380,7 +337,6 @@ List> filterNotExistTableEdits(final List> oldEntryList) return entryList; } - @VisibleForTesting List> filterNotExistColumnFamilyEdits(final List> oldEntryList) { List> entryList = new ArrayList<>(); Map> existColumnFamilyMap = new HashMap<>(); @@ -442,19 +398,6 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt return entryList; } - private void reconnectToPeerCluster() { - AsyncClusterConnection connection = null; - try { - connection = - ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); - } catch (IOException ioe) { - LOG.warn("{} Failed to create connection for peer cluster", logPeerId(), ioe); - } - if (connection != null) { - this.conn = connection; - } - } - private long parallelReplicate(CompletionService pool, ReplicateContext replicateContext, List> batches) throws IOException { int futures = 0; @@ -513,7 +456,7 @@ public boolean replicate(ReplicateContext replicateContext) { peersSelected = true; } - int numSinks = replicationSinkMgr.getNumSinks(); + int numSinks = getNumSinks(); if (numSinks == 0) { if((System.currentTimeMillis() - lastSinkFetchTime) >= (maxRetriesMultiplier*1000)) { LOG.warn( @@ -534,9 +477,6 @@ public boolean replicate(ReplicateContext replicateContext) { } continue; } - if (this.conn == null) { - reconnectToPeerCluster(); - } try { // replicate the batches to sink side. 
parallelReplicate(pool, replicateContext, batches); @@ -561,7 +501,7 @@ public boolean replicate(ReplicateContext replicateContext) { } else { LOG.warn("{} Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } } else { if (ioe instanceof SocketTimeoutException) { @@ -574,7 +514,7 @@ public boolean replicate(ReplicateContext replicateContext) { this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } else { LOG.warn("{} Can't replicate because of a local or network error: ", logPeerId(), ioe); } @@ -594,14 +534,6 @@ protected boolean isPeerEnabled() { @Override protected void doStop() { disconnect(); // don't call super.doStop() - if (this.conn != null) { - try { - this.conn.close(); - this.conn = null; - } catch (IOException e) { - LOG.warn("{} Failed to close the connection", logPeerId()); - } - } // Allow currently running replication tasks to finish exec.shutdown(); try { @@ -618,7 +550,6 @@ protected void doStop() { notifyStopped(); } - @VisibleForTesting protected int replicateEntries(List entries, int batchIndex, int timeout) throws IOException { SinkPeer sinkPeer = null; @@ -629,10 +560,10 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) LOG.trace("{} Replicating batch {} of {} entries with total size {} bytes to {}", logPeerId(), entriesHashCode, entries.size(), size, replicationClusterId); } - sinkPeer = replicationSinkMgr.getReplicationSink(); + sinkPeer = getReplicationSink(); AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer(); try { - ReplicationProtbufUtil.replicateWALEntry(rsAdmin, + ReplicationProtobufUtil.replicateWALEntry(rsAdmin, entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir, hfileArchiveDir, timeout); if (LOG.isTraceEnabled()) { @@ -644,10 +575,10 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) } throw e; } - replicationSinkMgr.reportSinkSuccess(sinkPeer); + reportSinkSuccess(sinkPeer); } catch (IOException ioe) { if (sinkPeer != null) { - replicationSinkMgr.reportBadSink(sinkPeer); + reportBadSink(sinkPeer); } throw ioe; } @@ -674,7 +605,6 @@ private int serialReplicateRegionEntries(List entries, int batchIndex, in return batchIndex; } - @VisibleForTesting protected Callable createReplicator(List entries, int batchIndex, int timeout) { return isSerial ? 
() -> serialReplicateRegionEntries(entries, batchIndex, timeout) : () -> replicateEntries(entries, batchIndex, timeout); @@ -683,5 +613,4 @@ protected Callable createReplicator(List entries, int batchIndex private String logPeerId(){ return "[Source for peer " + this.ctx.getPeerId() + "]:"; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 0f73576feaf8..098418da950b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * This class is for maintaining the various replication statistics for a source and publishing them * through the metrics interfaces. @@ -450,7 +448,7 @@ public String getMetricsName() { return globalSourceSource.getMetricsName(); } - @VisibleForTesting + @InterfaceAudience.Private public Map getSingleSourceSourceByTable() { return singleSourceSourceByTable; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java new file mode 100644 index 000000000000..4ad41fc69838 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/NoopReplicationQueueStorage.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.replication.ReplicationException; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Noop queue storage -- does nothing. 
+ */ +@InterfaceAudience.Private +class NoopReplicationQueueStorage implements ReplicationQueueStorage { + NoopReplicationQueueStorage() {} + + @Override + public void removeQueue(ServerName serverName, String queueId) throws ReplicationException {} + + @Override + public void addWAL(ServerName serverName, String queueId, String fileName) + throws ReplicationException {} + + @Override + public void removeWAL(ServerName serverName, String queueId, String fileName) + throws ReplicationException { } + + @Override + public void setWALPosition(ServerName serverName, String queueId, String fileName, long position, + Map lastSeqIds) throws ReplicationException {} + + @Override + public long getLastSequenceId(String encodedRegionName, String peerId) + throws ReplicationException { + return 0; + } + + @Override + public void setLastSequenceIds(String peerId, Map lastSeqIds) + throws ReplicationException {} + + @Override + public void removeLastSequenceIds(String peerId) throws ReplicationException {} + + @Override + public void removeLastSequenceIds(String peerId, List encodedRegionNames) + throws ReplicationException {} + + @Override + public long getWALPosition(ServerName serverName, String queueId, String fileName) + throws ReplicationException { + return 0; + } + + @Override + public List getWALsInQueue(ServerName serverName, String queueId) + throws ReplicationException { + return Collections.EMPTY_LIST; + } + + @Override + public List getAllQueues(ServerName serverName) throws ReplicationException { + return Collections.EMPTY_LIST; + } + + @Override + public Pair> claimQueue(ServerName sourceServerName, String queueId, + ServerName destServerName) throws ReplicationException { + return null; + } + + @Override + public void removeReplicatorIfQueueIsEmpty(ServerName serverName) + throws ReplicationException {} + + @Override + public List getListOfReplicators() throws ReplicationException { + return Collections.EMPTY_LIST; + } + + @Override + public Set getAllWALs() throws ReplicationException { + return Collections.EMPTY_SET; + } + + @Override + public void addPeerToHFileRefs(String peerId) throws ReplicationException {} + + @Override + public void removePeerFromHFileRefs(String peerId) throws ReplicationException {} + + @Override + public void addHFileRefs(String peerId, List> pairs) + throws ReplicationException {} + + @Override + public void removeHFileRefs(String peerId, List files) throws ReplicationException {} + + @Override + public List getAllPeersFromHFileRefsQueue() throws ReplicationException { + return Collections.EMPTY_LIST; + } + + @Override + public List getReplicableHFiles(String peerId) throws ReplicationException { + return Collections.EMPTY_LIST; + } + + @Override + public Set getAllHFileRefs() throws ReplicationException { + return Collections.EMPTY_SET; + } + + @Override + public String getRsNode(ServerName serverName) { + return null; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 00aa026093fa..b66b7f1b3478 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, public void locateRecoveredPaths(PriorityBlockingQueue queue) throws IOException { boolean hasPathChanged = false; - PriorityBlockingQueue newPaths = - new PriorityBlockingQueue(queueSizePerGroup, new LogsComparator()); + PriorityBlockingQueue newPaths = new PriorityBlockingQueue(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); pathsLoop: for (Path path : queue) { if (fs.exists(path)) { // still in same location, don't need to do anything newPaths.add(path); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index 6e090770b756..17e7a53c6ec2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -182,7 +182,16 @@ private void getRegionLocations(CompletableFuture future, private void replicate(CompletableFuture future, RegionLocations locs, TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, List entries) { if (locs.size() == 1) { - // Could this happen? + LOG.info("Only one location for {}.{}, refresh the location cache only for meta now", + tableDesc.getTableName(), Bytes.toString(encodedRegionName)); + + // This can happen for the meta table: it may start out with no replicas and later be + // changed to multiple replicas, in which case the cached location for meta may contain + // only the primary region. If so, clean up and refresh the cached meta + // locations.
+ if (tableDesc.isMetaTable()) { + connection.getRegionLocator(tableDesc.getTableName()).clearRegionLocationCache(); + } future.complete(Long.valueOf(entries.size())); return; } @@ -368,7 +377,7 @@ public boolean replicate(ReplicateContext replicateContext) { ctx.getMetrics().incrLogEditsFiltered(skippedEdits); return true; } else { - LOG.warn("Failed to replicate all entris, retry={}", retryCounter.getAttemptTimes()); + LOG.warn("Failed to replicate all entries, retry={}", retryCounter.getAttemptTimes()); if (!retryCounter.shouldRetry()) { return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 907faba3e404..e03bbe2b1c65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.util.KeyLocker; @@ -123,7 +123,7 @@ private void replayWAL(String wal) throws IOException { try (Reader reader = getReader(wal)) { List entries = readWALEntries(reader); while (!entries.isEmpty()) { - Pair pair = ReplicationProtbufUtil + Pair pair = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 195877bf5f3c..c6b05b427082 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,20 +20,18 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.OptionalLong; import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; import org.apache.hadoop.hbase.regionserver.ReplicationSourceService; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -44,6 +42,7 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.yetus.audience.InterfaceAudience; @@ -51,15 +50,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; - /** * Gateway to Replication. Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. 
*/ @InterfaceAudience.Private -public class Replication implements ReplicationSourceService, ReplicationSinkService { +public class Replication implements ReplicationSourceService { private static final Logger LOG = LoggerFactory.getLogger(Replication.class); private boolean isReplicationForBulkLoadDataEnabled; @@ -68,13 +63,10 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer private ReplicationPeers replicationPeers; private ReplicationTracker replicationTracker; private Configuration conf; - private ReplicationSink replicationSink; private SyncReplicationPeerInfoProvider syncReplicationPeerInfoProvider; // Hosting server private Server server; - /** Statistics thread schedule pool */ - private ScheduledExecutorService scheduleThreadPool; - private int statsThreadPeriod; + private int statsPeriodInSecond; // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; private MetricsReplicationGlobalSourceSource globalMetricsSource; @@ -89,16 +81,11 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALProvider walProvider) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = ReplicationUtils.isReplicationForBulkLoadDataEnabled(this.conf); - this.scheduleThreadPool = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder() - .setNameFormat(server.getServerName().toShortString() + "Replication Statistics #%d") - .setDaemon(true) - .build()); if (this.isReplicationForBulkLoadDataEnabled) { if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) { @@ -129,12 +116,13 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, - replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, - walProvider != null ? walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(), + replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); this.syncReplicationPeerInfoProvider = new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping); PeerActionListener peerActionListener = PeerActionListener.DUMMY; + // Get the user-space WAL provider + WALProvider walProvider = walFactory != null? 
walFactory.getWALProvider(): null; if (walProvider != null) { walProvider .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); @@ -154,9 +142,8 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); } } - this.statsThreadPeriod = + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); - LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod); this.replicationLoad = new ReplicationLoad(); this.peerProcedureHandler = @@ -173,53 +160,19 @@ public PeerProcedureHandler getPeerProcedureHandler() { */ @Override public void stopReplicationService() { - join(); - } - - /** - * Join with the replication threads - */ - public void join() { this.replicationManager.join(); - if (this.replicationSink != null) { - this.replicationSink.stopReplicationSinkServices(); - } - scheduleThreadPool.shutdown(); - } - - /** - * Carry on the list of log entries down to the sink - * @param entries list of entries to replicate - * @param cells The data -- the cells -- that entries describes (the entries do not - * contain the Cells we are replicating; they are passed here on the side in this - * CellScanner). - * @param replicationClusterId Id which will uniquely identify source cluster FS client - * configurations in the replication configuration directory - * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace - * directory required for replicating hfiles - * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory - * @throws IOException - */ - @Override - public void replicateLogEntries(List entries, CellScanner cells, - String replicationClusterId, String sourceBaseNamespaceDirPath, - String sourceHFileArchiveDirPath) throws IOException { - this.replicationSink.replicateEntries(entries, cells, replicationClusterId, - sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath); } /** * If replication is enabled and this cluster is a master, * it starts - * @throws IOException */ @Override public void startReplicationService() throws IOException { this.replicationManager.init(); - this.replicationSink = new ReplicationSink(this.conf); - this.scheduleThreadPool.scheduleAtFixedRate( - new ReplicationStatisticsTask(this.replicationSink, this.replicationManager), - statsThreadPeriod, statsThreadPeriod, TimeUnit.SECONDS); + this.server.getChoreService().scheduleChore( + new ReplicationStatisticsChore("ReplicationSourceStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); LOG.info("{} started", this.server.toString()); } @@ -244,21 +197,15 @@ void addHFileRefsToQueue(TableName tableName, byte[] family, List allSources = new ArrayList<>(); allSources.addAll(this.replicationManager.getSources()); allSources.addAll(this.replicationManager.getOldSources()); - // get sink - MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics(); - this.replicationLoad.buildReplicationLoad(allSources, sinkMetrics); + this.replicationLoad.buildReplicationLoad(allSources, null); + return this.replicationLoad; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index e011e0af737c..6fb21dcfbcc0 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -36,7 +36,6 @@ public class ReplicationLoad { // Empty load instance. public static final ReplicationLoad EMPTY_REPLICATIONLOAD = new ReplicationLoad(); - private MetricsSink sinkMetrics; private List replicationLoadSourceEntries; private ClusterStatusProtos.ReplicationLoadSink replicationLoadSink; @@ -49,21 +48,22 @@ public ReplicationLoad() { /** * buildReplicationLoad * @param sources List of ReplicationSource instances for which metrics should be reported - * @param skMetrics + * @param sinkMetrics metrics of the replication sink */ public void buildReplicationLoad(final List sources, - final MetricsSink skMetrics) { - this.sinkMetrics = skMetrics; + final MetricsSink sinkMetrics) { - // build the SinkLoad - ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = + if (sinkMetrics != null) { + // build the SinkLoad + ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = ClusterStatusProtos.ReplicationLoadSink.newBuilder(); - rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); - rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimestampOfLastAppliedOp()); - rLoadSinkBuild.setTimestampStarted(sinkMetrics.getStartTimestamp()); - rLoadSinkBuild.setTotalOpsProcessed(sinkMetrics.getAppliedOps()); - this.replicationLoadSink = rLoadSinkBuild.build(); + rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); + rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimestampOfLastAppliedOp()); + rLoadSinkBuild.setTimestampStarted(sinkMetrics.getStartTimestamp()); + rLoadSinkBuild.setTotalOpsProcessed(sinkMetrics.getAppliedOps()); + this.replicationLoadSink = rLoadSinkBuild.build(); + } this.replicationLoadSourceEntries = new ArrayList<>(); for (ReplicationSourceInterface source : sources) { @@ -157,5 +157,4 @@ public List getReplicationLoadSourceE public String toString() { return this.sourceToString() + System.getProperty("line.separator") + this.sinkToString(); } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java deleted file mode 100644 index db12dc0a6fdf..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication.regionserver; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; - -/** - * Maintains a collection of peers to replicate to, and randomly selects a - * single peer to replicate to per set of data to replicate. Also handles - * keeping track of peer availability. - */ -@InterfaceAudience.Private -public class ReplicationSinkManager { - - private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkManager.class); - - /** - * Default maximum number of times a replication sink can be reported as bad before - * it will no longer be provided as a sink for replication without the pool of - * replication sinks being refreshed. - */ - static final int DEFAULT_BAD_SINK_THRESHOLD = 3; - - /** - * Default ratio of the total number of peer cluster region servers to consider - * replicating to. - */ - static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; - - - private final AsyncClusterConnection conn; - - private final HBaseReplicationEndpoint endpoint; - - // Count of "bad replication sink" reports per peer sink - private final Map badReportCounts; - - // Ratio of total number of potential peer region servers to be used - private final float ratio; - - // Maximum number of times a sink can be reported as bad before the pool of - // replication sinks is refreshed - private final int badSinkThreshold; - - // A timestamp of the last time the list of replication peers changed - private long lastUpdateToPeers; - - // The current pool of sinks to which replication can be performed - private List sinks = Lists.newArrayList(); - - /** - * Instantiate for a single replication peer cluster. - * @param conn connection to the peer cluster - * @param endpoint replication endpoint for inter cluster replication - * @param conf HBase configuration, used for determining replication source ratio and bad peer - * threshold - */ - public ReplicationSinkManager(AsyncClusterConnection conn, HBaseReplicationEndpoint endpoint, - Configuration conf) { - this.conn = conn; - this.endpoint = endpoint; - this.badReportCounts = Maps.newHashMap(); - this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); - this.badSinkThreshold = - conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); - } - - /** - * Get a randomly-chosen replication sink to replicate to. 
- * @return a replication sink to replicate to - */ - public synchronized SinkPeer getReplicationSink() throws IOException { - if (endpoint.getLastRegionServerUpdate() > this.lastUpdateToPeers || sinks.isEmpty()) { - LOG.info("Current list of sinks is out of date or empty, updating"); - chooseSinks(); - } - - if (sinks.isEmpty()) { - throw new IOException("No replication sinks are available"); - } - ServerName serverName = sinks.get(ThreadLocalRandom.current().nextInt(sinks.size())); - return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); - } - - /** - * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it - * failed). If a single SinkPeer is reported as bad more than - * replication.bad.sink.threshold times, it will be removed - * from the pool of potential replication targets. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it - */ - public synchronized void reportBadSink(SinkPeer sinkPeer) { - ServerName serverName = sinkPeer.getServerName(); - int badReportCount = (badReportCounts.containsKey(serverName) - ? badReportCounts.get(serverName) : 0) + 1; - badReportCounts.put(serverName, badReportCount); - if (badReportCount > badSinkThreshold) { - this.sinks.remove(serverName); - if (sinks.isEmpty()) { - chooseSinks(); - } - } - } - - /** - * Report that a {@code SinkPeer} successfully replicated a chunk of data. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it - */ - public synchronized void reportSinkSuccess(SinkPeer sinkPeer) { - badReportCounts.remove(sinkPeer.getServerName()); - } - - /** - * Refresh the list of sinks. - */ - public synchronized void chooseSinks() { - List slaveAddresses = endpoint.getRegionServers(); - if(slaveAddresses.isEmpty()){ - LOG.warn("No sinks available at peer. Will not be able to replicate"); - } - Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); - int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); - sinks = slaveAddresses.subList(0, numSinks); - lastUpdateToPeers = System.currentTimeMillis(); - badReportCounts.clear(); - } - - public synchronized int getNumSinks() { - return sinks.size(); - } - - @VisibleForTesting - protected List getSinksForTesting() { - return Collections.unmodifiableList(sinks); - } - - /** - * Wraps a replication region server sink to provide the ability to identify - * it. 
- */ - public static class SinkPeer { - private ServerName serverName; - private AsyncRegionServerAdmin regionServer; - - public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) { - this.serverName = serverName; - this.regionServer = regionServer; - } - - ServerName getServerName() { - return serverName; - } - - public AsyncRegionServerAdmin getRegionServer() { - return regionServer; - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index dc0276dc7075..317db6628f59 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -38,9 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; - import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -67,10 +64,10 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -129,7 +126,9 @@ public class ReplicationSource implements ReplicationSourceInterface { //so that it doesn't try submit another initialize thread. //NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - + //Flag that signalizes uncaught error happening while starting up the source + // and a retry should be attempted + private AtomicBoolean retryStartup = new AtomicBoolean(false); /** * A filter (or a chain of filters) for WAL entries; filters out edits. @@ -141,7 +140,6 @@ public class ReplicationSource implements ReplicationSourceInterface { private long defaultBandwidth; private long currentBandwidth; private WALFileLengthProvider walFileLengthProvider; - @VisibleForTesting protected final ConcurrentHashMap workerThreads = new ConcurrentHashMap<>(); @@ -223,6 +221,7 @@ public void init(Configuration conf, FileSystem fs, ReplicationSourceManager man this.replicationQueueInfo = new ReplicationQueueInfo(queueId); this.logQueueWarnThreshold = this.conf.getInt("replication.source.log.queue.warn", 2); + // A defaultBandwidth of '0' means no bandwidth; i.e. no throttling. 
defaultBandwidth = this.conf.getLong("replication.source.per.peer.node.bandwidth", 0); currentBandwidth = getCurrentBandwidth(); this.throttler = new ReplicationThrottler((double) currentBandwidth / 10.0); @@ -249,34 +248,36 @@ public void enqueueLog(Path wal) { LOG.trace("NOT replicating {}", wal); return; } - String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); - PriorityBlockingQueue queue = queues.get(logPrefix); + // Use WAL prefix as the WALGroupId for this peer. + String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); + PriorityBlockingQueue queue = queues.get(walPrefix); if (queue == null) { - queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); + queue = new PriorityBlockingQueue<>(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); - queues.put(logPrefix, queue); + queues.put(walPrefix, queue); if (this.isSourceActive() && this.walEntryFilter != null) { // new wal group observed after source startup, start a new worker thread to track it // notice: it's possible that wal enqueued when this.running is set but worker thread // still not launched, so it's necessary to check workerThreads before start the worker - tryStartNewShipper(logPrefix, queue); + tryStartNewShipper(walPrefix, queue); } } else { queue.put(wal); } if (LOG.isTraceEnabled()) { - LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), logPrefix, + LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), walPrefix, this.replicationQueueInfo.getQueueId()); } this.metrics.incrSizeOfLogQueue(); // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " - + "replication.source.log.queue.warn: {}", logPeerId(), - logPrefix, queueSize, logQueueWarnThreshold); + LOG.warn("{} WAL group {} queue size: {} exceeds value of " + + "replication.source.log.queue.warn {}", logPeerId(), walPrefix, queueSize, + logQueueWarnThreshold); } } @@ -372,16 +373,10 @@ private void initializeWALEntryFilter(UUID peerClusterId) { private void tryStartNewShipper(String walGroupId, PriorityBlockingQueue queue) { workerThreads.compute(walGroupId, (key, value) -> { if (value != null) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "{} Someone has beat us to start a worker thread for wal group {}", - logPeerId(), key); - } + LOG.debug("{} preempted start of shipping worker walGroupId={}", logPeerId(), walGroupId); return value; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("{} Starting up worker for wal group {}", logPeerId(), key); - } + LOG.debug("{} starting shipping worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId, queue); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, queue, worker.getStartPosition()); @@ -457,10 +452,8 @@ private ReplicationSourceWALReader createNewWALReader(String walGroupId, /** * Call after {@link #initializeWALEntryFilter(UUID)} else it will be null. - * @return The WAL Entry Filter Chain this ReplicationSource will use on WAL files filtering - * out WALEntry edits. + * @return WAL Entry Filter Chain to use on WAL files filtering *out* WALEntry edits. 
*/ - @VisibleForTesting WALEntryFilter getWalEntryFilter() { return walEntryFilter; } @@ -527,7 +520,7 @@ private void checkBandwidthChangeAndResetThrottler() { private long getCurrentBandwidth() { long peerBandwidth = replicationPeer.getPeerBandwidth(); - // user can set peer bandwidth to 0 to use default bandwidth + // User can set peer bandwidth to 0 to use default bandwidth. return peerBandwidth != 0 ? peerBandwidth : defaultBandwidth; } @@ -577,6 +570,7 @@ private void initialize() { if (sleepForRetries("Error starting ReplicationEndpoint", sleepMultiplier)) { sleepMultiplier++; } else { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new RuntimeException("Exhausted retries to start replication endpoint."); } @@ -584,6 +578,7 @@ private void initialize() { } if (!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } @@ -607,14 +602,15 @@ private void initialize() { } if(!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} Source: {}, is now replicating from cluster: {}; to peer cluster: {};", - logPeerId(), this.replicationQueueInfo.getQueueId(), clusterId, peerClusterId); - + LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", + logPeerId(), this.replicationQueueInfo.getQueueId(), this.queues.size(), clusterId, + peerClusterId); initializeWALEntryFilter(peerClusterId); - // start workers + // Start workers for (Map.Entry> entry : queues.entrySet()) { String walGroupId = entry.getKey(); PriorityBlockingQueue queue = entry.getValue(); @@ -624,26 +620,38 @@ private void initialize() { } @Override - public void startup() { - //Flag that signalizes uncaught error happening while starting up the source - // and a retry should be attempted - MutableBoolean retryStartup = new MutableBoolean(true); + public ReplicationSourceInterface startup() { + if (this.sourceRunning) { + return this; + } this.sourceRunning = true; - do { - if(retryStartup.booleanValue()) { - retryStartup.setValue(false); - startupOngoing.set(true); - // mark we are running now - initThread = new Thread(this::initialize); - Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - sourceRunning = false; - uncaughtException(t, e, null, null); - retryStartup.setValue(!this.abortOnError); - }); - } - } while (this.startupOngoing.get() && !this.abortOnError); + startupOngoing.set(true); + initThread = new Thread(this::initialize); + Threads.setDaemonThreadRunning(initThread, + Thread.currentThread().getName() + ".replicationSource," + this.queueId, + (t,e) -> { + //if first initialization attempt failed, and abortOnError is false, we will + //keep looping in this thread until initialize eventually succeeds, + //while the server main startup one can go on with its work. 
+ sourceRunning = false; + uncaughtException(t, e, null, null); + retryStartup.set(!this.abortOnError); + do { + if(retryStartup.get()) { + this.sourceRunning = true; + startupOngoing.set(true); + retryStartup.set(false); + try { + initialize(); + } catch(Throwable error){ + sourceRunning = false; + uncaughtException(t, error, null, null); + retryStartup.set(!this.abortOnError); + } + } + } while ((this.startupOngoing.get() || this.retryStartup.get()) && !this.abortOnError); + }); + return this; } @Override @@ -661,7 +669,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, + boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { @@ -677,6 +686,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, bool Threads.shutdown(initThread, this.sleepForRetries); } Collection workers = workerThreads.values(); + for (ReplicationSourceShipper worker : workers) { worker.stopWorker(); if(worker.entryReader != null) { @@ -684,6 +694,10 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, bool } } + if (this.replicationEndpoint != null) { + this.replicationEndpoint.stop(); + } + for (ReplicationSourceShipper worker : workers) { if (worker.isAlive() || worker.entryReader.isAlive()) { try { @@ -702,11 +716,11 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, bool worker.entryReader.interrupt(); } } + //If worker is already stopped but there was still entries batched, + //we need to clear buffer used for non processed entries + worker.clearWALEntryBatch(); } - if (this.replicationEndpoint != null) { - this.replicationEndpoint.stop(); - } if (join) { for (ReplicationSourceShipper worker : workers) { Threads.shutdown(worker, this.sleepForRetries); @@ -751,31 +765,6 @@ public boolean isSourceActive() { return !this.server.isStopped() && this.sourceRunning; } - /** - * Comparator used to compare logs together based on their start time - */ - public static class LogsComparator implements Comparator { - - @Override - public int compare(Path o1, Path o2) { - return Long.compare(getTS(o1), getTS(o2)); - } - - /** - *
<p>
    - * Split a path to get the start time
    - * </p>
    - * <p>
    - * For example: 10.20.20.171%3A60020.1277499063250
    - * </p>
    - * @param p path to split - * @return start time - */ - private static long getTS(Path p) { - return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName()); - } - } - public ReplicationQueueInfo getReplicationQueueInfo() { return replicationQueueInfo; } @@ -846,7 +835,8 @@ Server getServer() { return server; } - ReplicationQueueStorage getQueueStorage() { + @Override + public ReplicationQueueStorage getReplicationQueueStorage() { return queueStorage; } @@ -855,6 +845,6 @@ void removeWorker(ReplicationSourceShipper worker) { } private String logPeerId(){ - return "[Source for peer " + this.getPeer().getId() + "]:"; + return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java index d613049d3893..8863f141f1ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java @@ -19,19 +19,22 @@ package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; /** * Constructs a {@link ReplicationSourceInterface} + * Note, not used to create specialized ReplicationSources + * @see CatalogReplicationSource */ @InterfaceAudience.Private -public class ReplicationSourceFactory { - +public final class ReplicationSourceFactory { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSourceFactory.class); + private ReplicationSourceFactory() {} + static ReplicationSourceInterface create(Configuration conf, String queueId) { ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queueId); boolean isQueueRecovered = replicationQueueInfo.isQueueRecovered(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 0bd90cf1ee89..27e4b79c141b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -43,7 +42,6 @@ */ @InterfaceAudience.Private public interface ReplicationSourceInterface { - /** * Initializer for the source * @param conf the configuration to use @@ -76,7 +74,7 @@ void addHFileRefs(TableName tableName, byte[] family, List> pai /** * Start the replication */ - void startup(); + ReplicationSourceInterface startup(); /** * End the replication @@ -174,7 +172,6 @@ default boolean isSyncReplication() { /** * Try to throttle when the peer config with a bandwidth * @param batchSize entries size will be pushed - * @throws InterruptedException */ void tryThrottle(int batchSize) throws InterruptedException; @@ -206,4 +203,21 @@ default Map getWalGroupStatus() { 
default boolean isRecovered() { return false; } + + /** + * @return The instance of queueStorage used by this ReplicationSource. + */ + ReplicationQueueStorage getReplicationQueueStorage(); + + /** + * Log the current position to storage. Also clean old logs from the replication queue. + * Use to bypass the default call to + * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, + * WALEntryBatch)} whem implementation does not need to persist state to backing storage. + * @param entryBatch the wal entry batch we just shipped + * @return The instance of queueStorage used by this ReplicationSource. + */ + default void logPositionAndCleanOldLogs(WALEntryBatch entryBatch) { + getSourceManager().logPositionAndCleanOldLogs(this, entryBatch); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 0940b5a1abcc..303a091ac98f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.OptionalLong; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; @@ -40,6 +41,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -49,6 +51,9 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationListener; import org.apache.hadoop.hbase.replication.ReplicationPeer; @@ -62,14 +67,17 @@ import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -126,7 +134,15 @@ public class ReplicationSourceManager implements ReplicationListener { private final ConcurrentMap 
sources; // List of all the sources we got from died RSs private final List oldsources; + + /** + * Storage for queues that need persistance; e.g. Replication state so can be recovered + * after a crash. queueStorage upkeep is spread about this class and passed + * to ReplicationSource instances for these to do updates themselves. Not all ReplicationSource + * instances keep state. + */ private final ReplicationQueueStorage queueStorage; + private final ReplicationTracker replicationTracker; private final ReplicationPeers replicationPeers; // UUID for this cluster @@ -153,7 +169,7 @@ public class ReplicationSourceManager implements ReplicationListener { private final Path logDir; // Path to the wal archive private final Path oldLogDir; - private final WALFileLengthProvider walFileLengthProvider; + private final WALFactory walFactory; // The number of ms that we wait before moving znodes, HBASE-3596 private final long sleepBeforeFailover; // Homemade executer service for replication @@ -173,22 +189,29 @@ public class ReplicationSourceManager implements ReplicationListener { private final long totalBufferLimit; private final MetricsReplicationGlobalSourceSource globalMetrics; + /** + * A special ReplicationSource for hbase:meta Region Read Replicas. + * Usually this reference remains empty. If an hbase:meta Region is opened on this server, we + * will create an instance of a hbase:meta CatalogReplicationSource and it will live the life of + * the Server thereafter; i.e. we will not shut it down even if the hbase:meta moves away from + * this server (in case it later gets moved back). We synchronize on this instance testing for + * presence and if absent, while creating so only created and started once. + */ + AtomicReference catalogReplicationSource = new AtomicReference<>(); + /** * Creates a replication manager and sets the watch on all the other registered region servers * @param queueStorage the interface for manipulating replication queues - * @param replicationPeers - * @param replicationTracker * @param conf the configuration to use * @param server the server for this region server * @param fs the file system to use * @param logDir the directory that contains all wal directories of live RSs * @param oldLogDir the directory where old logs are archived - * @param clusterId */ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, ReplicationPeers replicationPeers, ReplicationTracker replicationTracker, Configuration conf, Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID clusterId, - WALFileLengthProvider walFileLengthProvider, + WALFactory walFactory, SyncReplicationPeerMappingManager syncReplicationPeerMappingManager, MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { this.sources = new ConcurrentHashMap<>(); @@ -206,7 +229,7 @@ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, // 30 seconds this.sleepBeforeFailover = conf.getLong("replication.sleep.before.failover", 30000); this.clusterId = clusterId; - this.walFileLengthProvider = walFileLengthProvider; + this.walFactory = walFactory; this.syncReplicationPeerMappingManager = syncReplicationPeerMappingManager; this.replicationTracker.registerListener(this); // It's preferable to failover 1 RS at a time, but with good zk servers @@ -260,8 +283,7 @@ private void adoptAbandonedQueues() { if (currentReplicators == null || currentReplicators.isEmpty()) { return; } - List otherRegionServers = replicationTracker.getListOfRegionServers().stream() - 
.map(ServerName::valueOf).collect(Collectors.toList()); + List otherRegionServers = replicationTracker.getListOfRegionServers(); LOG.info( "Current list of replicators: " + currentReplicators + " other RSs: " + otherRegionServers); @@ -346,18 +368,21 @@ public void removePeer(String peerId) { } /** - * Factory method to create a replication source - * @param queueId the id of the replication queue - * @return the created source + * @return a new 'classic' user-space replication source. + * @param queueId the id of the replication queue to associate the ReplicationSource with. + * @see #createCatalogReplicationSource(RegionInfo) for creating a ReplicationSource for meta. */ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer replicationPeer) throws IOException { ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueId); - - MetricsSource metrics = new MetricsSource(queueId); - // init replication source + // Init the just created replication source. Pass the default walProvider's wal file length + // provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica + // replication, see #createCatalogReplicationSource(). + WALFileLengthProvider walFileLengthProvider = + this.walFactory.getWALProvider() != null? + this.walFactory.getWALProvider().getWALFileLengthProvider() : p -> OptionalLong.empty(); src.init(conf, fs, this, queueStorage, replicationPeer, server, queueId, clusterId, - walFileLengthProvider, metrics); + walFileLengthProvider, new MetricsSource(queueId)); return src; } @@ -368,7 +393,6 @@ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer * @param peerId the id of the replication peer * @return the source that was created */ - @VisibleForTesting ReplicationSourceInterface addSource(String peerId) throws IOException { ReplicationPeer peer = replicationPeers.getPeer(peerId); ReplicationSourceInterface src = createSource(peerId, peer); @@ -646,7 +670,6 @@ public void logPositionAndCleanOldLogs(ReplicationSourceInterface source, * @param inclusive whether we should also remove the given log file * @param source the replication source */ - @VisibleForTesting void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface source) { String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log); if (source.isRecovered()) { @@ -750,7 +773,6 @@ private void cleanOldLogs(NavigableSet wals, ReplicationSourceInterface } // public because of we call it in TestReplicationEmptyWALRecovery - @VisibleForTesting public void preLogRoll(Path newLog) throws IOException { String logName = newLog.getName(); String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(logName); @@ -800,7 +822,6 @@ public void preLogRoll(Path newLog) throws IOException { } // public because of we call it in TestReplicationEmptyWALRecovery - @VisibleForTesting public void postLogRoll(Path newLog) throws IOException { // This only updates the sources we own, not the recovered ones for (ReplicationSourceInterface source : this.sources.values()) { @@ -852,7 +873,6 @@ class NodeFailoverWorker extends Thread { // the rs will abort (See HBASE-20475). 
private final Map peersSnapshot; - @VisibleForTesting public NodeFailoverWorker(ServerName deadRS) { super("Failover-for-" + deadRS); this.deadRS = deadRS; @@ -978,7 +998,7 @@ public void run() { wals.add(wal); } oldsources.add(src); - LOG.trace("Added source for recovered queue: " + src.getQueueId()); + LOG.info("Added source for recovered queue {}", src.getQueueId()); for (String wal : walsSet) { LOG.trace("Enqueueing log from recovered queue for source: " + src.getQueueId()); src.enqueueLog(new Path(oldLogDir, wal)); @@ -1007,7 +1027,6 @@ public void join() { * Get a copy of the wals of the normal sources on this rs * @return a sorted set of wal names */ - @VisibleForTesting public Map>> getWALs() { return Collections.unmodifiableMap(walsById); } @@ -1016,7 +1035,6 @@ public Map>> getWALs() { * Get a copy of the wals of the recovered sources on this rs * @return a sorted set of wal names */ - @VisibleForTesting Map>> getWalsByIdRecoveredQueues() { return Collections.unmodifiableMap(walsByIdRecoveredQueues); } @@ -1041,12 +1059,10 @@ public List getOldSources() { * Get the normal source for a given peer * @return the normal source for the give peer if it exists, otherwise null. */ - @VisibleForTesting public ReplicationSourceInterface getSource(String peerId) { return this.sources.get(peerId); } - @VisibleForTesting List getAllQueues() throws IOException { List allQueues = Collections.emptyList(); try { @@ -1057,21 +1073,18 @@ List getAllQueues() throws IOException { return allQueues; } - @VisibleForTesting int getSizeOfLatestPath() { synchronized (latestPaths) { return latestPaths.size(); } } - @VisibleForTesting Set getLastestPath() { synchronized (latestPaths) { return Sets.newHashSet(latestPaths.values()); } } - @VisibleForTesting public AtomicLong getTotalBufferUsed() { return totalBufferUsed; } @@ -1154,4 +1167,76 @@ int activeFailoverTaskCount() { MetricsReplicationGlobalSourceSource getGlobalMetrics() { return this.globalMetrics; } + + /** + * Add an hbase:meta Catalog replication source. Called on open of an hbase:meta Region. + * Create it once only. If exists already, use the existing one. + * @see #removeCatalogReplicationSource(RegionInfo) + * @see #addSource(String) This is specialization on the addSource method. + */ + public ReplicationSourceInterface addCatalogReplicationSource(RegionInfo regionInfo) + throws IOException { + // Poor-man's putIfAbsent + synchronized (this.catalogReplicationSource) { + ReplicationSourceInterface rs = this.catalogReplicationSource.get(); + return rs != null ? rs : + this.catalogReplicationSource.getAndSet(createCatalogReplicationSource(regionInfo)); + } + } + + /** + * Remove the hbase:meta Catalog replication source. + * Called when we close hbase:meta. + * @see #addCatalogReplicationSource(RegionInfo regionInfo) + */ + public void removeCatalogReplicationSource(RegionInfo regionInfo) { + // Nothing to do. Leave any CatalogReplicationSource in place in case an hbase:meta Region + // comes back to this server. + } + + /** + * Create, initialize, and start the Catalog ReplicationSource. + * Presumes called one-time only (caller must ensure one-time only call). + * This ReplicationSource is NOT created via {@link ReplicationSourceFactory}. + * @see #addSource(String) This is a specialization of the addSource call. + * @see #catalogReplicationSource for a note on this ReplicationSource's lifecycle (and more on + * why the special handling). 
+ */ + private ReplicationSourceInterface createCatalogReplicationSource(RegionInfo regionInfo) + throws IOException { + // Instantiate meta walProvider. Instantiated here or over in the #warmupRegion call made by the + // Master on a 'move' operation. Need to do extra work if we did NOT instantiate the provider. + WALProvider walProvider = this.walFactory.getMetaWALProvider(); + boolean instantiate = walProvider == null; + if (instantiate) { + walProvider = this.walFactory.getMetaProvider(); + } + // Here we do a specialization on what {@link ReplicationSourceFactory} does. There is no need + // for persisting offset into WALs up in zookeeper (via ReplicationQueueInfo) as the catalog + // read replicas feature that makes use of the source does a reset on a crash of the WAL + // source process. See "4.1 Skip maintaining zookeeper replication queue (offsets/WALs)" in the + // design doc attached to HBASE-18070 'Enable memstore replication for meta replica' for detail. + CatalogReplicationSourcePeer peer = new CatalogReplicationSourcePeer(this.conf, + this.clusterId.toString(), "meta_" + ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER); + final ReplicationSourceInterface crs = new CatalogReplicationSource(); + crs.init(conf, fs, this, new NoopReplicationQueueStorage(), peer, server, peer.getId(), + clusterId, walProvider.getWALFileLengthProvider(), new MetricsSource(peer.getId())); + // Add listener on the provider so we can pick up the WAL to replicate on roll. + WALActionsListener listener = new WALActionsListener() { + @Override public void postLogRoll(Path oldPath, Path newPath) throws IOException { + crs.enqueueLog(newPath); + } + }; + walProvider.addWALActionsListener(listener); + if (!instantiate) { + // If we did not instantiate provider, need to add our listener on already-created WAL + // instance too (listeners are passed by provider to WAL instance on creation but if provider + // created already, our listener add above is missed). And add the current WAL file to the + // Replication Source so it can start replicating it. 
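The "poor-man's putIfAbsent" in addCatalogReplicationSource above is a synchronized check-then-create on the AtomicReference, so the catalog source is built and started at most once even if several hbase:meta opens race. A self-contained sketch of just that pattern; CreateOnce and its names are illustrative, not HBase types:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class CreateOnce<T> {
  private final AtomicReference<T> ref = new AtomicReference<>();

  // Return the existing value, or build it exactly once while holding the lock.
  T getOrCreate(Supplier<T> factory) {
    synchronized (ref) {
      T existing = ref.get();
      if (existing != null) {
        return existing;
      }
      T created = factory.get();
      ref.set(created);
      return created;
    }
  }
}

Racing callers serialize on the reference itself, which is why the catalogReplicationSource javadoc stresses synchronizing on the instance rather than relying on the AtomicReference operations alone.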
+ WAL wal = walProvider.getWAL(regionInfo); + wal.registerWALActionsListener(listener); + crs.enqueueLog(((AbstractFSWAL)wal).getCurrentFileName()); + } + return crs.startup(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 45eb91c2e72e..78bf42fb9045 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -19,10 +19,11 @@ import static org.apache.hadoop.hbase.replication.ReplicationUtils.getAdaptiveTimeout; import static org.apache.hadoop.hbase.replication.ReplicationUtils.sleepForRetries; - import java.io.IOException; import java.util.List; import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.atomic.LongAccumulator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -35,7 +36,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -271,7 +271,7 @@ private boolean updateLogPosition(WALEntryBatch batch) { // position and the file will be removed soon in cleanOldLogs. if (batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) || batch.getLastWalPosition() != currentPosition) { - source.getSourceManager().logPositionAndCleanOldLogs(source, batch); + source.logPositionAndCleanOldLogs(batch); updated = true; } // if end of file is true, then we can just skip to the next file in queue. @@ -325,4 +325,56 @@ void stopWorker() { public boolean isFinished() { return state == WorkerState.FINISHED; } + + /** + * Attempts to properly update ReplicationSourceManager.totalBufferUser, + * in case there were unprocessed entries batched by the reader to the shipper, + * but the shipper didn't manage to ship those because the replication source is being terminated. + * In that case, it iterates through the batched entries and decrease the pending + * entries size from ReplicationSourceManager.totalBufferUser + *
<p/>
    + * NOTES + * 1) This method should only be called upon replication source termination. + * It blocks waiting for both shipper and reader threads termination, + * to make sure no race conditions + * when updating ReplicationSourceManager.totalBufferUser. + * + * 2) It does not attempt to terminate reader and shipper threads. Those must + * have been triggered interruption/termination prior to calling this method. + */ + void clearWALEntryBatch() { + long timeout = System.currentTimeMillis() + this.shipEditsTimeout; + while(this.isAlive() || this.entryReader.isAlive()){ + try { + if (System.currentTimeMillis() >= timeout) { + LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", + this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive()); + return; + } else { + // Wait both shipper and reader threads to stop + Thread.sleep(this.sleepForRetries); + } + } catch (InterruptedException e) { + LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. " + + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); + return; + } + } + LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0); + entryReader.entryBatchQueue.forEach(w -> { + entryReader.entryBatchQueue.remove(w); + w.getWalEntries().forEach(e -> { + long entrySizeExcludeBulkLoad = ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(e); + totalToDecrement.accumulate(entrySizeExcludeBulkLoad); + }); + }); + if( LOG.isTraceEnabled()) { + LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.", + totalToDecrement.longValue()); + } + long newBufferUsed = source.getSourceManager().getTotalBufferUsed() + .addAndGet(-totalToDecrement.longValue()); + source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java index d25ab072eb1e..a38a96a3816e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java @@ -27,8 +27,6 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Used to receive new wals. */ @@ -65,7 +63,6 @@ public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) throws IOEx * @param logKey Key that may get scoped according to its edits * @param logEdit Edits used to lookup the scopes */ - @VisibleForTesting static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) { // For bulk load replication we need meta family to know the file we want to replicate. 
if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index c71db1bf785b..be262a6d9504 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -60,7 +60,8 @@ class ReplicationSourceWALReader extends Thread { private final WALEntryFilter filter; private final ReplicationSource source; - private final BlockingQueue entryBatchQueue; + @InterfaceAudience.Private + final BlockingQueue entryBatchQueue; // max (heap) size of each batch - multiply by number of batches in queue to get total private final long replicationBatchSizeCapacity; // max count of each batch - multiply by number of batches in queue to get total @@ -246,8 +247,10 @@ private void handleEmptyWALEntryBatch() throws InterruptedException { // (highly likely we've closed the current log), we've hit the max retries, and autorecovery is // enabled, then dump the log private void handleEofException(IOException e) { + // Dump the log even if logQueue size is 1 if the source is from recovered Source + // since we don't add current log to recovered source queue so it is safe to remove. if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - logQueue.size() > 1 && this.eofAutoRecovery) { + (source.isRecovered() || logQueue.size() > 1) && this.eofAutoRecovery) { try { if (fs.getFileStatus(logQueue.peek()).getLen() == 0) { LOG.warn("Forcing removal of 0 length log in queue: " + logQueue.peek()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 98490f137dbe..a43be29e26bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -82,7 +83,8 @@ public boolean isAborted() { System.out.println("Start Replication Server start"); Replication replication = new Replication(); - replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null); + replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, + new WALFactory(conf, "test", null, false)); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init().get(); while (manager.activeFailoverTaskCount() > 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java index 6b3c34a462a4..fdc1e5414d00 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java @@ -37,7 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; @@ -140,7 +139,7 @@ public MutableLong load(String key) throws Exception { public SerialReplicationChecker(Configuration conf, ReplicationSource source) { this.peerId = source.getPeerId(); - this.storage = source.getQueueStorage(); + this.storage = source.getReplicationQueueStorage(); this.conn = source.getServer().getConnection(); this.waitTimeMs = conf.getLong(REPLICATION_SERIALLY_WAITING_KEY, REPLICATION_SERIALLY_WAITING_DEFAULT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 0454e817b0e3..a95ee13c7379 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -174,7 +174,7 @@ private void setCurrentPath(Path path) { private void tryAdvanceEntry() throws IOException { if (checkReader()) { boolean beingWritten = readNextEntryAndRecordReaderPosition(); - LOG.trace("reading wal file {}. Current open for write: {}", this.currentPath, beingWritten); + LOG.trace("Reading WAL {}; currently open for write={}", this.currentPath, beingWritten); if (currentEntry == null && !beingWritten) { // no more entries in this log file, and the file is already closed, i.e, rolled // Before dequeueing, we should always get one more attempt at reading. @@ -222,7 +222,7 @@ private boolean checkAllBytesParsed() throws IOException { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; LOG.debug( - "Reached the end of WAL file '{}'. It was not closed cleanly," + + "Reached the end of WAL {}. 
It was not closed cleanly," + " so we did not parse {} bytes of data. This is normally ok.", currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); @@ -230,7 +230,7 @@ private boolean checkAllBytesParsed() throws IOException { } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { LOG.warn( - "Processing end of WAL file '{}'. At position {}, which is too far away from" + + "Processing end of WAL {} at position {}, which is too far away from" + " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); @@ -241,7 +241,7 @@ private boolean checkAllBytesParsed() throws IOException { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of log " + this.currentPath + ", and the length of the file is " + + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); @@ -249,7 +249,7 @@ private boolean checkAllBytesParsed() throws IOException { } private void dequeueCurrentLog() throws IOException { - LOG.debug("Reached the end of log {}", currentPath); + LOG.debug("EOF, closing {}", currentPath); closeReader(); logQueue.remove(); setPosition(0); @@ -264,7 +264,7 @@ private boolean readNextEntryAndRecordReaderPosition() throws IOException { long readerPos = reader.getPosition(); OptionalLong fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); if (fileLength.isPresent() && readerPos > fileLength.getAsLong()) { - // see HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted + // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { LOG.debug("The provider tells us the valid length for " + currentPath + " is " + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java index 010fa6900525..c60faa9e5db8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java @@ -25,6 +25,9 @@ /** * Used by replication to prevent replicating unacked log entries. See * https://issues.apache.org/jira/browse/HBASE-14004 for more details. + * WALFileLengthProvider exists because we do not want to reference WALFactory and WALProvider + * directly in the replication code so in the future it will be easier to decouple them. + * Each walProvider will have its own implementation. 
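Because this is a single-method @FunctionalInterface from Path to OptionalLong, implementations are usually lambdas. A small illustrative sketch, not part of this patch (WalLengthProviderSketch and the openWals registry are hypothetical), showing a provider-backed variant beside the empty fallback that ReplicationSourceManager already uses (p -> OptionalLong.empty()):

import java.util.Map;
import java.util.OptionalLong;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;

public class WalLengthProviderSketch {
  // Hypothetical registry of WAL files currently open for write and their committed lengths.
  private final Map<Path, Long> openWals = new ConcurrentHashMap<>();

  // Nothing being written: report no length, so readers apply no cap on how far they read.
  final WALFileLengthProvider none = path -> OptionalLong.empty();

  // Provider-backed variant: only report a length for files still open for write.
  final WALFileLengthProvider fromRegistry = path -> {
    Long committedLength = openWals.get(path);
    return committedLength == null ? OptionalLong.empty() : OptionalLong.of(committedLength);
  };
}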
*/ @InterfaceAudience.Private @FunctionalInterface diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 50ddb416e911..89db7695fe21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.rsgroup; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -25,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; @@ -47,7 +47,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -174,25 +173,25 @@ public List balanceCluster( } @Override - public Map> roundRobinAssignment( - List regions, List servers) throws IOException { + @NonNull + public Map> roundRobinAssignment(List regions, + List servers) throws IOException { Map> assignments = Maps.newHashMap(); List, List>> pairs = generateGroupAssignments(regions, servers); for (Pair, List> pair : pairs) { - Map> result = this.internalBalancer - .roundRobinAssignment(pair.getFirst(), pair.getSecond()); - if (result != null) { - result.forEach((server, regionInfos) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); - } + Map> result = + this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); + result.forEach((server, regionInfos) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); } return assignments; } @Override + @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { try { Map> assignments = new TreeMap<>(); List, List>> pairs = @@ -203,8 +202,8 @@ public Map> retainAssignment(Map currentAssignmentMap.put(r, regions.get(r))); Map> pairResult = this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); - pairResult.forEach((server, rs) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); + pairResult.forEach((server, rs) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); } return assignments; } catch (IOException e) { @@ -249,7 +248,7 @@ private List, List>> generateGroupAssignments( } if (!fallbackRegions.isEmpty()) { List candidates = null; - if (fallbackEnabled) { + if (isFallbackEnabled()) { candidates = getFallBackCandidates(servers); } candidates = (candidates == null || candidates.isEmpty()) ? 
@@ -384,6 +383,9 @@ public boolean isOnline() { return this.rsGroupInfoManager.isOnline(); } + public boolean isFallbackEnabled() { + return fallbackEnabled; + } @Override public void regionOnline(RegionInfo regionInfo, ServerName sn) { @@ -395,7 +397,12 @@ public void regionOffline(RegionInfo regionInfo) { @Override public void onConfigurationChange(Configuration conf) { - //DO nothing for now + boolean newFallbackEnabled = conf.getBoolean(FALLBACK_GROUP_ENABLE_KEY, false); + if (fallbackEnabled != newFallbackEnabled) { + LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, + fallbackEnabled, newFallbackEnabled); + fallbackEnabled = newFallbackEnabled; + } } @Override @@ -407,7 +414,6 @@ public boolean isStopped() { return false; } - @VisibleForTesting public void setRsGroupInfoManager(RSGroupInfoManager rsGroupInfoManager) { this.rsGroupInfoManager = rsGroupInfoManager; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index e1d9d66e4ec3..3ef9365456fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -24,7 +24,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -33,6 +32,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.Future; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -88,7 +89,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; @@ -100,6 +100,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos; + /** * This is an implementation of {@link RSGroupInfoManager} which makes use of an HBase table as the * persistence store for the group information. 
It also makes use of zookeeper to store group @@ -124,31 +125,24 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { private static final Logger LOG = LoggerFactory.getLogger(RSGroupInfoManagerImpl.class); // Assigned before user tables - @VisibleForTesting static final TableName RSGROUP_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); - @VisibleForTesting static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + "one server in 'default' RSGroup."; /** Define the config key of retries threshold when movements failed */ - @VisibleForTesting static final String FAILED_MOVE_MAX_RETRY = "hbase.rsgroup.move.max.retry"; /** Define the default number of retries */ - @VisibleForTesting static final int DEFAULT_MAX_RETRY_VALUE = 50; private static final String RS_GROUP_ZNODE = "rsgroup"; - @VisibleForTesting static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); - @VisibleForTesting static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); - @VisibleForTesting static final String MIGRATE_THREAD_NAME = "Migrate-RSGroup-Tables"; private static final byte[] ROW_KEY = { 0 }; @@ -326,6 +320,7 @@ public synchronized Set
<Address> moveServers(Set<Address>
    servers, String srcGro String dstGroup) throws IOException { RSGroupInfo src = getRSGroupInfo(srcGroup); RSGroupInfo dst = getRSGroupInfo(dstGroup); + Set
    movedServers = new HashSet<>(); // If destination is 'default' rsgroup, only add servers that are online. If not online, drop // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a // rsgroup of dead servers that are to come back later). @@ -342,12 +337,13 @@ public synchronized Set
<Address> moveServers(Set<Address>
    servers, String srcGro } } dst.addServer(el); + movedServers.add(el); } Map newGroupMap = Maps.newHashMap(holder.groupName2Group); newGroupMap.put(src.getName(), src); newGroupMap.put(dst.getName(), dst); flushConfig(newGroupMap); - return dst.getServers(); + return movedServers; } @Override @@ -471,7 +467,7 @@ private List retrieveGroupListFromZookeeper() throws IOException { } for (String znode : children) { byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode)); - if (data.length > 0) { + if (data != null && data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); @@ -956,84 +952,129 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { /** * Move every region from servers which are currently located on these servers, but should not be * located there. - * @param servers the servers that will move to new group - * @param targetGroupName the target group name + * @param movedServers the servers that are moved to new group + * @param srcGrpServers all servers in the source group, excluding the movedServers + * @param targetGroupName the target group + * @param sourceGroupName the source group * @throws IOException if moving the server and tables fail */ - private void moveServerRegionsFromGroup(Set
    servers, String targetGroupName) - throws IOException { - moveRegionsBetweenGroups(servers, targetGroupName, rs -> getRegions(rs), info -> { - try { - String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) + private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address>
    srcGrpServers, + String targetGroupName, String sourceGroupName) throws IOException { + moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, + rs -> getRegions(rs), info -> { + try { + String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); - return groupName.equals(targetGroupName); - } catch (IOException e) { - LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName); - return false; - } - }, rs -> rs.getHostname()); + return groupName.equals(targetGroupName); + } catch (IOException e) { + LOG.warn("Failed to test group for region {} and target group {}", info, + targetGroupName); + return false; + } + }); } - private void moveRegionsBetweenGroups(Set regionsOwners, String targetGroupName, - Function> getRegionsInfo, Function validation, - Function getOwnerName) throws IOException { - boolean hasRegionsToMove; + private void moveRegionsBetweenGroups(Set regionsOwners, Set
    newRegionsOwners, + String targetGroupName, String sourceGroupName, Function> getRegionsInfo, + Function validation) throws IOException { + // Get server names corresponding to given Addresses + List movedServerNames = new ArrayList<>(regionsOwners.size()); + List srcGrpServerNames = new ArrayList<>(newRegionsOwners.size()); + for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) { + // In case region move failed in previous attempt, regionsOwners and newRegionsOwners + // can have the same servers. So for all servers below both conditions to be checked + if (newRegionsOwners.contains(serverName.getAddress())) { + srcGrpServerNames.add(serverName); + } + if (regionsOwners.contains(serverName.getAddress())) { + movedServerNames.add(serverName); + } + } + List>> assignmentFutures = new ArrayList<>(); int retry = 0; - Set allOwners = new HashSet<>(regionsOwners); Set failedRegions = new HashSet<>(); IOException toThrow = null; do { - hasRegionsToMove = false; - for (Iterator iter = allOwners.iterator(); iter.hasNext(); ) { - T owner = iter.next(); + assignmentFutures.clear(); + failedRegions.clear(); + for (ServerName owner : movedServerNames) { // Get regions that are associated with this server and filter regions by group tables. - for (RegionInfo region : getRegionsInfo.apply(owner)) { + for (RegionInfo region : getRegionsInfo.apply((T) owner.getAddress())) { if (!validation.apply(region)) { LOG.info("Moving region {}, which do not belong to RSGroup {}", - region.getShortNameToLog(), targetGroupName); + region.getShortNameToLog(), targetGroupName); + // Move region back to source RSGroup servers + ServerName dest = + masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); + if (dest == null) { + failedRegions.add(region.getRegionNameAsString()); + continue; + } + RegionPlan rp = new RegionPlan(region, owner, dest); try { - this.masterServices.getAssignmentManager().move(region); - failedRegions.remove(region.getRegionNameAsString()); + Future future = masterServices.getAssignmentManager().moveAsync(rp); + assignmentFutures.add(Pair.newPair(region, future)); } catch (IOException ioe) { - LOG.debug("Move region {} from group failed, will retry, current retry time is {}", - region.getShortNameToLog(), retry, ioe); - toThrow = ioe; failedRegions.add(region.getRegionNameAsString()); + LOG.debug("Move region {} failed, will retry, current retry time is {}", + region.getShortNameToLog(), retry, ioe); + toThrow = ioe; } - if (masterServices.getAssignmentManager().getRegionStates(). 
- getRegionState(region).isFailedOpen()) { - continue; - } - hasRegionsToMove = true; } } - - if (!hasRegionsToMove) { - LOG.info("No more regions to move from {} to RSGroup", getOwnerName.apply(owner)); - iter.remove(); - } } - - retry++; - try { - wait(1000); - } catch (InterruptedException e) { - LOG.warn("Sleep interrupted", e); - Thread.currentThread().interrupt(); + waitForRegionMovement(assignmentFutures, failedRegions, sourceGroupName, retry); + if (failedRegions.isEmpty()) { + LOG.info("All regions from {} are moved back to {}", movedServerNames, sourceGroupName); + return; + } else { + try { + wait(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + Thread.currentThread().interrupt(); + } + retry++; } - } while (hasRegionsToMove && retry <= - masterServices.getConfiguration().getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); + } while (!failedRegions.isEmpty() && retry <= masterServices.getConfiguration() + .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); //has up to max retry time or there are no more regions to move - if (hasRegionsToMove) { + if (!failedRegions.isEmpty()) { // print failed moved regions, for later process conveniently String msg = String - .format("move regions for group %s failed, failed regions: %s", targetGroupName, - failedRegions); + .format("move regions for group %s failed, failed regions: %s", sourceGroupName, + failedRegions); LOG.error(msg); throw new DoNotRetryIOException( - msg + ", just record the last failed region's cause, more details in server log", - toThrow); + msg + ", just record the last failed region's cause, more details in server log", toThrow); + } + } + + /** + * Wait for all the region move to complete. Keep waiting for other region movement + * completion even if some region movement fails. + */ + private void waitForRegionMovement(List>> regionMoveFutures, + Set failedRegions, String sourceGroupName, int retryCount) { + LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(), + sourceGroupName, retryCount); + for (Pair> pair : regionMoveFutures) { + try { + pair.getSecond().get(); + if (masterServices.getAssignmentManager().getRegionStates(). + getRegionState(pair.getFirst()).isFailedOpen()) { + failedRegions.add(pair.getFirst().getRegionNameAsString()); + } + } catch (InterruptedException e) { + //Dont return form there lets wait for other regions to complete movement. + failedRegions.add(pair.getFirst().getRegionNameAsString()); + LOG.warn("Sleep interrupted", e); + } catch (Exception e) { + failedRegions.add(pair.getFirst().getRegionNameAsString()); + LOG.error("Move region {} to group {} failed, will retry on next attempt", + pair.getFirst().getShortNameToLog(), sourceGroupName, e); + } } } @@ -1072,7 +1113,6 @@ private Map rsGroupGetRegionsInTransition(String groupName) * parent region cases. This method is invoked by {@link #balanceRSGroup} * @return A clone of current assignments for this group. */ - @VisibleForTesting Map>> getRSGroupAssignmentsByTable( TableStateManager tableStateManager, String groupName) throws IOException { Map>> result = Maps.newHashMap(); @@ -1185,7 +1225,6 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE if (StringUtils.isEmpty(targetGroupName)) { throw new ConstraintException("RSGroup cannot be null."); } - getRSGroupInfo(targetGroupName); // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. @@ -1194,9 +1233,9 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE Address firstServer = servers.iterator().next(); RSGroupInfo srcGrp = getRSGroupOfServer(firstServer); if (srcGrp == null) { - // Be careful. This exception message is tested for in TestRSGroupsBase... - throw new ConstraintException("Source RSGroup for server " + firstServer - + " does not exist."); + // Be careful. This exception message is tested for in TestRSGroupAdmin2... + throw new ConstraintException("Server " + firstServer + + " is either offline or it does not exist."); } // Only move online servers (when moving from 'default') or servers from other @@ -1230,7 +1269,7 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE // MovedServers may be < passed in 'servers'. Set
    movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); - moveServerRegionsFromGroup(movedServers, targetGroupName); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, srcGrp.getName()); LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java index 172561dea197..aec38ee49052 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java @@ -32,7 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; @@ -49,7 +48,7 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { private static final Logger LOG = LoggerFactory.getLogger(RSGroupMajorCompactionTTL.class); - @VisibleForTesting + @InterfaceAudience.Private RSGroupMajorCompactionTTL() { super(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index cb664bb2d2f8..75bc73ccdcd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -429,7 +429,6 @@ private enum OpType { DELETE("delete"), CHECK_AND_PUT("checkAndPut"), CHECK_AND_DELETE("checkAndDelete"), - INCREMENT_COLUMN_VALUE("incrementColumnValue"), APPEND("append"), INCREMENT("increment"); @@ -805,10 +804,7 @@ public void postCompletedCreateTableAction( + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " + getClass().getSimpleName() + " should be configured as the first Coprocessor"); } else { - String owner = desc.getOwnerString(); - // default the table owner to current user, if not specified. - if (owner == null) - owner = getActiveUser(c).getShortName(); + String owner = getActiveUser(c).getShortName(); final UserPermission userPermission = new UserPermission(owner, Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); // switch to the real hbase master user for doing the RPC on the ACL table @@ -907,8 +903,7 @@ public void postModifyTable(ObserverContext c, Tab TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); // default the table owner to current user, if not specified. - final String owner = (currentDesc.getOwnerString() != null) ? 
currentDesc.getOwnerString() : - getActiveUser(c).getShortName(); + final String owner = getActiveUser(c).getShortName(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -1503,18 +1498,27 @@ public void preBatchMutate(ObserverContext c, // We have a failure with table, cf and q perm checks and now giving a chance for cell // perm check OpType opType; + long timestamp; if (m instanceof Put) { checkForReservedTagPresence(user, m); opType = OpType.PUT; + timestamp = m.getTimestamp(); } else if (m instanceof Delete) { opType = OpType.DELETE; + timestamp = m.getTimestamp(); + } else if (m instanceof Increment) { + opType = OpType.INCREMENT; + timestamp = ((Increment) m).getTimeRange().getMax(); + } else if (m instanceof Append) { + opType = OpType.APPEND; + timestamp = ((Append) m).getTimeRange().getMax(); } else { - // If the operation type is not Put or Delete, do nothing + // If the operation type is not Put/Delete/Increment/Append, do nothing continue; } AuthResult authResult = null; if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), - m.getFamilyCellMap(), m.getTimestamp(), Action.WRITE)) { + m.getFamilyCellMap(), timestamp, Action.WRITE)) { authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, Action.WRITE, table, m.getFamilyCellMap()); } else { @@ -1695,32 +1699,6 @@ public Result preAppend(ObserverContext c, Append return null; } - @Override - public Result preAppendAfterRowLock(final ObserverContext c, - final Append append) throws IOException { - if (append.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.APPEND, c.getEnvironment(), append.getRow(), - append.getFamilyCellMap(), append.getTimeRange().getMax(), Action.WRITE)) { - authResult = AuthResult.allow(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public Result preIncrement(final ObserverContext c, final Increment increment) @@ -1756,32 +1734,6 @@ public Result preIncrement(final ObserverContext c return null; } - @Override - public Result preIncrementAfterRowLock(final ObserverContext c, - final Increment increment) throws IOException { - if (increment.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.INCREMENT, c.getEnvironment(), increment.getRow(), - increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) { - authResult = AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.INCREMENT.toString(), 
"Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, @@ -1896,13 +1848,6 @@ public void postScannerClose(final ObserverContext scannerOwners.remove(s); } - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. - return hasMore; - } - /** * Verify, when servicing an RPC, that the caller is the scanner owner. * If so, we assume that access control is correctly enforced based on diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java index 1006e54cd9d6..268bc36fc45b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java @@ -70,7 +70,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -501,7 +500,6 @@ public static ListMultimap getTablePermissions(Configura null, false); } - @VisibleForTesting public static ListMultimap getNamespacePermissions(Configuration conf, String namespace) throws IOException { return getPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, null, null, @@ -780,7 +778,6 @@ private static List readWritableUserPermission(DataInput in, return list; } - @VisibleForTesting public static ListMultimap readUserPermission(byte[] data, Configuration conf) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java index 8ce4266748c4..72da07cee5ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; @@ -34,8 +33,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Implementation of a file cleaner that checks if a empty directory with no subdirs and subfiles is * deletable when user scan snapshot feature is enabled @@ -98,7 +95,7 @@ && 
tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName()))) { } } - @VisibleForTesting + @InterfaceAudience.Private static boolean isArchiveDataDir(Path path) { if (path != null && path.getName().equals(HConstants.BASE_NAMESPACE_DIR)) { Path parent = path.getParent(); @@ -107,12 +104,12 @@ static boolean isArchiveDataDir(Path path) { return false; } - @VisibleForTesting + @InterfaceAudience.Private static boolean isArchiveNamespaceDir(Path path) { return path != null && isArchiveDataDir(path.getParent()); } - @VisibleForTesting + @InterfaceAudience.Private static boolean isArchiveTableDir(Path path) { return path != null && isArchiveNamespaceDir(path.getParent()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index 5c4ba0d68505..acb6940697a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -26,7 +26,6 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -65,7 +64,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** @@ -179,8 +177,7 @@ public void postCompletedCreateTableAction(ObserverContext c, } } } - if (!sanityFailure) { + if (!sanityFailure && (m instanceof Put || m instanceof Delete)) { if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); List visibilityTags = labelCache.get(labelsExp); @@ -360,7 +357,7 @@ public void preBatchMutate(ObserverContext c, if (m instanceof Put) { Put p = (Put) m; p.add(cell); - } else if (m instanceof Delete) { + } else { Delete d = (Delete) m; d.add(cell); } @@ -470,35 +467,6 @@ private Pair checkForReservedVisibilityTagPresence(Cell cell, return pair; } - /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. There are - * two versions of this method one that accepts pair and other without pair. - * In case of preAppend and preIncrement the additional operations are not - * needed like checking for STRING_VIS_TAG_TYPE and hence the API without pair - * could be used. - * - * @param cell - * @throws IOException - */ - private boolean checkForReservedVisibilityTagPresence(Cell cell) throws IOException { - // Bypass this check when the operation is done by a system/super user. 
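Note: in the AccessController#preBatchMutate hunk earlier in this file, the cell-level covering-permission check now also handles Increment and Append, which appears to be what makes the dedicated preIncrementAfterRowLock/preAppendAfterRowLock overrides redundant (they are removed in the same file). Because those two mutation types carry a TimeRange rather than a single timestamp, the check uses the range's upper bound. A condensed sketch of the timestamp selection (the helper name is illustrative, not from the patch):

    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Mutation;

    // Timestamp used for the covering-cell WRITE check, per mutation type.
    // Put/Delete expose a concrete timestamp; Increment/Append only carry a
    // TimeRange, so the maximum of that range is used (the same value the removed
    // preIncrementAfterRowLock/preAppendAfterRowLock hooks passed).
    static long timestampForCellCheck(Mutation m) {
      if (m instanceof Increment) {
        return ((Increment) m).getTimeRange().getMax();
      }
      if (m instanceof Append) {
        return ((Append) m).getTimeRange().getMax();
      }
      return m.getTimestamp();
    }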
- // This is done because, while Replication, the Cells coming to the peer - // cluster with reserved - // typed tags and this is fine and should get added to the peer cluster - // table - if (isSystemOrSuperUser()) { - return true; - } - Iterator tagsItr = PrivateCellUtil.tagsIterator(cell); - while (tagsItr.hasNext()) { - if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) { - return false; - } - } - return true; - } - private void removeReplicationVisibilityTag(List tags) throws IOException { Iterator iterator = tags.iterator(); while (iterator.hasNext()) { @@ -657,36 +625,6 @@ private boolean isSystemOrSuperUser() throws IOException { return Superusers.isSuperUser(VisibilityUtils.getActiveUser()); } - @Override - public Result preAppend(ObserverContext e, Append append) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = append.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Append contains cell with reserved type tag"); - } - } - return null; - } - - @Override - public Result preIncrement(ObserverContext e, Increment increment) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Increment contains cell with reserved type tag"); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, @@ -740,13 +678,6 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc return PrivateCellUtil.createCell(newCell, tags); } - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. 
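Note: VisibilityController loses its private per-cell checkForReservedVisibilityTagPresence(Cell) helper together with the preAppend/preIncrement overrides that called it, and preBatchMutate now attaches visibility-expression tags only for Put and Delete. For reference, the scan the removed helper performed is sketched below (restated from the removed lines; the method name is changed here to mark it as illustrative):

    import java.util.Iterator;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.Tag;

    // A cell is rejected when any of its tags uses a reserved visibility tag type.
    // The original helper first exempted system/super users so that replicated cells,
    // which legitimately arrive carrying such tags, were not rejected.
    static boolean carriesNoReservedVisibilityTag(Cell cell) {
      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(cell);
      while (tagsItr.hasNext()) {
        // RESERVED_VIS_TAG_TYPES is the existing constant referenced by the removed code
        if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
          return false;
        }
      }
      return true;
    }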
- return hasMore; - } - /****************************** VisibilityEndpoint service related methods ******************************/ @Override public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index d0fc80337f85..f8e54c9c459c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -67,24 +67,13 @@ protected void processOptions(CommandLine cmd) { @Override protected int doWork() throws Exception { - Connection connection = null; - Admin admin = null; - try { - connection = ConnectionFactory.createConnection(getConf()); - admin = connection.getAdmin(); - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } finally { - if (admin != null) { - admin.close(); - } - if (connection != null) { - connection.close(); - } + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } - return 0; + return 0; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index b54eab1372a3..c059792ca68e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -343,13 +343,8 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try { - FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true); - try { - snapshot.writeTo(out); - } finally { - out.close(); - } + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info if (!fs.delete(snapshotInfo, false)) { @@ -370,15 +365,8 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try { - FSDataInputStream in = null; - try { - in = fs.open(snapshotInfo); - SnapshotDescription desc = SnapshotDescription.parseFrom(in); - return desc; - } finally { - if (in != null) in.close(); - } + try (FSDataInputStream in = fs.open(snapshotInfo)){ + return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e); } @@ -434,10 +422,8 @@ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDes } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = 
ConnectionFactory.createConnection(conf)) { - try (Admin admin = conn.getAdmin()) { - return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); - } + try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 61bf192eb894..5e82cad6b494 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -51,12 +51,11 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; @@ -197,7 +196,6 @@ public void addMobRegion(RegionInfo regionInfo) throws IOException { addMobRegion(regionInfo, visitor); } - @VisibleForTesting protected void addMobRegion(RegionInfo regionInfo, RegionVisitor visitor) throws IOException { // 1. dump region meta info into the snapshot directory final String snapshotName = desc.getName(); @@ -245,7 +243,6 @@ public void addRegion(final HRegion region) throws IOException { addRegion(region, visitor); } - @VisibleForTesting protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOException { // 1. 
dump region meta info into the snapshot directory final String snapshotName = desc.getName(); @@ -294,7 +291,6 @@ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws I addRegion(tableDir, regionInfo, visitor); } - @VisibleForTesting protected void addRegion(final Path tableDir, final RegionInfo regionInfo, RegionVisitor visitor) throws IOException { boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo); @@ -582,11 +578,8 @@ private void convertToV2SingleManifest() throws IOException { */ private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { - FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME)); - try { + try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); - } finally { - stream.close(); } } @@ -594,9 +587,7 @@ private void writeDataManifest(final SnapshotDataManifest manifest) * Read the SnapshotDataManifest file */ private SnapshotDataManifest readDataManifest() throws IOException { - FSDataInputStream in = null; - try { - in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME)); + try (FSDataInputStream in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME))) { CodedInputStream cin = CodedInputStream.newInstance(in); cin.setSizeLimit(manifestSizeLimit); return SnapshotDataManifest.parseFrom(cin); @@ -604,8 +595,6 @@ private SnapshotDataManifest readDataManifest() throws IOException { return null; } catch (InvalidProtocolBufferException e) { throw new CorruptedSnapshotException("unable to parse data manifest " + e.getMessage(), e); - } finally { - if (in != null) in.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index 4f3df2fddc90..ae914f69b5cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -93,12 +93,9 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest)); - try { + try (FSDataOutputStream stream = workingDirFs.create( + getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); - } finally { - stream.close(); } } else { LOG.warn("can't write manifest without parent dir, maybe it has been deleted by master?"); @@ -157,14 +154,10 @@ public boolean accept(Path path) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { - FSDataInputStream stream = fs.open(st.getPath()); - CodedInputStream cin = CodedInputStream.newInstance(stream); - cin.setSizeLimit(manifestSizeLimit); - - try { + try (FSDataInputStream stream = fs.open(st.getPath())) { + CodedInputStream cin = CodedInputStream.newInstance(stream); + cin.setSizeLimit(manifestSizeLimit); return SnapshotRegionManifest.parseFrom(cin); - } finally { - stream.close(); } } }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 6c62a3d6ea1d..b0b086e145a9 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -83,9 +83,9 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; @@ -94,16 +94,15 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The implementation for {@link BulkLoadHFiles}, and also can be executed from command line as a @@ -386,7 +385,7 @@ public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName, * @param lqis hfiles should be loaded * @return empty list if success, list of items to retry on recoverable failure */ - @VisibleForTesting + @InterfaceAudience.Private protected CompletableFuture> tryAtomicRegionLoad( final AsyncClusterConnection conn, final TableName tableName, boolean copyFiles, final byte[] first, Collection lqis) { @@ -434,7 +433,7 @@ protected CompletableFuture> tryAtomicRegionLoad( *

    * protected for testing. */ - @VisibleForTesting + @InterfaceAudience.Private protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, Deque queue, Multimap regionGroups, boolean copyFiles, Map item2RegionMap) throws IOException { @@ -615,6 +614,45 @@ private List splitStoreFile(LoadQueueItem item, TableDescriptor t return lqis; } + /** + * @param startEndKeys the start/end keys of regions belong to this table, the list in ascending + * order by start key + * @param key the key need to find which region belong to + * @return region index + */ + private int getRegionIndex(List> startEndKeys, byte[] key) { + int idx = Collections.binarySearch(startEndKeys, Pair.newPair(key, HConstants.EMPTY_END_ROW), + (p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst())); + if (idx < 0) { + // not on boundary, returns -(insertion index). Calculate region it + // would be in. + idx = -(idx + 1) - 1; + } + return idx; + } + + /** + * we can consider there is a region hole or overlap in following conditions. 1) if idx < 0,then + * first region info is lost. 2) if the endkey of a region is not equal to the startkey of the + * next region. 3) if the endkey of the last region is not empty. + */ + private void checkRegionIndexValid(int idx, List> startEndKeys, + TableName tableName) throws IOException { + if (idx < 0) { + throw new IOException("The first region info for table " + tableName + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if ((idx == startEndKeys.size() - 1) + && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY)) { + throw new IOException("The last region info for table " + tableName + + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + } else if (idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), + startEndKeys.get(idx + 1).getFirst()) == 0)) { + throw new IOException("The endkey of one region for table " + tableName + + " is not equal to the startkey of the next region in hbase:meta." + + "Please use hbck tool to fix it first."); + } + } + /** * Attempt to assign the given load queue item into its target region group. If the hfile boundary * no longer fits into a region, physically splits the hfile such that the new bottom half will @@ -623,7 +661,7 @@ private List splitStoreFile(LoadQueueItem item, TableDescriptor t * protected for testing * @throws IOException if an IO failure is encountered */ - @VisibleForTesting + @InterfaceAudience.Private protected Pair, String> groupOrSplit(AsyncClusterConnection conn, TableName tableName, Multimap regionGroups, LoadQueueItem item, List> startEndKeys) throws IOException { @@ -647,51 +685,30 @@ CacheConfig.DISABLED, true, getConf())) { return null; } if (Bytes.compareTo(first.get(), last.get()) > 0) { - throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) + - " > " + Bytes.toStringBinary(last.get())); - } - int idx = - Collections.binarySearch(startEndKeys, Pair.newPair(first.get(), HConstants.EMPTY_END_ROW), - (p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst())); - if (idx < 0) { - // not on boundary, returns -(insertion index). Calculate region it - // would be in. - idx = -(idx + 1) - 1; - } - int indexForCallable = idx; - - /* - * we can consider there is a region hole in following conditions. 1) if idx < 0,then first - * region info is lost. 2) if the endkey of a region is not equal to the startkey of the next - * region. 
3) if the endkey of the last region is not empty. - */ - if (indexForCallable < 0) { - throw new IOException("The first region info for table " + tableName + - " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if ((indexForCallable == startEndKeys.size() - 1) && - !Bytes.equals(startEndKeys.get(indexForCallable).getSecond(), HConstants.EMPTY_BYTE_ARRAY)) { - throw new IOException("The last region info for table " + tableName + - " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if (indexForCallable + 1 < startEndKeys.size() && - !(Bytes.compareTo(startEndKeys.get(indexForCallable).getSecond(), - startEndKeys.get(indexForCallable + 1).getFirst()) == 0)) { - throw new IOException("The endkey of one region for table " + tableName + - " is not equal to the startkey of the next region in hbase:meta." + - "Please use hbck tool to fix it first."); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) + + " > " + Bytes.toStringBinary(last.get())); } - - boolean lastKeyInRange = Bytes.compareTo(last.get(), startEndKeys.get(idx).getSecond()) < 0 || - Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY); + int firstKeyRegionIdx = getRegionIndex(startEndKeys, first.get()); + checkRegionIndexValid(firstKeyRegionIdx, startEndKeys, tableName); + boolean lastKeyInRange = + Bytes.compareTo(last.get(), startEndKeys.get(firstKeyRegionIdx).getSecond()) < 0 || Bytes + .equals(startEndKeys.get(firstKeyRegionIdx).getSecond(), HConstants.EMPTY_BYTE_ARRAY); if (!lastKeyInRange) { - Pair startEndKey = startEndKeys.get(indexForCallable); - List lqis = - splitStoreFile(item, FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), - startEndKey.getSecond()); + int lastKeyRegionIdx = getRegionIndex(startEndKeys, last.get()); + int splitIdx = (firstKeyRegionIdx + lastKeyRegionIdx) / 2; + // make sure the splitPoint is valid in case region overlap occur, maybe the splitPoint bigger + // than hfile.endkey w/o this check + if (splitIdx != firstKeyRegionIdx) { + checkRegionIndexValid(splitIdx, startEndKeys, tableName); + } + byte[] splitPoint = startEndKeys.get(splitIdx).getSecond(); + List lqis = splitStoreFile(item, + FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), splitPoint); return new Pair<>(lqis, null); } // group regions. - regionGroups.put(ByteBuffer.wrap(startEndKeys.get(idx).getFirst()), item); + regionGroups.put(ByteBuffer.wrap(startEndKeys.get(firstKeyRegionIdx).getFirst()), item); return null; } @@ -699,7 +716,7 @@ CacheConfig.DISABLED, true, getConf())) { * Split a storefile into a top and bottom half, maintaining the metadata, recreating bloom * filters, etc. 
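Note: the groupOrSplit rework above factors the lookup into getRegionIndex (a binary search over the region start keys) plus checkRegionIndexValid (hole/overlap detection), and when an HFile spans several regions it now splits at the end key of the region midway between the regions holding the file's first and last keys (splitIdx = (firstKeyRegionIdx + lastKeyRegionIdx) / 2), validating that midpoint when it differs from the first index. A self-contained sketch of the index calculation, with a worked example (names here are illustrative):

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    // Same approach as the new getRegionIndex(): binary search the sorted start keys.
    // A negative result means the key is not exactly on a region boundary;
    // -(insertionPoint) - 1 is then the region whose [start, end) range contains it.
    static int regionIndexFor(List<Pair<byte[], byte[]>> startEndKeys, byte[] key) {
      int idx = Collections.binarySearch(startEndKeys,
          Pair.newPair(key, HConstants.EMPTY_END_ROW),
          (p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst()));
      return idx < 0 ? -(idx + 1) - 1 : idx;
    }

    // Worked example for regions [,b), [b,d), [d,):
    //   key "a" -> 0, key "b" -> 1, key "c" -> 1, key "e" -> 2.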
*/ - @VisibleForTesting + @InterfaceAudience.Private static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc, byte[] splitKey, Path bottomOut, Path topOut) throws IOException { // Open reader with no block cache, and not in-memory @@ -732,8 +749,8 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index b49d2cebede1..89ad398c0fba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public @@ -33,7 +32,7 @@ static Canary create(Configuration conf, ExecutorService executor) { return new CanaryTool(conf, executor); } - @VisibleForTesting + @InterfaceAudience.Private static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Sink sink) { return new CanaryTool(conf, executor, sink); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d42e62991596..f2c2d467ec02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -99,7 +99,7 @@ import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -140,7 +140,7 @@ private void putUpWebUI() throws IOException { try { InfoServer infoServer = new InfoServer("canary", addr, port, false, conf); infoServer.addUnprivilegedServlet("canary", "/canary-status", CanaryStatusServlet.class); - infoServer.setAttribute("sink", this.sink); + infoServer.setAttribute("sink", getSink(conf, RegionStdOutSink.class)); infoServer.start(); LOG.info("Bind Canary http info server to {}:{} ", addr, port); } catch (BindException e) { @@ -791,7 +791,7 @@ public CanaryTool(ExecutorService executor) { this(executor, null); } - @VisibleForTesting + @InterfaceAudience.Private CanaryTool(ExecutorService executor, Sink sink) { this.executor = executor; this.sink = sink; @@ -979,8 +979,10 @@ public int run(String[] args) throws Exception { monitorTargets = new String[length]; System.arraycopy(args, index, monitorTargets, 0, length); } - - putUpWebUI(); + if (interval > 0) { + //Only show the web page in 
daemon mode + putUpWebUI(); + } if (zookeeperMode) { return checkZooKeeper(); } else if (regionServerMode) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java index 0e76f0ed8c23..766224e5d381 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java @@ -37,7 +37,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Admin; @@ -53,7 +52,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -166,7 +164,7 @@ public void validateClasses(ClassLoader classLoader, String[] classNames, validateClasses(classLoader, Arrays.asList(classNames), violations); } - @VisibleForTesting + @InterfaceAudience.Private protected void validateTables(ClassLoader classLoader, Admin admin, Pattern pattern, List violations) throws IOException { List tableDescriptors = admin.listTableDescriptors(pattern); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java index b932c4d92371..c7afb0e5f915 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java @@ -29,8 +29,6 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Utility methods related to BloomFilters */ @@ -91,7 +89,6 @@ public static long computeBitSize(long maxKeys, double errorRate) { * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} * @param random The random number source to use, or null to compute actual hashes */ - @VisibleForTesting public static void setRandomGeneratorForTest(Random random) { randomGeneratorForTest = random; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java index 2687d3b033a9..0ccf0f4d8b44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java @@ -66,7 +66,7 @@ public static void testKeyProvider(final Configuration conf) throws IOException throw new IOException("Key provider " + providerClassName + " failed test: " + e.getMessage(), e); } - } else if (result.booleanValue() == false) { + } else if (!result) { throw new IOException("Key provider " + providerClassName + " previously failed test"); } } @@ -91,7 +91,7 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti throw new IOException("Cipher provider " + providerClassName + " failed test: " + e.getMessage(), e); } - } else if (result.booleanValue() == false) { + } else if (!result) { throw new IOException("Cipher provider 
" + providerClassName + " previously failed test"); } } @@ -99,18 +99,23 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti /** * Check that the specified cipher can be loaded and initialized, or throw * an exception. Verifies key and cipher provider configuration as a - * prerequisite for cipher verification. + * prerequisite for cipher verification. Also verifies if encryption is enabled globally. * - * @param conf - * @param cipher - * @param key - * @throws IOException + * @param conf HBase configuration + * @param cipher chiper algorith to use for the column family + * @param key encryption key + * @throws IOException in case of encryption configuration error */ public static void testEncryption(final Configuration conf, final String cipher, byte[] key) throws IOException { if (cipher == null) { return; } + if(!Encryption.isEncryptionEnabled(conf)) { + String message = String.format("Cipher %s failed test: encryption is disabled on the cluster", + cipher); + throw new IOException(message); + } testKeyProvider(conf); testCipherProvider(conf); Boolean result = cipherResults.get(cipher); @@ -149,7 +154,7 @@ public static void testEncryption(final Configuration conf, final String cipher, cipherResults.put(cipher, false); throw new IOException("Cipher " + cipher + " failed test: " + e.getMessage(), e); } - } else if (result.booleanValue() == false) { + } else if (!result) { throw new IOException("Cipher " + cipher + " previously failed test"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index afff1c139311..4fb231234e64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.lang3.NotImplementedException; @@ -51,7 +50,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; /** @@ -81,9 +79,7 @@ public class FSTableDescriptors implements TableDescriptors { private final boolean usecache; private volatile boolean fsvisited; - @VisibleForTesting long cachehits = 0; - @VisibleForTesting long invocations = 0; /** @@ -118,23 +114,18 @@ public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean this.usecache = usecache; } - @VisibleForTesting public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOException { tryUpdateAndGetMetaTableDescriptor(conf, CommonFSUtils.getCurrentFileSystem(conf), - CommonFSUtils.getRootDir(conf), null); + CommonFSUtils.getRootDir(conf)); } public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf, - FileSystem fs, Path rootdir, - Function metaObserver) throws IOException { + FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. 
try { return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME); } catch (TableInfoMissingException e) { TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf); - if (metaObserver != null) { - builder = metaObserver.apply(builder); - } TableDescriptor td = builder.build(); LOG.info("Creating new hbase:meta table descriptor {}", td); TableName tableName = td.getTableName(); @@ -197,7 +188,6 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } - @VisibleForTesting protected boolean isUsecache() { return this.usecache; } @@ -304,7 +294,6 @@ public void update(TableDescriptor td, boolean cacheOnly) throws IOException { } } - @VisibleForTesting Path updateTableDescriptor(TableDescriptor td) throws IOException { TableName tableName = td.getTableName(); Path tableDir = getTableDir(tableName); @@ -415,7 +404,6 @@ private static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, /** * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in reverse order. */ - @VisibleForTesting static final Comparator TABLEINFO_FILESTATUS_COMPARATOR = new Comparator() { @Override @@ -427,7 +415,6 @@ public int compare(FileStatus left, FileStatus right) { /** * Return the table directory in HDFS */ - @VisibleForTesting Path getTableDir(final TableName tableName) { return CommonFSUtils.getTableDir(rootdir, tableName); } @@ -442,7 +429,6 @@ public boolean accept(Path p) { /** * Width of the sequenceid that is a suffix on a tableinfo file. */ - @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10; /** @@ -472,7 +458,6 @@ private static String formatTableInfoSequenceId(final int number) { * @param p Path to a .tableinfo file. * @return The current editid or 0 if none found. */ - @VisibleForTesting static int getTableInfoSequenceId(final Path p) { if (p == null) { return 0; @@ -492,7 +477,6 @@ static int getTableInfoSequenceId(final Path p) { * @param sequenceid * @return Name of tableinfo file. */ - @VisibleForTesting static String getTableInfoFileName(final int sequenceid) { return TABLEINFO_FILE_PREFIX + "." 
+ formatTableInfoSequenceId(sequenceid); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 31e7d3fb8b7b..413b6ba78620 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -83,16 +83,15 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.Progressable; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; @@ -108,7 +107,7 @@ public final class FSUtils { private static final int DEFAULT_THREAD_POOLSIZE = 2; /** Set to true on Windows platforms */ - @VisibleForTesting // currently only used in testing. TODO refactor into a test class + // currently only used in testing. TODO refactor into a test class public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows"); private FSUtils() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index a52f3fb1c173..28f0d5eb887b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -134,6 +134,11 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hbase.thirdparty.com.google.common.base.Joiner; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -141,12 +146,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not @@ -213,7 +212,7 @@ public class HBaseFsck extends Configured implements Closeable { * Here is where hbase-1.x used to default the lock for hbck1. 
* It puts in place a lock when it goes to write/make changes. */ - @VisibleForTesting + @InterfaceAudience.Private public static final String HBCK_LOCK_FILE = "hbase-hbck.lock"; private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5; private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds @@ -400,7 +399,7 @@ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration /** * @return Return the tmp dir this tool writes too. */ - @VisibleForTesting + @InterfaceAudience.Private public static Path getTmpDir(Configuration conf) throws IOException { return new Path(CommonFSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY); } @@ -513,7 +512,7 @@ private void unlockHbck() { RetryCounter retryCounter = lockFileRetryCounterFactory.create(); do { try { - IOUtils.closeQuietly(hbckOutFd); + Closeables.close(hbckOutFd, true); CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true); LOG.info("Finishing hbck"); return; @@ -566,7 +565,7 @@ public void connect() throws IOException { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { - IOUtils.closeQuietly(HBaseFsck.this); + IOUtils.closeQuietly(HBaseFsck.this, e -> LOG.warn("", e)); cleanupHbckZnode(); unlockHbck(); } @@ -865,9 +864,9 @@ public void close() throws IOException { zkw.close(); zkw = null; } - IOUtils.closeQuietly(admin); - IOUtils.closeQuietly(meta); - IOUtils.closeQuietly(connection); + IOUtils.closeQuietly(admin, e -> LOG.warn("", e)); + IOUtils.closeQuietly(meta, e -> LOG.warn("", e)); + IOUtils.closeQuietly(connection, e -> LOG.warn("", e)); } } @@ -3847,7 +3846,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) setRetCode(code); } } finally { - IOUtils.closeQuietly(this); + IOUtils.closeQuietly(this, e -> LOG.warn("", e)); } return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index 8b05fa9dbe32..10cc4e98d39a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -21,7 +21,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Allows multiple concurrent clients to lock on a numeric id with ReentrantReadWriteLock. 
The @@ -43,7 +42,6 @@ public abstract class IdReadWriteLock { public abstract ReentrantReadWriteLock getLock(T id); - @VisibleForTesting public void waitForWaiters(T id, int numWaiters) throws InterruptedException { for (ReentrantReadWriteLock readWriteLock;;) { readWriteLock = getLock(id); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java index e119784fb893..179b7d4a732e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java @@ -22,14 +22,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private public class IdReadWriteLockStrongRef extends IdReadWriteLock { final ConcurrentHashMap map = new ConcurrentHashMap<>(); - @VisibleForTesting @Override public ReentrantReadWriteLock getLock(T id) { ReentrantReadWriteLock existing = map.get(id); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java index 130ad1454670..5492a8537d22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java @@ -23,8 +23,6 @@ import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - @InterfaceAudience.Private public class IdReadWriteLockWithObjectPool extends IdReadWriteLock{ // The number of lock we want to easily support. It's not a maximum. 
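Note: the IdReadWriteLock changes above (and in the two implementations that follow) only drop test-visibility annotations; locking behaviour is unchanged. For readers unfamiliar with the class, typical usage is one ReentrantReadWriteLock per id, as sketched below (the no-argument IdReadWriteLockWithObjectPool constructor and its default reference type are assumptions of this sketch):

    import java.util.concurrent.locks.ReentrantReadWriteLock;
    import org.apache.hadoop.hbase.util.IdReadWriteLock;
    import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;

    class IdLockExample {
      // Equal ids resolve to the same lock instance, so concurrent readers of one id
      // proceed together while a writer for that id gets exclusive access.
      private final IdReadWriteLock<Long> idLock = new IdReadWriteLockWithObjectPool<>();

      void readGuarded(long id, Runnable body) {
        ReentrantReadWriteLock lock = idLock.getLock(id);
        lock.readLock().lock();
        try {
          body.run();
        } finally {
          lock.readLock().unlock();
        }
      }
    }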
@@ -84,7 +82,6 @@ public ReentrantReadWriteLock getLock(T id) { } /** For testing */ - @VisibleForTesting int purgeAndGetEntryPoolSize() { gc(); Threads.sleep(200); @@ -97,7 +94,6 @@ private void gc() { System.gc(); } - @VisibleForTesting public ReferenceType getReferenceType() { return this.refType; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java index 9d7cb566c653..9ade12d578c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java @@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -115,7 +115,6 @@ public void add(T key) { /** * sweep low frequency data */ - @VisibleForTesting public void sweep() { for(Map.Entry entry : data.entrySet()) { if(entry.getValue() < currentTerm) { @@ -168,7 +167,7 @@ class SweepRunnable implements Runnable { } } - @VisibleForTesting public Future getSweepFuture() { + public Future getSweepFuture() { return fut.get(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b4e586392cf2..a3a0c7b23a63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -186,7 +186,7 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD } } finally { // 3. Close the new region to flush to disk. Close log file too. 
- region.close(); + region.close(false, true); } return region.getRegionInfo(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 03e674d54cc5..778d66da63d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; @@ -66,7 +67,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; @@ -102,6 +102,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private int port; private Connection conn; private Admin admin; + private RackManager rackManager; private RegionMover(RegionMoverBuilder builder) throws IOException { this.hostname = builder.hostname; @@ -115,6 +116,12 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { setConf(builder.conf); this.conn = ConnectionFactory.createConnection(conf); this.admin = conn.getAdmin(); + // Only while running unit tests, builder.rackManager will not be null for the convenience of + // providing custom rackManager. Otherwise for regular workflow/user triggered action, + // builder.rackManager is supposed to be null. Hence, setter of builder.rackManager is + // provided as @InterfaceAudience.Private and it is commented that this is just + // to be used by unit test. + rackManager = builder.rackManager == null ? new RackManager(conf) : builder.rackManager; } private RegionMover() { @@ -122,8 +129,8 @@ private RegionMover() { @Override public void close() { - IOUtils.closeQuietly(this.admin); - IOUtils.closeQuietly(this.conn); + IOUtils.closeQuietly(this.admin, e -> LOG.warn("failed to close admin", e)); + IOUtils.closeQuietly(this.conn, e -> LOG.warn("failed to close conn", e)); } /** @@ -141,9 +148,10 @@ public static class RegionMoverBuilder { private String excludeFile = null; private String designatedFile = null; private String defaultDir = System.getProperty("java.io.tmpdir"); - @VisibleForTesting + @InterfaceAudience.Private final int port; private final Configuration conf; + private RackManager rackManager; public RegionMoverBuilder(String hostname) { this(hostname, createConf()); @@ -246,6 +254,19 @@ public RegionMoverBuilder timeout(int timeout) { return this; } + /** + * Set specific rackManager implementation. + * This setter method is for testing purpose only. 
+ * + * @param rackManager rackManager impl + * @return RegionMoverBuilder object + */ + @InterfaceAudience.Private + public RegionMoverBuilder rackManager(RackManager rackManager) { + this.rackManager = rackManager; + return this; + } + /** * This method builds the appropriate RegionMover object which can then be used to load/unload * using load and unload methods @@ -329,9 +350,31 @@ private void loadRegions(List regionsToMove) * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} + * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(false); + } + + /** + * Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}.In + * noAck mode we do not make sure that region is successfully online on the target region + * server,hence it is best effort.We do not unload regions to hostnames given in + * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions + * to hostnames provided in {@link #designatedFile}. + * While unloading regions, destination RegionServers are selected from different rack i.e + * regions should not move to any RegionServers that belong to same rack as source RegionServer. + * + * @return true if unloading succeeded, false otherwise + */ + public boolean unloadFromRack() + throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(true); + } + + private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, + ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -356,6 +399,23 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout // Remove RS present in the exclude file includeExcludeRegionServers(excludeFile, regionServers, false); + if (unloadFromRack) { + // remove regionServers that belong to same rack (as source host) since the goal is to + // unload regions from source regionServer to destination regionServers + // that belong to different rack only. 
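Note: unloadFromRack() above excludes every candidate RegionServer that shares a rack with the server being unloaded, using the RackManager built in the constructor (or the one injected through the test-only builder setter), and the operation is exposed on the command line as -o unload_from_rack per the option text further down. The index-based filter can be read as the following equivalent loop (an illustrative restatement, not the patch's exact code):

    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.RackManager;

    // Drop candidates whose rack matches the rack of the server being unloaded.
    // racks.get(i) corresponds to candidates.get(i) because both lists are read
    // before any removal; iterating backwards keeps the indices aligned.
    static void removeSameRackServers(RackManager rackManager, ServerName source,
        List<ServerName> candidates) {
      String sourceRack = rackManager.getRack(source);
      List<String> racks = rackManager.getRack(candidates);
      for (int i = candidates.size() - 1; i >= 0; i--) {
        if (i < racks.size() && racks.get(i) != null && racks.get(i).equals(sourceRack)) {
          candidates.remove(i);
        }
      }
    }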
+ String sourceRack = rackManager.getRack(server); + List racks = rackManager.getRack(regionServers); + Iterator iterator = regionServers.iterator(); + int i = 0; + while (iterator.hasNext()) { + iterator.next(); + if (racks.size() > i && racks.get(i) != null && racks.get(i).equals(sourceRack)) { + iterator.remove(); + } + i++; + } + } + // Remove decommissioned RS Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { @@ -385,8 +445,8 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout return waitTaskToFinish(unloadPool, unloadTask, "unloading"); } - @VisibleForTesting - Collection filterRSGroupServers(RSGroupInfo rsgroup, + @InterfaceAudience.Private + Collection filterRSGroupServers(RSGroupInfo rsgroup, Collection onlineServers) { if (rsgroup.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { return onlineServers; @@ -652,7 +712,7 @@ private void stripMaster(List regionServers) throws IOException { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostname().equalsIgnoreCase(hostname) && + if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && server.getAddress().getPort() == port) { iter.remove(); return server; @@ -664,7 +724,7 @@ private ServerName stripServer(List regionServers, String hostname, @Override protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); - this.addRequiredOptWithArg("o", "operation", "Expected: load/unload"); + this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", @@ -717,6 +777,8 @@ protected int doWork() throws Exception { success = rm.load(); } else if (loadUnload.equalsIgnoreCase("unload")) { success = rm.unload(); + } else if (loadUnload.equalsIgnoreCase("unload_from_rack")) { + success = rm.unloadFromRack(); } else { printUsage(); success = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index b83749d9c337..5583a477a554 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,15 +23,16 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.Reference; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint; import org.apache.hadoop.hbase.zookeeper.ZKConfig; @@ -58,7 +59,15 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { public static final String REGION_REPLICA_REPLICATION_CONF_KEY = "hbase.region.replica.replication.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION = false; - private static final String REGION_REPLICA_REPLICATION_PEER = "region_replica_replication"; + public static final String REGION_REPLICA_REPLICATION_PEER = "region_replica_replication"; + + /** + * Same as for {@link #REGION_REPLICA_REPLICATION_CONF_KEY} but for catalog replication. + */ + public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY + = "hbase.region.replica.replication.catalog.enabled"; + private static final boolean DEFAULT_REGION_REPLICA_REPLICATION_CATALOG = false; + /** * Enables or disables refreshing store files of secondary region replicas when the memory is @@ -117,7 +126,6 @@ public static boolean shouldReplayRecoveredEdits(HRegion region) { * files of the primary region, so an HFileLink is used to construct the StoreFileInfo. This * way ensures that the secondary will be able to continue reading the store files even if * they are moved to archive after compaction - * @throws IOException */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) @@ -154,47 +162,62 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, } /** - * Create replication peer for replicating to region replicas if needed. - * @param conf configuration to use - * @throws IOException + * Create replication peer for replicating user-space Region Read Replicas. + * This methods should only be called at master side. */ - public static void setupRegionReplicaReplication(Configuration conf) throws IOException { - if (!isRegionReplicaReplicationEnabled(conf)) { + public static void setupRegionReplicaReplication(MasterServices services) + throws IOException, ReplicationException { + if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) { + return; + } + if (services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER) + .isPresent()) { return; } + LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + + " not exist. 
Creating..."); + ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration())) + .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()).build(); + services.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, true); + } - try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { - ReplicationPeerConfig peerConfig = null; - try { - peerConfig = admin.getReplicationPeerConfig(REGION_REPLICA_REPLICATION_PEER); - } catch (ReplicationPeerNotFoundException e) { - LOG.warn( - "Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + " not exist", - e); - } + /** + * @return True if Region Read Replica is enabled for tn (whether hbase:meta or + * user-space tables). + */ + public static boolean isRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { + return isMetaRegionReplicaReplicationEnabled(conf, tn) || + isRegionReplicaReplicationEnabled(conf); + } - if (peerConfig == null) { - LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER - + " not exist. Creating..."); - peerConfig = new ReplicationPeerConfig(); - peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf)); - peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()); - admin.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig); - } - } + /** + * @return True if Region Read Replica is enabled for user-space tables. + */ + private static boolean isRegionReplicaReplicationEnabled(Configuration conf) { + return conf.getBoolean(REGION_REPLICA_REPLICATION_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION); } - public static boolean isRegionReplicaReplicationEnabled(Configuration conf) { - return conf.getBoolean(REGION_REPLICA_REPLICATION_CONF_KEY, - DEFAULT_REGION_REPLICA_REPLICATION); + /** + * @return True if hbase:meta Region Read Replica is enabled. + */ + public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { + return TableName.isMetaTableName(tn) && + conf.getBoolean(REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, + DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); } + /** + * @return True if wait for primary to flush is enabled for user-space tables. + */ public static boolean isRegionReplicaWaitForPrimaryFlushEnabled(Configuration conf) { return conf.getBoolean(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH); } + /** + * @return True if we are to refresh user-space hfiles in Region Read Replicas. 
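The two flags above can be set independently: the existing key governs user-space tables and the new key governs hbase:meta. A small sketch of reading them, using only the public constants and helper added or exposed by this patch (the class wrapper is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

public final class ReplicaReplicationFlagsExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // user-space read replica replication (existing key, default false)
    conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
    // hbase:meta read replica replication (key added by this patch, default false)
    conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, true);
    // true only for hbase:meta and only when the catalog key is set
    System.out.println(ServerRegionReplicaUtil
        .isMetaRegionReplicaReplicationEnabled(conf, TableName.META_TABLE_NAME));
  }
}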
+ */ public static boolean isRegionReplicaStoreFileRefreshEnabled(Configuration conf) { return conf.getBoolean(REGION_REPLICA_STORE_FILE_REFRESH, DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH); @@ -205,11 +228,4 @@ public static double getRegionReplicaStoreFileRefreshMultiplier(Configuration co DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER); } - /** - * Return the peer id used for replicating to secondary region replicas - */ - public static String getReplicationPeerId() { - return REGION_REPLICA_REPLICATION_PEER; - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 906ae454d6a8..30c07b325a17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -23,9 +23,11 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -81,10 +83,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check max file size long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit - long maxFileSize = td.getMaxFileSize(); - if (maxFileSize < 0) { - maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit); - } + // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in + // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? + conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + @@ -95,10 +98,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check flush size long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit - long flushSize = td.getMemStoreFlushSize(); - if (flushSize < 0) { - flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit); - } + // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in + // hbase-site.xml, use flushSizeLowerLimit instead to skip this check + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? 
+ conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + @@ -150,6 +154,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) warnOrThrowExceptionForFailure(logWarn, message, null); } + // Meta table shouldn't be set as read only, otherwise it will impact region assignments + if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) { + warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null); + } + for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { String message = "TTL for column family " + hcd.getNameAsString() + " must be positive."; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java index c0d34d9397e2..9be182d245f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java @@ -25,7 +25,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @@ -82,7 +82,7 @@ int getCompactionRequestsLeftToFinish() { } } - @VisibleForTesting List getQueue(ServerName serverName) { + List getQueue(ServerName serverName) { lock.readLock().lock(); try { return compactionQueues.get(serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java index aecfc37d8efa..22ec6cb89ec4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java @@ -38,7 +38,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @InterfaceAudience.Private @@ -55,7 +54,6 @@ class MajorCompactionRequest { this.region = region; } - @VisibleForTesting MajorCompactionRequest(Configuration configuration, RegionInfo region, Set stores) { this(configuration, region); @@ -81,7 +79,6 @@ void setStores(Set stores) { this.stores = stores; } - @VisibleForTesting Optional createRequest(Configuration configuration, Set stores, long timestamp) throws IOException { Set familiesToCompact = getStoresRequiringCompaction(stores, timestamp); @@ -145,7 +142,6 @@ protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family return false; } - @VisibleForTesting Connection getConnection(Configuration configuration) throws IOException { return 
ConnectionFactory.createConnection(configuration); } @@ -166,13 +162,11 @@ protected boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String fa } - @VisibleForTesting List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) throws IOException { return FSUtils.getReferenceFilePaths(fileSystem, familyDir); } - @VisibleForTesting HRegionFileSystem getFileSystem(Connection connection) throws IOException { Admin admin = connection.getAdmin(); return HRegionFileSystem.openRegionFromFileSystem(admin.getConfiguration(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java index 4d2b341ecd59..0eda45918414 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.Map; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -34,7 +33,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** @@ -55,7 +54,6 @@ static Optional newRequest(Configuration conf, RegionInf return request.createRequest(conf, htd); } - @VisibleForTesting private Optional createRequest(Configuration conf, TableDescriptor htd) throws IOException { Map familiesToCompact = getStoresRequiringCompaction(htd); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java index 151b49286c18..370a3e8de448 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java @@ -29,7 +29,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -50,7 +49,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; @@ -156,7 +155,8 @@ public void shutdown() throws Exception { LOG.info("All regions major compacted successfully"); } - @VisibleForTesting void initializeWorkQueues() throws IOException { + @InterfaceAudience.Private + void initializeWorkQueues() throws IOException { if (storesToCompact.isEmpty()) { connection.getTable(tableName).getDescriptor().getColumnFamilyNames() .forEach(a -> storesToCompact.add(Bytes.toString(a))); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java index 
321cbe03386d..7c92c530fe11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java @@ -23,7 +23,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Executors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -38,7 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; @@ -58,7 +57,7 @@ public class MajorCompactorTTL extends MajorCompactor { private TableDescriptor htd; - @VisibleForTesting + @InterfaceAudience.Private public MajorCompactorTTL(Configuration conf, TableDescriptor htd, int concurrency, long sleepForMs) throws IOException { this.connection = ConnectionFactory.createConnection(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 9a398f251acd..6a6c530c3b64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -50,6 +50,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** * This class marches through all of the region's hfiles and verifies that * they are all valid files. One just needs to instantiate the class, use @@ -180,6 +181,9 @@ protected void checkColFamDir(Path cfDir) throws IOException { missing.add(cfDir); return; } + + LOG.info("Checking Column Family Directory {}. Number of entries = {}", cfDir, hfs.size()); + for (FileStatus hfFs : hfs) { Path hf = hfFs.getPath(); checkHFile(hf); @@ -213,6 +217,9 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { missedMobFiles.add(cfDir); return; } + + LOG.info("Checking MOB Column Family Directory {}. Number of entries = {}", cfDir, hfs.size()); + for (FileStatus hfFs : hfs) { Path hf = hfFs.getPath(); checkMobFile(hf); @@ -284,6 +291,9 @@ private void checkMobRegionDir(Path regionDir) throws IOException { missedMobFiles.add(regionDir); return; } + + LOG.info("Checking MOB Region Directory {}. Number of entries = {}", regionDir, hfs.length); + for (FileStatus hfFs : hfs) { Path hf = hfFs.getPath(); checkMobColFamDir(hf); @@ -318,6 +328,8 @@ protected void checkRegionDir(Path regionDir) throws IOException { return; } + LOG.info("Checking Region Directory {}. Number of entries = {}", regionDir, cfs.size()); + for (FileStatus cfFs : cfs) { Path cfDir = cfFs.getPath(); checkColFamDir(cfDir); @@ -342,6 +354,8 @@ void checkTableDir(Path tableDir) throws IOException { return; } + LOG.info("Checking Table Directory {}. 
Number of entries (including mob) = {}", tableDir, rds.size() + 1); + // Parallelize check at the region dir level List rdcs = new ArrayList<>(rds.size() + 1); List> rdFutures; @@ -546,6 +560,8 @@ public void report(HbckErrorReporter out) { : "CORRUPTED"; // print mob-related report + out.print("Checked " + mobFilesChecked.get() + " Mob files for corruption"); + out.print(" Mob files corrupted: " + corruptedMobFiles.size()); if (inQuarantineMode) { out.print(" Mob files successfully quarantined: " + quarantinedMobFiles.size()); for (Path sq : quarantinedMobFiles) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 94ae70467793..720e2c26aa40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.wal; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -32,6 +34,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -45,7 +48,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -87,6 +90,7 @@ public interface Reader extends WAL.Reader { protected AtomicBoolean initialized = new AtomicBoolean(false); // for default wal provider, logPrefix won't change protected String logPrefix; + protected Abortable abortable; /** * We use walCreateLock to prevent wal recreation in different threads, and also prevent getWALs @@ -101,7 +105,8 @@ public interface Reader extends WAL.Reader { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -118,6 +123,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro } } logPrefix = sb.toString(); + this.abortable = abortable; doInit(conf); } @@ -210,7 +216,6 @@ public long getLogFileSize() { /** * returns the number of rolled WAL files. */ - @VisibleForTesting public static int getNumRolledLogFiles(WAL wal) { return ((AbstractFSWAL) wal).getNumRolledLogFiles(); } @@ -218,7 +223,6 @@ public static int getNumRolledLogFiles(WAL wal) { /** * returns the size of rolled WAL files. */ - @VisibleForTesting public static long getLogFileSize(WAL wal) { return ((AbstractFSWAL) wal).getLogFileSize(); } @@ -226,7 +230,6 @@ public static long getLogFileSize(WAL wal) { /** * return the current filename from the current wal. 
*/ - @VisibleForTesting public static Path getCurrentFileName(final WAL wal) { return ((AbstractFSWAL) wal).getCurrentFileName(); } @@ -234,7 +237,6 @@ public static Path getCurrentFileName(final WAL wal) { /** * request a log roll, but don't actually do it. */ - @VisibleForTesting static void requestLogRoll(final WAL wal) { ((AbstractFSWAL) wal).requestLogRoll(); } @@ -242,7 +244,6 @@ static void requestLogRoll(final WAL wal) { // should be package private; more visible for use in AbstractFSWAL public static final String WAL_FILE_NAME_DELIMITER = "."; /** The hbase:meta region's WAL filename extension */ - @VisibleForTesting public static final String META_WAL_PROVIDER_ID = ".meta"; static final String DEFAULT_PROVIDER_ID = "default"; @@ -251,32 +252,36 @@ static void requestLogRoll(final WAL wal) { public static final String SPLITTING_EXT = "-splitting"; /** - * It returns the file create timestamp from the file name. For name format see + * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for + * description. + */ + private static final Pattern WAL_FILE_NAME_PATTERN = + Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); + + /** + * Define for when no timestamp found. + */ + private static final long NO_TIMESTAMP = -1L; + + /** + * It returns the file create timestamp (the 'FileNum') from the file name. For name format see * {@link #validateWALFilename(String)} public until remaining tests move to o.a.h.h.wal * @param wal must not be null * @return the file number that is part of the WAL file name */ - @VisibleForTesting public static long extractFileNumFromWAL(final WAL wal) { - final Path walName = ((AbstractFSWAL) wal).getCurrentFileName(); - if (walName == null) { + final Path walPath = ((AbstractFSWAL) wal).getCurrentFileName(); + if (walPath == null) { throw new IllegalArgumentException("The WAL path couldn't be null"); } - Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(walName.getName()); - if (matcher.matches()) { - return Long.parseLong(matcher.group(2)); - } else { - throw new IllegalArgumentException(walName.getName() + " is not a valid wal file name"); + String name = walPath.getName(); + long timestamp = getTimestamp(name); + if (timestamp == NO_TIMESTAMP) { + throw new IllegalArgumentException(name + " is not a valid wal file name"); } + return timestamp; } - /** - * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for - * description. - */ - private static final Pattern WAL_FILE_NAME_PATTERN = - Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); - /** * A WAL file name is of the format: <wal-name>{@link #WAL_FILE_NAME_DELIMITER} * <file-creation-timestamp>[.<suffix>]. provider-name is usually made up of a @@ -288,6 +293,23 @@ public static boolean validateWALFilename(String filename) { return WAL_FILE_NAME_PATTERN.matcher(filename).matches(); } + /** + * Split a WAL filename to get a start time. WALs usually have the time we start writing to them + * with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when + * it is a WAL for the meta table. For example, WALs might look like this + * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a + * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have + * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending + * order. Here is an example: 0000000000000016310. 
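To make the naming rules above concrete, here is a tiny sketch of the expected parsing behaviour; the file names are invented for illustration, while the helper and its -1 "no timestamp" sentinel are as added in this patch.

import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public final class WalNameTimestampExample {
  public static void main(String[] args) {
    // Plain WAL name: the timestamp is the trailing numeric group -> 1277499063250
    System.out.println(AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250"));
    // Meta WAL: the '.meta' suffix is tolerated by the pattern -> 1277499063250
    System.out.println(AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250.meta"));
    // recovered.edits style name with no dot-separated timestamp -> -1 (NO_TIMESTAMP sentinel)
    System.out.println(AbstractFSWALProvider.getTimestamp("0000000000000016310"));
  }
}

The WALStartTimeComparator introduced further down simply orders WAL Paths by this parsed value.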
Allow for this. + * @param name Name of the WAL file. + * @return Timestamp or {@link #NO_TIMESTAMP}. + */ + public static long getTimestamp(String name) { + Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name); + return matcher.matches() ? Long.parseLong(matcher.group(2)): NO_TIMESTAMP; + } + /** * Construct the directory name for all WALs on a given server. Dir names currently look like this * for WALs: hbase//WALs/kalashnikov.att.net,61634,1486865297088. @@ -418,6 +440,31 @@ public static boolean isMetaFile(String p) { return p != null && p.endsWith(META_WAL_PROVIDER_ID); } + /** + * Comparator used to compare WAL files together based on their start time. + * Just compares start times and nothing else. + */ + public static class WALStartTimeComparator implements Comparator { + @Override + public int compare(Path o1, Path o2) { + return Long.compare(getTS(o1), getTS(o2)); + } + + /** + * Split a path to get the start time + * For example: 10.20.20.171%3A60020.1277499063250 + * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL + * which adds a '.syncrep' suffix. Check. + * @param p path to split + * @return start time + */ + private static long getTS(Path p) { + return getTimestamp(p.getName()); + } + } + + + public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -545,8 +592,4 @@ private static String getWALNameGroupFromWALName(String name, int group) { public static String getWALPrefixFromWALName(String name) { return getWALNameGroupFromWALName(name, 1); } - - public static long getWALStartTimeFromWALName(String name) { - return Long.parseLong(getWALNameGroupFromWALName(name, 2)); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index a5a0ee3a3225..4d89c4753844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; @@ -86,6 +87,11 @@ public void logRollRequested(WALActionsListener.RollRequestReason reason) { AbstractWALRoller.this.notifyAll(); } } + + @Override + public void postLogArchive(Path oldPath, Path newPath) throws IOException { + afterWALArchive(oldPath, newPath); + } }); } } @@ -190,7 +196,6 @@ public void run() { LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e); iter.remove(); } - afterRoll(wal); } } catch (FailedLogCloseException | ConnectException e) { abort("Failed log close in log roller", e); @@ -206,10 +211,7 @@ public void run() { LOG.info("LogRoller exiting."); } - /** - * Called after we finish rolling the give {@code wal}. 
- */ - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 062b3688d3e4..3a2ffa7600bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -65,11 +65,11 @@ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long @Override protected AsyncFSWAL createWAL() throws IOException { - return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, - eventLoopGroup, channelClass); + META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, eventLoopGroup, + channelClass); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 9623bd1c7220..50bc5fe62fb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.CellSet; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -199,8 +199,8 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) .withOutputDir(outputDir); HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(HStore.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf)). + withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). + withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). withCellComparator(isMetaTable? 
MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); return writerBuilder.withFileContext(hFileContext).build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 0ff2195eaa04..6e5a0538296c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -25,8 +25,10 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -55,7 +57,8 @@ class DisabledWALProvider implements WALProvider { WAL disabled; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java index 6348e5cc8824..0ca1219bd26f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java @@ -34,8 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Class which accumulates edits and separates them into a buffer per region while simultaneously * accounting RAM usage. Blocks if the RAM usage crosses a predefined threshold. Writer threads then @@ -129,7 +127,6 @@ void doneWriting(RegionEntryBuffer buffer) { } } - @VisibleForTesting synchronized boolean isRegionCurrentlyWriting(byte[] region) { return currentlyWriting.contains(region); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 3b91c2475cfe..e64d70f50981 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -67,7 +67,7 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, * Public because of FSHLog. Should be package-private */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable, long blocksize) throws IOException { + final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. 
Class logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, @@ -101,8 +101,8 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, @Override protected FSHLog createWAL() throws IOException { - return new FSHLog(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 764d3d521ace..20d043b6ae26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -28,7 +28,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; @@ -137,14 +139,17 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, private List listeners = new ArrayList<>(); private String providerId; private Class providerClass; + private Abortable abortable; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } this.conf = conf; this.factory = factory; + this.abortable = abortable; if (META_WAL_PROVIDER_ID.equals(providerId)) { // do not change the provider id if it is for meta @@ -171,7 +176,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro private WALProvider createProvider(String group) throws IOException { WALProvider provider = WALFactory.createProvider(providerClass); provider.init(factory, conf, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : group, this.abortable); provider.addWALActionsListener(new MetricsWAL()); return provider; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index 9859c204649f..f57ec31c531a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -36,6 +36,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; @@ -52,7 +53,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.com.google.common.collect.Streams; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -71,7 +71,6 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen private static final Logger LOG = LoggerFactory.getLogger(SyncReplicationWALProvider.class); // only for injecting errors for testcase, do not use it for other purpose. - @VisibleForTesting public static final String DUAL_WAL_IMPL = "hbase.wal.sync.impl"; private final WALProvider provider; @@ -108,11 +107,12 @@ public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider } @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } - provider.init(factory, conf, providerId); + provider.init(factory, conf, providerId, abortable); this.conf = conf; this.factory = factory; Pair> eventLoopGroupAndChannelClass = @@ -344,7 +344,6 @@ public static Optional getSyncReplicationPeerIdFromWALName(String name) } } - @VisibleForTesting WALProvider getWrappedProvider() { return provider; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 747b2770d457..2a434a73b672 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -31,8 +31,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). @@ -221,7 +219,6 @@ default void sync(long txid, boolean forceSync) throws IOException { * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal * workings. 
Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} */ - @VisibleForTesting @Deprecated long getEarliestMemStoreSeqNum(byte[] encodedRegionName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 1fae7a5d0ed9..61f36fab74af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; @@ -93,14 +91,14 @@ public class WALEdit implements HeapSize { * {@link #isCompactionMarker(Cell)} */ @Deprecated - @VisibleForTesting + @InterfaceAudience.Private public static final byte[] COMPACTION = Bytes.toBytes("HBASE::COMPACTION"); /** * @deprecated Since 2.3.0. Make it protected, internal-use only. */ @Deprecated - @VisibleForTesting + @InterfaceAudience.Private public static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); /** @@ -130,7 +128,7 @@ public class WALEdit implements HeapSize { private static final byte [] REGION_EVENT_CLOSE = createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); - @VisibleForTesting + @InterfaceAudience.Private public static final byte [] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); private final transient boolean replay; @@ -332,7 +330,7 @@ public static WALEdit createRegionEventWALEdit(RegionInfo hri, return createRegionEventWALEdit(getRowForRegion(hri), regionEventDesc); } - @VisibleForTesting + @InterfaceAudience.Private public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(rowForRegion, METAFAMILY, @@ -345,7 +343,7 @@ public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll * return something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. */ - @VisibleForTesting + @InterfaceAudience.Private public static byte [] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { return Bytes.toBytes(REGION_EVENT_PREFIX_STR + t.toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 26b87277a13a..82e974deb359 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,11 @@ import java.io.InterruptedIOException; import java.util.List; import java.util.concurrent.atomic.AtomicReference; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; @@ -36,8 +38,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Entry point for users of the Write Ahead Log. * Acts as the shim between internal use and the particular WALProvider we use to handle wal @@ -66,7 +66,7 @@ public class WALFactory { /** * Maps between configuration names for providers and implementation classes. */ - static enum Providers { + enum Providers { defaultProvider(AsyncFSWALProvider.class), filesystem(FSHLogProvider.class), multiwal(RegionGroupingProvider.class), @@ -86,6 +86,7 @@ static enum Providers { public static final String WAL_ENABLED = "hbase.regionserver.hlog.enabled"; final String factoryId; + final Abortable abortable; private final WALProvider provider; // The meta updates are written to a different wal. If this // regionserver holds meta regions, then this ref will be non-null. @@ -119,14 +120,13 @@ private WALFactory(Configuration conf) { // this instance can't create wals, just reader/writers. provider = null; factoryId = SINGLETON_ID; + this.abortable = null; } - @VisibleForTesting Providers getDefaultProvider() { return Providers.defaultProvider; } - @VisibleForTesting public Class getProviderClass(String key, String defaultValue) { try { Providers provider = Providers.valueOf(conf.get(key, defaultValue)); @@ -175,7 +175,7 @@ static WALProvider createProvider(Class clazz) throws IOE public WALFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider // for HMaster or HRegionServer which take system table only. See HBASE-19999 - this(conf, factoryId, true); + this(conf, factoryId, null, true); } /** @@ -183,11 +183,12 @@ public WALFactory(Configuration conf, String factoryId) throws IOException { * instances. * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations * to make a directory + * @param abortable the server associated with this WAL file * @param enableSyncReplicationWALProvider whether wrap the wal provider to a * {@link SyncReplicationWALProvider} */ - public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplicationWALProvider) - throws IOException { + public WALFactory(Configuration conf, String factoryId, Abortable abortable, + boolean enableSyncReplicationWALProvider) throws IOException { // until we've moved reader/writer construction down into providers, this initialization must // happen prior to provider initialization, in case they need to instantiate a reader/writer. 
timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); @@ -196,20 +197,21 @@ public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplic AbstractFSWALProvider.Reader.class); this.conf = conf; this.factoryId = factoryId; + this.abortable = abortable; // end required early initialization if (conf.getBoolean(WAL_ENABLED, true)) { WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); if (enableSyncReplicationWALProvider) { provider = new SyncReplicationWALProvider(provider); } - provider.init(this, conf, null); + provider.init(this, conf, null, this.abortable); provider.addWALActionsListener(new MetricsWAL()); this.provider = provider; } else { // special handling of existing configuration behavior. LOG.warn("Running with WAL disabled."); provider = new DisabledWALProvider(); - provider.init(this, conf, factoryId); + provider.init(this, conf, factoryId, null); } } @@ -255,8 +257,12 @@ public List getWALs() { return provider.getWALs(); } - @VisibleForTesting - WALProvider getMetaProvider() throws IOException { + /** + * Called when we lazily create a hbase:meta WAL OR from ReplicationSourceManager ahead of + * creating the first hbase:meta WAL so we can register a listener. + * @see #getMetaWALProvider() + */ + public WALProvider getMetaProvider() throws IOException { for (;;) { WALProvider provider = this.metaProvider.get(); if (provider != null) { @@ -274,7 +280,7 @@ WALProvider getMetaProvider() throws IOException { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz); - provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID); + provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID, this.abortable); provider.addWALActionsListener(new MetricsWAL()); if (metaProvider.compareAndSet(null, provider)) { return provider; @@ -286,10 +292,10 @@ WALProvider getMetaProvider() throws IOException { } /** - * @param region the region which we want to get a WAL for it. Could be null. + * @param region the region which we want to get a WAL for. Could be null. */ public WAL getWAL(RegionInfo region) throws IOException { - // use different WAL for hbase:meta + // Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up. if (region != null && region.isMetaRegion() && region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { return getMetaProvider().getWAL(region); @@ -307,7 +313,6 @@ public Reader createReader(final FileSystem fs, final Path path) throws IOExcept * to reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method * then just seek back to the last known good position. * @return A WAL reader. Close when done with it. - * @throws IOException */ public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter) throws IOException { @@ -395,7 +400,6 @@ public Writer createWALWriter(final FileSystem fs, final Path path) throws IOExc * Uses defaults. * @return an overwritable writer for recovered edits. caller should close. */ - @VisibleForTesting public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) throws IOException { return FSHLogProvider.createWriter(conf, fs, path, true); @@ -475,7 +479,6 @@ static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, * Uses defaults. * @return a writer that won't overwrite files. Caller must close. 
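Since the WALProvider.init and WALFactory changes above now thread an Abortable through, a hedged sketch of how a caller might use the new four-argument constructor; the no-op Abortable here stands in for the owning server and is purely illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;

public final class WalFactoryWithAbortableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Abortable abortable = new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
        // a real server would shut itself down here
        System.err.println("WAL asked the server to abort: " + why);
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    };
    // enableSyncReplicationWALProvider = true matches the two-argument constructor's behaviour
    WALFactory factory = new WALFactory(conf, "example-factory-id", abortable, true);
    System.out.println(factory.getWALProvider());
  }
}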
*/ - @VisibleForTesting public static Writer createWALWriter(final FileSystem fs, final Path path, final Configuration configuration) throws IOException { @@ -486,6 +489,10 @@ public final WALProvider getWALProvider() { return this.provider; } + /** + * @return Current metaProvider... may be null if not yet initialized. + * @see #getMetaProvider() + */ public final WALProvider getMetaWALProvider() { return this.metaProvider.get(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index 33e034342d7d..4c3fc4edc787 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -25,7 +25,6 @@ import java.util.NavigableMap; import java.util.TreeMap; import java.util.UUID; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -37,7 +36,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -130,7 +128,7 @@ public WALKeyImpl(final NavigableMap replicationScope) { new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope, null); } - @VisibleForTesting + @InterfaceAudience.Private public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, final long now, UUID clusterId) { List clusterIds = new ArrayList<>(1); @@ -139,7 +137,7 @@ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, lon HConstants.NO_NONCE, null, null, null); } - @VisibleForTesting + @InterfaceAudience.Private public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, final long now, UUID clusterId, MultiVersionConcurrencyControl mvcc) { List clusterIds = new ArrayList<>(1); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index a37efec610eb..07bcb1067ffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -46,7 +46,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; @@ -85,7 +85,12 @@ public class WALPrettyPrinter { // List of tables for filter private final Set tableSet; private String region; + + // exact row which needs to be filtered private String row; + // prefix of rows which needs to be filtered + private String rowPrefix; + private boolean outputOnlyRowKey; // enable in order to output a single list of transactions from several files private boolean persistentOutput; @@ -107,6 +112,7 @@ public WALPrettyPrinter() { tableSet = new HashSet<>(); region = null; row = null; + rowPrefix = null; outputOnlyRowKey = false; persistentOutput = false; firstTxn = true; @@ 
-181,6 +187,17 @@ public void setRowFilter(String row) { this.row = row; } + /** + * sets the rowPrefix key prefix by which output will be filtered + * + * @param rowPrefix + * when not null, serves as a filter; only log entries with rows + * having this prefix will be printed + */ + public void setRowPrefixFilter(String rowPrefix) { + this.rowPrefix = rowPrefix; + } + /** * Option to print the row key only in case you just need the row keys from the WAL */ @@ -301,15 +318,12 @@ public void processFile(final Configuration conf, final Path p) List> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { // add atomic operation to txn - Map op = new HashMap<>(toStringMap(cell, outputOnlyRowKey)); - if (outputValues) { - op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); - } - // check row output filter - if (row == null || ((String) op.get("row")).equals(row)) { - actions.add(op); + Map op = + new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + if (op.isEmpty()) { + continue; } - op.put("total_size_sum", cell.heapSize()); + actions.add(op); } if (actions.isEmpty()) { continue; @@ -326,15 +340,19 @@ public void processFile(final Configuration conf, final Path p) out.print(GSON.toJson(txn)); } else { // Pretty output, complete with indentation by atomic action - out.println(String.format(outputTmpl, + if (!outputOnlyRowKey) { + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + } for (int i = 0; i < actions.size(); i++) { Map op = actions.get(i); - printCell(out, op, outputValues); + printCell(out, op, outputValues, outputOnlyRowKey); } } - out.println("edit heap size: " + entry.getEdit().heapSize()); - out.println("position: " + log.getPosition()); + if (!outputOnlyRowKey) { + out.println("edit heap size: " + entry.getEdit().heapSize()); + out.println("position: " + log.getPosition()); + } } } finally { log.close(); @@ -344,9 +362,17 @@ public void processFile(final Configuration conf, final Path p) } } - public static void printCell(PrintStream out, Map op, boolean outputValues) { - out.println("row=" + op.get("row") + ", type=" + op.get("type") + ", column=" + - op.get("family") + ":" + op.get("qualifier")); + public static void printCell(PrintStream out, Map op, + boolean outputValues, boolean outputOnlyRowKey) { + String rowDetails = "row=" + op.get("row"); + if (outputOnlyRowKey) { + out.println(rowDetails); + return; + } + + rowDetails += ", column=" + op.get("family") + ":" + op.get("qualifier"); + rowDetails += ", type=" + op.get("type"); + out.println(rowDetails); if (op.get("tag") != null) { out.println(" tag: " + op.get("tag")); } @@ -356,11 +382,20 @@ public static void printCell(PrintStream out, Map op, boolean ou out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, boolean printRowKeyOnly) { + public static Map toStringMap(Cell cell, + boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); - stringMap.put("row", - Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + String rowKey = Bytes.toStringBinary(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()); + // Row and row prefix are mutually options so both cannot be true at the + // same time. 
We can include checks in the same condition + // Check if any of the filters are satisfied by the row, if not return empty map + if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || + (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + return stringMap; + } + stringMap.put("row", rowKey); if (printRowKeyOnly) { return stringMap; } @@ -372,6 +407,7 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); + stringMap.put("total_size_sum", cell.heapSize()); if (cell.getTagsLength() > 0) { List tagsString = new ArrayList<>(); Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); @@ -382,11 +418,14 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly } stringMap.put("tag", tagsString); } + if (outputValues) { + stringMap.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); + } return stringMap; } public static Map toStringMap(Cell cell) { - return toStringMap(cell, false); + return toStringMap(cell, false, null, null, false); } public static void main(String[] args) throws IOException { @@ -417,6 +456,7 @@ public static void run(String[] args) throws IOException { options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. Pass row name."); + options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); WALPrettyPrinter printer = new WALPrettyPrinter(); @@ -450,8 +490,17 @@ public static void run(String[] args) throws IOException { printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s"))); } if (cmd.hasOption("w")) { + if (cmd.hasOption("f")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } printer.setRowFilter(cmd.getOptionValue("w")); } + if (cmd.hasOption("f")) { + if (cmd.hasOption("w")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } + printer.setRowPrefixFilter(cmd.getOptionValue("f")); + } if (cmd.hasOption("g")) { printer.setPosition(Long.parseLong(cmd.getOptionValue("g"))); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index c3bd14995077..01c1d11ead70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -23,7 +23,9 @@ import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -46,7 +48,8 @@ public interface WALProvider { * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ - void init(WALFactory factory, Configuration conf, String providerId) throws IOException; + void init(WALFactory factory, Configuration conf, String providerId, Abortable server) + throws IOException; /** * @param region the region which we want to get a WAL for it. Could be null. 
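The new init(WALFactory, Configuration, String, Abortable) signature above lets a WAL provider surface unrecoverable errors to the hosting server instead of only logging them. A minimal sketch of that usage follows, assuming a hypothetical holder class; the class, field, and method names are illustrative and not part of this patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;

// Sketch only: shows how a provider implementation might keep the Abortable
// handed to init(...) and use it when it hits a fatal WAL error.
public class AbortAwareWalSketch {

  private Abortable abortable; // may be null, e.g. the DisabledWALProvider path passes null

  // Mirrors the shape of WALProvider#init(WALFactory, Configuration, String, Abortable).
  public void init(Configuration conf, String providerId, Abortable abortable)
      throws IOException {
    this.abortable = abortable;
  }

  // On an unrecoverable failure, abort the hosting server rather than only logging.
  void handleFatalError(Throwable cause) {
    if (abortable != null && !abortable.isAborted()) {
      abortable.abort("Unrecoverable WAL error", cause);
    }
  }
}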
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index d392366ff5da..6361ffc4cb3f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.wal; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -56,7 +57,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -157,7 +158,6 @@ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOExcep * @return Path to file into which to dump split log edits. */ @SuppressWarnings("deprecation") - @VisibleForTesting static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionName, long seqId, String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException { FileSystem walFS = CommonFSUtils.getWALFileSystem(conf); @@ -207,7 +207,6 @@ static Path getCompletedRecoveredEditsFilePath(Path srcPath, long maximumEditWAL return new Path(srcPath.getParent(), fileName); } - @VisibleForTesting static String formatRecoveredEditsFileName(final long seqid) { return String.format("%019d", seqid); } @@ -345,7 +344,6 @@ public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) /** * Is the given file a region open sequence id file. */ - @VisibleForTesting public static boolean isSequenceIdFile(final Path file) { return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX) || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 0bea35eaf898..ed684868cdd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -29,8 +29,6 @@ import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; -import edu.umd.cs.findbugs.annotations.Nullable; -import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -57,12 +55,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId; -import javax.validation.constraints.Null; /** * Split RegionServer WAL files. 
Splits the WAL into new files, @@ -144,7 +142,6 @@ public class WALSplitter { this(factory, conf, walRootDir, walFS, rootDir, rootFS, null, null, null); } - @VisibleForTesting WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, Path rootDir, FileSystem rootFS, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { @@ -214,23 +211,21 @@ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem w * which uses this method to do log splitting. * @return List of output files created by the split. */ - @VisibleForTesting public static List split(Path walRootDir, Path walsDir, Path archiveDir, FileSystem walFS, Configuration conf, final WALFactory factory) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); - final FileStatus[] wals = + final List wals = SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List splits = new ArrayList<>(); - if (ArrayUtils.isNotEmpty(wals)) { + if (!wals.isEmpty()) { for (FileStatus wal: wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); - if (splitter.outputSink.splits != null) { - splits.addAll(splitter.outputSink.splits); - } + //splitter.outputSink.splits is mark as final, do not need null check + splits.addAll(splitter.outputSink.splits); } } } @@ -287,7 +282,6 @@ private void createOutputSinkAndEntryBuffers() { * WAL splitting implementation, splits one WAL file. * @param walStatus should be for an actual WAL file. 
*/ - @VisibleForTesting SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) throws IOException { Path wal = walStatus.getPath(); Preconditions.checkArgument(walStatus.isFile(), "Not a regular file " + wal.toString()); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 12757c6ad5ee..69b95e1a118e 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -247,8 +247,8 @@ <% for (Pair p : report.getHoles()) { %> - <%= p.getFirst().getEncodedName() %> - <%= p.getSecond().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> @@ -275,14 +275,14 @@ <% for (Pair p : report.getOverlaps()) { %> <% if (report.getMergedRegions().containsKey(p.getFirst())) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } else { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } %> <% if (report.getMergedRegions().containsKey(p.getSecond())) { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } else { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> <% } %> @@ -318,7 +318,7 @@ <% for (Pair p: report.getUnknownServers()) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <%= p.getSecond() %> <% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp index 8e1e23805abd..fba9a42b94e9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp @@ -123,7 +123,7 @@

    We do not list procedures that have completed successfully; their number makes it hard to spot the problematic ones.

    - +
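The WALPrettyPrinter changes earlier in this patch add a row-prefix filter (the new -f/--rowPrefix option, which cannot be combined with -w/--row). A small usage sketch follows, assuming a hypothetical WAL path and prefix; only setRowPrefixFilter and processFile come from the patched class, the wrapper class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

// Sketch only: dump WAL entries whose row key starts with a given prefix.
public class RowPrefixWalDumpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    WALPrettyPrinter printer = new WALPrettyPrinter();
    // Only entries whose row key starts with "user" are printed; this is
    // mutually exclusive with the exact-row filter (setRowFilter / -w).
    printer.setRowPrefixFilter("user");
    printer.processFile(conf, new Path("/hbase/WALs/example-regionserver/example-wal"));
  }
}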
    diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index e46b2778546d..b800e72f37b7 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -30,6 +30,7 @@ import="java.util.HashSet" import="java.util.Optional" import="java.util.TreeMap" + import="java.util.concurrent.TimeoutException" import="java.util.concurrent.TimeUnit" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.conf.Configuration" @@ -52,6 +53,7 @@ import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.client.Table" + import="org.apache.hadoop.hbase.client.TableState" import="org.apache.hadoop.hbase.client.ColumnFamilyDescriptor" import="org.apache.hadoop.hbase.http.InfoServer" import="org.apache.hadoop.hbase.master.HMaster" @@ -153,8 +155,8 @@ Table table = master.getConnection().getTable(TableName.valueOf(fqtn)); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = + master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication(); Map frags = null; if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); @@ -646,29 +648,16 @@ - +
    Id Parent
    Enabled
-    <%= master.getAssignmentManager().isTableEnabled(table.getName()) %>
+    <%= master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED) %>
    Is the table enabled
    Compaction <% - if (master.getAssignmentManager().isTableEnabled(table.getName())) { - try { - CompactionState compactionState = admin.getCompactionState(table.getName()).get(); - %><%= compactionState %><% - } catch (Exception e) { - - if(e.getCause() != null && e.getCause().getCause() instanceof NotServingRegionException) { - %><%= CompactionState.NONE %><% - } else { - // Nothing really to do here - for(StackTraceElement element : e.getStackTrace()) { - %><%= StringEscapeUtils.escapeHtml4(element.toString()) %><% - } - %> Unknown <% - } - } + if (master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED)) { + CompactionState compactionState = master.getCompactionState(table.getName()); + %><%= compactionState==null?"UNKNOWN":compactionState %><% } else { %><%= CompactionState.NONE %><% } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp new file mode 100644 index 000000000000..0b741e1089fd --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp @@ -0,0 +1,58 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/plain;charset=UTF-8" + import="java.util.List" + import="java.util.Date" + import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.TableName" + import="org.apache.hadoop.hbase.util.PrettyPrinter" +%> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + List snapshots = master.isInitialized() ? + master.getSnapshotManager().getCompletedSnapshots() : null; +%> +<%if (snapshots != null && snapshots.size() > 0) { %> + + + + + + + + + <% for (SnapshotDescription snapshotDesc : snapshots){ %> + <% TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); %> + + + + + + + + + <% } %> +

    <%= snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

    +
    Snapshot Name  Table  Creation Time  Owner  TTL
    <%= snapshotDesc.getName() %> <%= snapshotTable.getNameAsString() %> + <%= new Date(snapshotDesc.getCreationTime()) %><%= snapshotDesc.getOwner() %> + <%= snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> +
    +<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css deleted file mode 100755 index 10c9ff578722..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css +++ /dev/null @@ -1,394 +0,0 @@ -/*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ - -.btn-default, -.btn-primary, -.btn-success, -.btn-info, -.btn-warning, -.btn-danger { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.btn-default:active, -.btn-primary:active, -.btn-success:active, -.btn-info:active, -.btn-warning:active, -.btn-danger:active, -.btn-default.active, -.btn-primary.active, -.btn-success.active, -.btn-info.active, -.btn-warning.active, -.btn-danger.active { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} - -.btn:active, -.btn.active { - background-image: none; -} - -.btn-default { - text-shadow: 0 1px 0 #fff; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#e6e6e6)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #e6e6e6, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #e6e6e6 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #e6e6e6 100%); - background-repeat: repeat-x; - border-color: #e0e0e0; - border-color: #ccc; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0); -} - -.btn-default:active, -.btn-default.active { - background-color: #e6e6e6; - border-color: #e0e0e0; -} - -.btn-primary { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); - background-repeat: repeat-x; - border-color: #2d6ca2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); -} - -.btn-primary:active, -.btn-primary.active { - background-color: #3071a9; - border-color: #2d6ca2; -} - -.btn-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); - background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%); - background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - background-repeat: repeat-x; - border-color: #419641; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); -} - -.btn-success:active, -.btn-success.active { - background-color: #449d44; - border-color: #419641; -} - -.btn-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); - background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%); - background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: 
linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - background-repeat: repeat-x; - border-color: #eb9316; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); -} - -.btn-warning:active, -.btn-warning.active { - background-color: #ec971f; - border-color: #eb9316; -} - -.btn-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); - background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%); - background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - background-repeat: repeat-x; - border-color: #c12e2a; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); -} - -.btn-danger:active, -.btn-danger.active { - background-color: #c9302c; - border-color: #c12e2a; -} - -.btn-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); - background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%); - background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - background-repeat: repeat-x; - border-color: #2aabd2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); -} - -.btn-info:active, -.btn-info.active { - background-color: #31b0d5; - border-color: #2aabd2; -} - -.thumbnail, -.img-thumbnail { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} - -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus, -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - background-color: #357ebd; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); -} - -.navbar { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#f8f8f8)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #f8f8f8, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%); - background-repeat: repeat-x; - border-radius: 4px; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); -} - -.navbar .navbar-nav > .active > a { - background-color: #f8f8f8; -} - -.navbar-brand, -.navbar-nav > li > a { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25); -} - -.navbar-inverse { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#3c3c3c), to(#222222)); - background-image: -webkit-linear-gradient(top, #3c3c3c, 0%, #222222, 100%); - background-image: -moz-linear-gradient(top, #3c3c3c 0%, #222222 100%); - background-image: linear-gradient(to bottom, 
#3c3c3c 0%, #222222 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); -} - -.navbar-inverse .navbar-nav > .active > a { - background-color: #222222; -} - -.navbar-inverse .navbar-brand, -.navbar-inverse .navbar-nav > li > a { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} - -.navbar-static-top, -.navbar-fixed-top, -.navbar-fixed-bottom { - border-radius: 0; -} - -.alert { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.alert-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#c8e5bc)); - background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #c8e5bc, 100%); - background-image: -moz-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); - background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); - background-repeat: repeat-x; - border-color: #b2dba1; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); -} - -.alert-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#b9def0)); - background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #b9def0, 100%); - background-image: -moz-linear-gradient(top, #d9edf7 0%, #b9def0 100%); - background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); - background-repeat: repeat-x; - border-color: #9acfea; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); -} - -.alert-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#f8efc0)); - background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #f8efc0, 100%); - background-image: -moz-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); - background-repeat: repeat-x; - border-color: #f5e79e; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); -} - -.alert-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#e7c3c3)); - background-image: -webkit-linear-gradient(top, #f2dede, 0%, #e7c3c3, 100%); - background-image: -moz-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); - background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); - background-repeat: repeat-x; - border-color: #dca7a7; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); -} - -.progress { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f5f5f5)); - background-image: -webkit-linear-gradient(top, #ebebeb, 0%, #f5f5f5, 100%); - background-image: -moz-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); - background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); -} - -.progress-bar { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); - 
background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); -} - -.progress-bar-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); - background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%); - background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); -} - -.progress-bar-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); - background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%); - background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); -} - -.progress-bar-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); - background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%); - background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); -} - -.progress-bar-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); - background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%); - background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); -} - -.list-group { - border-radius: 4px; - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} - -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - text-shadow: 0 -1px 0 #3071a9; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3278b3)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3278b3, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3278b3 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%); - background-repeat: repeat-x; - border-color: #3278b3; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0); -} - -.panel { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.panel-default > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8)); - background-image: -webkit-linear-gradient(top, #f5f5f5, 0%, #e8e8e8, 100%); - background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); - background-repeat: repeat-x; - filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); -} - -.panel-primary > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); -} - -.panel-success > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#d0e9c6)); - background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #d0e9c6, 100%); - background-image: -moz-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); - background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); -} - -.panel-info > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#c4e3f3)); - background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #c4e3f3, 100%); - background-image: -moz-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); - background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); -} - -.panel-warning > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#faf2cc)); - background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #faf2cc, 100%); - background-image: -moz-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); -} - -.panel-danger > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#ebcccc)); - background-image: -webkit-linear-gradient(top, #f2dede, 0%, #ebcccc, 100%); - background-image: -moz-linear-gradient(top, #f2dede 0%, #ebcccc 100%); - background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); -} - -.well { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#e8e8e8), to(#f5f5f5)); - background-image: -webkit-linear-gradient(top, #e8e8e8, 0%, #f5f5f5, 100%); - background-image: -moz-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); - background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); - background-repeat: repeat-x; - border-color: #dcdcdc; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); - -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); -} \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css 
b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css old mode 100755 new mode 100644 index c31428b07eac..2a69f48c7f5b --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css @@ -1,10 +1,6 @@ /*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ -.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn:active,.btn.active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,0%,#e6e6e6,100%);background-image:-moz-linear-gradient(top,#fff 0,#e6e6e6 100%);background-image:linear-gradient(to bottom,#fff 0,#e6e6e6 100%);background-repeat:repeat-x;border-color:#e0e0e0;border-color:#ccc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0)}.btn-default:active,.btn-default.active{background-color:#e6e6e6;border-color:#e0e0e0}.btn-primary{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;border-color:#2d6ca2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.btn-primary:active,.btn-primary.active{background-color:#3071a9;border-color:#2d6ca2}.btn-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;border-color:#419641;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.btn-success:active,.btn-success.active{background-color:#449d44;border-color:#419641}.btn-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;border-color:#eb9316;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.btn-warning:active,.btn-warning.active{background-color:#ec971f;border-color:#eb9316}.btn-danger{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;border-color:#c12e2a;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.btn-danger:active,.btn-danger.active{background-color:#c9302c;border-color:#c12e2a}.btn-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;border-color:#2aabd2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.btn-info:active,.btn-info.active{background-color:#31b0d5;border-color:#2aabd2}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-color:#357ebd;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.navbar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#f8f8f8));background-image:-webkit-linear-gradient(top,#fff,0%,#f8f8f8,100%);background-image:-moz-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff8f8f8',GradientType=0);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075)}.navbar .navbar-nav>.active>a{background-color:#f8f8f8}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,0.25)}.navbar-inverse{background-image:-webkit-gradient(linear,left 0,left 100%,from(#3c3c3c),to(#222));background-image:-webkit-linear-gradient(top,#3c3c3c,0%,#222,100%);background-image:-moz-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c',endColorstr='#ff222222',GradientType=0)}.navbar-inverse .navbar-nav>.active>a{background-color:#222}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05)}.alert-success{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#dff0d8),to(#c8e5bc));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#c8e5bc,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;border-color:#b2dba1;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffc8e5bc',GradientType=0)}.alert-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#b9def0));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#b9def0,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;border-color:#9acfea;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffb9def0',GradientType=0)}.alert-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#f8efc0));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#f8efc0,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;border-color:#f5e79e;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fff8efc0',GradientType=0)}.alert-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#e7c3c3));background-image:-webkit-linear-gradient(top,#f2dede,0%,#e7c3c3,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;border-color:#dca7a7;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffe7c3c3',GradientType=0)}.progress{background-image:-webkit-gradient(linear,left 0,left 100%,from(#ebebeb),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#ebebeb,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb',endColorstr='#fff5f5f5',GradientType=0)}.progress-bar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.progress-bar-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.progress-bar-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.progress-bar-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.progress-bar-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3278b3));background-image:-webkit-linear-gradient(top,#428bca,0%,#3278b3,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;border-color:#3278b3;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3278b3',GradientType=0)}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.panel-default>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f5f5f5),to(#e8e8e8));background-image:-webkit-linear-gradient(top,#f5f5f5,0%,#e8e8e8,100%);background-image:-moz-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#ffe8e8e8',GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#d0e9c6));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#d0e9c6,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffd0e9c6',GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#c4e3f3));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#c4e3f3,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffc4e3f3',GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#faf2cc));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#faf2cc,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fffaf2cc',GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#ebcccc));background-image:-webkit-linear-gradient(top,#f2dede,0%,#ebcccc,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffebcccc',GradientType=0)}.well{background-image:-webkit-gradient(linear,left 0,left 100%,from(#e8e8e8),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#e8e8e8,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;border-color:#dcdcdc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1)} \ No newline at end of file + * Bootstrap v3.4.1 (https://getbootstrap.com/) + * Copyright 2011-2019 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x;background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x;background-color:#2e6da4}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} +/*# sourceMappingURL=bootstrap-theme.min.css.map */ \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css deleted file mode 100755 index bbda4eed4afd..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css +++ /dev/null @@ -1,6805 +0,0 @@ -/*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ - -/*! 
normalize.css v2.1.0 | MIT License | git.io/normalize */ - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -nav, -section, -summary { - display: block; -} - -audio, -canvas, -video { - display: inline-block; -} - -audio:not([controls]) { - display: none; - height: 0; -} - -[hidden] { - display: none; -} - -html { - font-family: sans-serif; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} - -body { - margin: 0; -} - -a:focus { - outline: thin dotted; -} - -a:active, -a:hover { - outline: 0; -} - -h1 { - margin: 0.67em 0; - font-size: 2em; -} - -abbr[title] { - border-bottom: 1px dotted; -} - -b, -strong { - font-weight: bold; -} - -dfn { - font-style: italic; -} - -hr { - height: 0; - -moz-box-sizing: content-box; - box-sizing: content-box; -} - -mark { - color: #000; - background: #ff0; -} - -code, -kbd, -pre, -samp { - font-family: monospace, serif; - font-size: 1em; -} - -pre { - white-space: pre-wrap; -} - -q { - quotes: "\201C" "\201D" "\2018" "\2019"; -} - -small { - font-size: 80%; -} - -sub, -sup { - position: relative; - font-size: 75%; - line-height: 0; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -img { - border: 0; -} - -svg:not(:root) { - overflow: hidden; -} - -figure { - margin: 0; -} - -fieldset { - padding: 0.35em 0.625em 0.75em; - margin: 0 2px; - border: 1px solid #c0c0c0; -} - -legend { - padding: 0; - border: 0; -} - -button, -input, -select, -textarea { - margin: 0; - font-family: inherit; - font-size: 100%; -} - -button, -input { - line-height: normal; -} - -button, -select { - text-transform: none; -} - -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - cursor: pointer; - -webkit-appearance: button; -} - -button[disabled], -html input[disabled] { - cursor: default; -} - -input[type="checkbox"], -input[type="radio"] { - padding: 0; - box-sizing: border-box; -} - -input[type="search"] { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - -webkit-appearance: textfield; -} - -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} - -button::-moz-focus-inner, -input::-moz-focus-inner { - padding: 0; - border: 0; -} - -textarea { - overflow: auto; - vertical-align: top; -} - -table { - border-collapse: collapse; - border-spacing: 0; -} - -@media print { - * { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - @page { - margin: 2cm .5cm; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } - .navbar { - display: none; - } - .table td, - .table th { - background-color: #fff !important; - } - .btn > .caret, - .dropup > .btn > .caret { - border-top-color: #000 !important; - } - .label { - border: 1px solid #000; - } - .table { - border-collapse: collapse !important; - } - .table-bordered th, - .table-bordered td { - border: 1px solid #ddd 
!important; - } -} - -*, -*:before, -*:after { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -html { - font-size: 62.5%; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} - -body { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 1.428571429; - color: #333333; - background-color: #ffffff; -} - -input, -button, -select, -textarea { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} - -button, -input, -select[multiple], -textarea { - background-image: none; -} - -a { - color: #428bca; - text-decoration: none; -} - -a:hover, -a:focus { - color: #2a6496; - text-decoration: underline; -} - -a:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -img { - vertical-align: middle; -} - -.img-responsive { - display: block; - height: auto; - max-width: 100%; -} - -.img-rounded { - border-radius: 6px; -} - -.img-thumbnail { - display: inline-block; - height: auto; - max-width: 100%; - padding: 4px; - line-height: 1.428571429; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -.img-circle { - border-radius: 50%; -} - -hr { - margin-top: 20px; - margin-bottom: 20px; - border: 0; - border-top: 1px solid #eeeeee; -} - -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0 0 0 0); - border: 0; -} - -p { - margin: 0 0 10px; -} - -.lead { - margin-bottom: 20px; - font-size: 16.099999999999998px; - font-weight: 200; - line-height: 1.4; -} - -@media (min-width: 768px) { - .lead { - font-size: 21px; - } -} - -small { - font-size: 85%; -} - -cite { - font-style: normal; -} - -.text-muted { - color: #999999; -} - -.text-primary { - color: #428bca; -} - -.text-warning { - color: #c09853; -} - -.text-danger { - color: #b94a48; -} - -.text-success { - color: #468847; -} - -.text-info { - color: #3a87ad; -} - -.text-left { - text-align: left; -} - -.text-right { - text-align: right; -} - -.text-center { - text-align: center; -} - -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-weight: 500; - line-height: 1.1; -} - -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.h1 small, -.h2 small, -.h3 small, -.h4 small, -.h5 small, -.h6 small { - font-weight: normal; - line-height: 1; - color: #999999; -} - -h1, -h2, -h3 { - margin-top: 20px; - margin-bottom: 10px; -} - -h4, -h5, -h6 { - margin-top: 10px; - margin-bottom: 10px; -} - -h1, -.h1 { - font-size: 36px; -} - -h2, -.h2 { - font-size: 30px; -} - -h3, -.h3 { - font-size: 24px; -} - -h4, -.h4 { - font-size: 18px; -} - -h5, -.h5 { - font-size: 14px; -} - -h6, -.h6 { - font-size: 12px; -} - -h1 small, -.h1 small { - font-size: 24px; -} - -h2 small, -.h2 small { - font-size: 18px; -} - -h3 small, -.h3 small, -h4 small, -.h4 small { - font-size: 14px; -} - -.page-header { - padding-bottom: 9px; - margin: 40px 0 20px; - border-bottom: 1px solid #eeeeee; -} - -ul, -ol { - margin-top: 0; - margin-bottom: 10px; -} - -ul ul, -ol ul, -ul ol, -ol ol { - margin-bottom: 0; -} - -.list-unstyled { - padding-left: 0; - list-style: none; -} - -.list-inline { - padding-left: 0; - list-style: none; -} - -.list-inline > li { - display: inline-block; - padding-right: 5px; - padding-left: 5px; -} - -dl { - margin-bottom: 20px; 
-} - -dt, -dd { - line-height: 1.428571429; -} - -dt { - font-weight: bold; -} - -dd { - margin-left: 0; -} - -@media (min-width: 768px) { - .dl-horizontal dt { - float: left; - width: 160px; - overflow: hidden; - clear: left; - text-align: right; - text-overflow: ellipsis; - white-space: nowrap; - } - .dl-horizontal dd { - margin-left: 180px; - } - .dl-horizontal dd:before, - .dl-horizontal dd:after { - display: table; - content: " "; - } - .dl-horizontal dd:after { - clear: both; - } - .dl-horizontal dd:before, - .dl-horizontal dd:after { - display: table; - content: " "; - } - .dl-horizontal dd:after { - clear: both; - } -} - -abbr[title], -abbr[data-original-title] { - cursor: help; - border-bottom: 1px dotted #999999; -} - -abbr.initialism { - font-size: 90%; - text-transform: uppercase; -} - -blockquote { - padding: 10px 20px; - margin: 0 0 20px; - border-left: 5px solid #eeeeee; -} - -blockquote p { - font-size: 17.5px; - font-weight: 300; - line-height: 1.25; -} - -blockquote p:last-child { - margin-bottom: 0; -} - -blockquote small { - display: block; - line-height: 1.428571429; - color: #999999; -} - -blockquote small:before { - content: '\2014 \00A0'; -} - -blockquote.pull-right { - padding-right: 15px; - padding-left: 0; - border-right: 5px solid #eeeeee; - border-left: 0; -} - -blockquote.pull-right p, -blockquote.pull-right small { - text-align: right; -} - -blockquote.pull-right small:before { - content: ''; -} - -blockquote.pull-right small:after { - content: '\00A0 \2014'; -} - -q:before, -q:after, -blockquote:before, -blockquote:after { - content: ""; -} - -address { - display: block; - margin-bottom: 20px; - font-style: normal; - line-height: 1.428571429; -} - -code, -pre { - font-family: Monaco, Menlo, Consolas, "Courier New", monospace; -} - -code { - padding: 2px 4px; - font-size: 90%; - color: #c7254e; - white-space: nowrap; - background-color: #f9f2f4; - border-radius: 4px; -} - -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 1.428571429; - color: #333333; - word-break: break-all; - word-wrap: break-word; - background-color: #f5f5f5; - border: 1px solid #cccccc; - border-radius: 4px; -} - -pre.prettyprint { - margin-bottom: 20px; -} - -pre code { - padding: 0; - font-size: inherit; - color: inherit; - white-space: pre-wrap; - background-color: transparent; - border: 0; -} - -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} - -.container { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} - -.container:before, -.container:after { - display: table; - content: " "; -} - -.container:after { - clear: both; -} - -.container:before, -.container:after { - display: table; - content: " "; -} - -.container:after { - clear: both; -} - -.row { - margin-right: -15px; - margin-left: -15px; -} - -.row:before, -.row:after { - display: table; - content: " "; -} - -.row:after { - clear: both; -} - -.row:before, -.row:after { - display: table; - content: " "; -} - -.row:after { - clear: both; -} - -.col-xs-1, -.col-xs-2, -.col-xs-3, -.col-xs-4, -.col-xs-5, -.col-xs-6, -.col-xs-7, -.col-xs-8, -.col-xs-9, -.col-xs-10, -.col-xs-11, -.col-xs-12, -.col-sm-1, -.col-sm-2, -.col-sm-3, -.col-sm-4, -.col-sm-5, -.col-sm-6, -.col-sm-7, -.col-sm-8, -.col-sm-9, -.col-sm-10, -.col-sm-11, -.col-sm-12, -.col-md-1, -.col-md-2, -.col-md-3, -.col-md-4, -.col-md-5, -.col-md-6, -.col-md-7, -.col-md-8, -.col-md-9, -.col-md-10, -.col-md-11, -.col-md-12, -.col-lg-1, -.col-lg-2, -.col-lg-3, -.col-lg-4, 
-.col-lg-5, -.col-lg-6, -.col-lg-7, -.col-lg-8, -.col-lg-9, -.col-lg-10, -.col-lg-11, -.col-lg-12 { - position: relative; - min-height: 1px; - padding-right: 15px; - padding-left: 15px; -} - -.col-xs-1, -.col-xs-2, -.col-xs-3, -.col-xs-4, -.col-xs-5, -.col-xs-6, -.col-xs-7, -.col-xs-8, -.col-xs-9, -.col-xs-10, -.col-xs-11 { - float: left; -} - -.col-xs-1 { - width: 8.333333333333332%; -} - -.col-xs-2 { - width: 16.666666666666664%; -} - -.col-xs-3 { - width: 25%; -} - -.col-xs-4 { - width: 33.33333333333333%; -} - -.col-xs-5 { - width: 41.66666666666667%; -} - -.col-xs-6 { - width: 50%; -} - -.col-xs-7 { - width: 58.333333333333336%; -} - -.col-xs-8 { - width: 66.66666666666666%; -} - -.col-xs-9 { - width: 75%; -} - -.col-xs-10 { - width: 83.33333333333334%; -} - -.col-xs-11 { - width: 91.66666666666666%; -} - -.col-xs-12 { - width: 100%; -} - -@media (min-width: 768px) { - .container { - max-width: 750px; - } - .col-sm-1, - .col-sm-2, - .col-sm-3, - .col-sm-4, - .col-sm-5, - .col-sm-6, - .col-sm-7, - .col-sm-8, - .col-sm-9, - .col-sm-10, - .col-sm-11 { - float: left; - } - .col-sm-1 { - width: 8.333333333333332%; - } - .col-sm-2 { - width: 16.666666666666664%; - } - .col-sm-3 { - width: 25%; - } - .col-sm-4 { - width: 33.33333333333333%; - } - .col-sm-5 { - width: 41.66666666666667%; - } - .col-sm-6 { - width: 50%; - } - .col-sm-7 { - width: 58.333333333333336%; - } - .col-sm-8 { - width: 66.66666666666666%; - } - .col-sm-9 { - width: 75%; - } - .col-sm-10 { - width: 83.33333333333334%; - } - .col-sm-11 { - width: 91.66666666666666%; - } - .col-sm-12 { - width: 100%; - } - .col-sm-push-1 { - left: 8.333333333333332%; - } - .col-sm-push-2 { - left: 16.666666666666664%; - } - .col-sm-push-3 { - left: 25%; - } - .col-sm-push-4 { - left: 33.33333333333333%; - } - .col-sm-push-5 { - left: 41.66666666666667%; - } - .col-sm-push-6 { - left: 50%; - } - .col-sm-push-7 { - left: 58.333333333333336%; - } - .col-sm-push-8 { - left: 66.66666666666666%; - } - .col-sm-push-9 { - left: 75%; - } - .col-sm-push-10 { - left: 83.33333333333334%; - } - .col-sm-push-11 { - left: 91.66666666666666%; - } - .col-sm-pull-1 { - right: 8.333333333333332%; - } - .col-sm-pull-2 { - right: 16.666666666666664%; - } - .col-sm-pull-3 { - right: 25%; - } - .col-sm-pull-4 { - right: 33.33333333333333%; - } - .col-sm-pull-5 { - right: 41.66666666666667%; - } - .col-sm-pull-6 { - right: 50%; - } - .col-sm-pull-7 { - right: 58.333333333333336%; - } - .col-sm-pull-8 { - right: 66.66666666666666%; - } - .col-sm-pull-9 { - right: 75%; - } - .col-sm-pull-10 { - right: 83.33333333333334%; - } - .col-sm-pull-11 { - right: 91.66666666666666%; - } - .col-sm-offset-1 { - margin-left: 8.333333333333332%; - } - .col-sm-offset-2 { - margin-left: 16.666666666666664%; - } - .col-sm-offset-3 { - margin-left: 25%; - } - .col-sm-offset-4 { - margin-left: 33.33333333333333%; - } - .col-sm-offset-5 { - margin-left: 41.66666666666667%; - } - .col-sm-offset-6 { - margin-left: 50%; - } - .col-sm-offset-7 { - margin-left: 58.333333333333336%; - } - .col-sm-offset-8 { - margin-left: 66.66666666666666%; - } - .col-sm-offset-9 { - margin-left: 75%; - } - .col-sm-offset-10 { - margin-left: 83.33333333333334%; - } - .col-sm-offset-11 { - margin-left: 91.66666666666666%; - } -} - -@media (min-width: 992px) { - .container { - max-width: 970px; - } - .col-md-1, - .col-md-2, - .col-md-3, - .col-md-4, - .col-md-5, - .col-md-6, - .col-md-7, - .col-md-8, - .col-md-9, - .col-md-10, - .col-md-11 { - float: left; - } - .col-md-1 { - width: 8.333333333333332%; - } 
- .col-md-2 { - width: 16.666666666666664%; - } - .col-md-3 { - width: 25%; - } - .col-md-4 { - width: 33.33333333333333%; - } - .col-md-5 { - width: 41.66666666666667%; - } - .col-md-6 { - width: 50%; - } - .col-md-7 { - width: 58.333333333333336%; - } - .col-md-8 { - width: 66.66666666666666%; - } - .col-md-9 { - width: 75%; - } - .col-md-10 { - width: 83.33333333333334%; - } - .col-md-11 { - width: 91.66666666666666%; - } - .col-md-12 { - width: 100%; - } - .col-md-push-0 { - left: auto; - } - .col-md-push-1 { - left: 8.333333333333332%; - } - .col-md-push-2 { - left: 16.666666666666664%; - } - .col-md-push-3 { - left: 25%; - } - .col-md-push-4 { - left: 33.33333333333333%; - } - .col-md-push-5 { - left: 41.66666666666667%; - } - .col-md-push-6 { - left: 50%; - } - .col-md-push-7 { - left: 58.333333333333336%; - } - .col-md-push-8 { - left: 66.66666666666666%; - } - .col-md-push-9 { - left: 75%; - } - .col-md-push-10 { - left: 83.33333333333334%; - } - .col-md-push-11 { - left: 91.66666666666666%; - } - .col-md-pull-0 { - right: auto; - } - .col-md-pull-1 { - right: 8.333333333333332%; - } - .col-md-pull-2 { - right: 16.666666666666664%; - } - .col-md-pull-3 { - right: 25%; - } - .col-md-pull-4 { - right: 33.33333333333333%; - } - .col-md-pull-5 { - right: 41.66666666666667%; - } - .col-md-pull-6 { - right: 50%; - } - .col-md-pull-7 { - right: 58.333333333333336%; - } - .col-md-pull-8 { - right: 66.66666666666666%; - } - .col-md-pull-9 { - right: 75%; - } - .col-md-pull-10 { - right: 83.33333333333334%; - } - .col-md-pull-11 { - right: 91.66666666666666%; - } - .col-md-offset-0 { - margin-left: 0; - } - .col-md-offset-1 { - margin-left: 8.333333333333332%; - } - .col-md-offset-2 { - margin-left: 16.666666666666664%; - } - .col-md-offset-3 { - margin-left: 25%; - } - .col-md-offset-4 { - margin-left: 33.33333333333333%; - } - .col-md-offset-5 { - margin-left: 41.66666666666667%; - } - .col-md-offset-6 { - margin-left: 50%; - } - .col-md-offset-7 { - margin-left: 58.333333333333336%; - } - .col-md-offset-8 { - margin-left: 66.66666666666666%; - } - .col-md-offset-9 { - margin-left: 75%; - } - .col-md-offset-10 { - margin-left: 83.33333333333334%; - } - .col-md-offset-11 { - margin-left: 91.66666666666666%; - } -} - -@media (min-width: 1200px) { - .container { - max-width: 1170px; - } - .col-lg-1, - .col-lg-2, - .col-lg-3, - .col-lg-4, - .col-lg-5, - .col-lg-6, - .col-lg-7, - .col-lg-8, - .col-lg-9, - .col-lg-10, - .col-lg-11 { - float: left; - } - .col-lg-1 { - width: 8.333333333333332%; - } - .col-lg-2 { - width: 16.666666666666664%; - } - .col-lg-3 { - width: 25%; - } - .col-lg-4 { - width: 33.33333333333333%; - } - .col-lg-5 { - width: 41.66666666666667%; - } - .col-lg-6 { - width: 50%; - } - .col-lg-7 { - width: 58.333333333333336%; - } - .col-lg-8 { - width: 66.66666666666666%; - } - .col-lg-9 { - width: 75%; - } - .col-lg-10 { - width: 83.33333333333334%; - } - .col-lg-11 { - width: 91.66666666666666%; - } - .col-lg-12 { - width: 100%; - } - .col-lg-push-0 { - left: auto; - } - .col-lg-push-1 { - left: 8.333333333333332%; - } - .col-lg-push-2 { - left: 16.666666666666664%; - } - .col-lg-push-3 { - left: 25%; - } - .col-lg-push-4 { - left: 33.33333333333333%; - } - .col-lg-push-5 { - left: 41.66666666666667%; - } - .col-lg-push-6 { - left: 50%; - } - .col-lg-push-7 { - left: 58.333333333333336%; - } - .col-lg-push-8 { - left: 66.66666666666666%; - } - .col-lg-push-9 { - left: 75%; - } - .col-lg-push-10 { - left: 83.33333333333334%; - } - .col-lg-push-11 { - left: 91.66666666666666%; 
- } - .col-lg-pull-0 { - right: auto; - } - .col-lg-pull-1 { - right: 8.333333333333332%; - } - .col-lg-pull-2 { - right: 16.666666666666664%; - } - .col-lg-pull-3 { - right: 25%; - } - .col-lg-pull-4 { - right: 33.33333333333333%; - } - .col-lg-pull-5 { - right: 41.66666666666667%; - } - .col-lg-pull-6 { - right: 50%; - } - .col-lg-pull-7 { - right: 58.333333333333336%; - } - .col-lg-pull-8 { - right: 66.66666666666666%; - } - .col-lg-pull-9 { - right: 75%; - } - .col-lg-pull-10 { - right: 83.33333333333334%; - } - .col-lg-pull-11 { - right: 91.66666666666666%; - } - .col-lg-offset-0 { - margin-left: 0; - } - .col-lg-offset-1 { - margin-left: 8.333333333333332%; - } - .col-lg-offset-2 { - margin-left: 16.666666666666664%; - } - .col-lg-offset-3 { - margin-left: 25%; - } - .col-lg-offset-4 { - margin-left: 33.33333333333333%; - } - .col-lg-offset-5 { - margin-left: 41.66666666666667%; - } - .col-lg-offset-6 { - margin-left: 50%; - } - .col-lg-offset-7 { - margin-left: 58.333333333333336%; - } - .col-lg-offset-8 { - margin-left: 66.66666666666666%; - } - .col-lg-offset-9 { - margin-left: 75%; - } - .col-lg-offset-10 { - margin-left: 83.33333333333334%; - } - .col-lg-offset-11 { - margin-left: 91.66666666666666%; - } -} - -table { - max-width: 100%; - background-color: transparent; -} - -th { - text-align: left; -} - -.table { - width: 100%; - margin-bottom: 20px; -} - -.table thead > tr > th, -.table tbody > tr > th, -.table tfoot > tr > th, -.table thead > tr > td, -.table tbody > tr > td, -.table tfoot > tr > td { - padding: 8px; - line-height: 1.428571429; - vertical-align: top; - border-top: 1px solid #dddddd; -} - -.table thead > tr > th { - vertical-align: bottom; - border-bottom: 2px solid #dddddd; -} - -.table caption + thead tr:first-child th, -.table colgroup + thead tr:first-child th, -.table thead:first-child tr:first-child th, -.table caption + thead tr:first-child td, -.table colgroup + thead tr:first-child td, -.table thead:first-child tr:first-child td { - border-top: 0; -} - -.table tbody + tbody { - border-top: 2px solid #dddddd; -} - -.table .table { - background-color: #ffffff; -} - -.table-condensed thead > tr > th, -.table-condensed tbody > tr > th, -.table-condensed tfoot > tr > th, -.table-condensed thead > tr > td, -.table-condensed tbody > tr > td, -.table-condensed tfoot > tr > td { - padding: 5px; -} - -.table-bordered { - border: 1px solid #dddddd; -} - -.table-bordered > thead > tr > th, -.table-bordered > tbody > tr > th, -.table-bordered > tfoot > tr > th, -.table-bordered > thead > tr > td, -.table-bordered > tbody > tr > td, -.table-bordered > tfoot > tr > td { - border: 1px solid #dddddd; -} - -.table-bordered > thead > tr > th, -.table-bordered > thead > tr > td { - border-bottom-width: 2px; -} - -.table-striped > tbody > tr:nth-child(odd) > td, -.table-striped > tbody > tr:nth-child(odd) > th { - background-color: #f9f9f9; -} - -.table-hover > tbody > tr:hover > td, -.table-hover > tbody > tr:hover > th { - background-color: #f5f5f5; -} - -table col[class*="col-"] { - display: table-column; - float: none; -} - -table td[class*="col-"], -table th[class*="col-"] { - display: table-cell; - float: none; -} - -.table > thead > tr > td.active, -.table > tbody > tr > td.active, -.table > tfoot > tr > td.active, -.table > thead > tr > th.active, -.table > tbody > tr > th.active, -.table > tfoot > tr > th.active, -.table > thead > tr.active > td, -.table > tbody > tr.active > td, -.table > tfoot > tr.active > td, -.table > thead > tr.active > th, -.table > tbody 
> tr.active > th, -.table > tfoot > tr.active > th { - background-color: #f5f5f5; -} - -.table > thead > tr > td.success, -.table > tbody > tr > td.success, -.table > tfoot > tr > td.success, -.table > thead > tr > th.success, -.table > tbody > tr > th.success, -.table > tfoot > tr > th.success, -.table > thead > tr.success > td, -.table > tbody > tr.success > td, -.table > tfoot > tr.success > td, -.table > thead > tr.success > th, -.table > tbody > tr.success > th, -.table > tfoot > tr.success > th { - background-color: #dff0d8; - border-color: #d6e9c6; -} - -.table-hover > tbody > tr > td.success:hover, -.table-hover > tbody > tr > th.success:hover, -.table-hover > tbody > tr.success:hover > td { - background-color: #d0e9c6; - border-color: #c9e2b3; -} - -.table > thead > tr > td.danger, -.table > tbody > tr > td.danger, -.table > tfoot > tr > td.danger, -.table > thead > tr > th.danger, -.table > tbody > tr > th.danger, -.table > tfoot > tr > th.danger, -.table > thead > tr.danger > td, -.table > tbody > tr.danger > td, -.table > tfoot > tr.danger > td, -.table > thead > tr.danger > th, -.table > tbody > tr.danger > th, -.table > tfoot > tr.danger > th { - background-color: #f2dede; - border-color: #eed3d7; -} - -.table-hover > tbody > tr > td.danger:hover, -.table-hover > tbody > tr > th.danger:hover, -.table-hover > tbody > tr.danger:hover > td { - background-color: #ebcccc; - border-color: #e6c1c7; -} - -.table > thead > tr > td.warning, -.table > tbody > tr > td.warning, -.table > tfoot > tr > td.warning, -.table > thead > tr > th.warning, -.table > tbody > tr > th.warning, -.table > tfoot > tr > th.warning, -.table > thead > tr.warning > td, -.table > tbody > tr.warning > td, -.table > tfoot > tr.warning > td, -.table > thead > tr.warning > th, -.table > tbody > tr.warning > th, -.table > tfoot > tr.warning > th { - background-color: #fcf8e3; - border-color: #fbeed5; -} - -.table-hover > tbody > tr > td.warning:hover, -.table-hover > tbody > tr > th.warning:hover, -.table-hover > tbody > tr.warning:hover > td { - background-color: #faf2cc; - border-color: #f8e5be; -} - -@media (max-width: 768px) { - .table-responsive { - width: 100%; - margin-bottom: 15px; - overflow-x: scroll; - overflow-y: hidden; - border: 1px solid #dddddd; - } - .table-responsive > .table { - margin-bottom: 0; - background-color: #fff; - } - .table-responsive > .table > thead > tr > th, - .table-responsive > .table > tbody > tr > th, - .table-responsive > .table > tfoot > tr > th, - .table-responsive > .table > thead > tr > td, - .table-responsive > .table > tbody > tr > td, - .table-responsive > .table > tfoot > tr > td { - white-space: nowrap; - } - .table-responsive > .table-bordered { - border: 0; - } - .table-responsive > .table-bordered > thead > tr > th:first-child, - .table-responsive > .table-bordered > tbody > tr > th:first-child, - .table-responsive > .table-bordered > tfoot > tr > th:first-child, - .table-responsive > .table-bordered > thead > tr > td:first-child, - .table-responsive > .table-bordered > tbody > tr > td:first-child, - .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; - } - .table-responsive > .table-bordered > thead > tr > th:last-child, - .table-responsive > .table-bordered > tbody > tr > th:last-child, - .table-responsive > .table-bordered > tfoot > tr > th:last-child, - .table-responsive > .table-bordered > thead > tr > td:last-child, - .table-responsive > .table-bordered > tbody > tr > td:last-child, - .table-responsive > .table-bordered > 
tfoot > tr > td:last-child { - border-right: 0; - } - .table-responsive > .table-bordered > thead > tr:last-child > th, - .table-responsive > .table-bordered > tbody > tr:last-child > th, - .table-responsive > .table-bordered > tfoot > tr:last-child > th, - .table-responsive > .table-bordered > thead > tr:last-child > td, - .table-responsive > .table-bordered > tbody > tr:last-child > td, - .table-responsive > .table-bordered > tfoot > tr:last-child > td { - border-bottom: 0; - } -} - -fieldset { - padding: 0; - margin: 0; - border: 0; -} - -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: inherit; - color: #333333; - border: 0; - border-bottom: 1px solid #e5e5e5; -} - -label { - display: inline-block; - margin-bottom: 5px; - font-weight: bold; -} - -input[type="search"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -input[type="radio"], -input[type="checkbox"] { - margin: 4px 0 0; - margin-top: 1px \9; - /* IE8-9 */ - - line-height: normal; -} - -input[type="file"] { - display: block; -} - -select[multiple], -select[size] { - height: auto; -} - -select optgroup { - font-family: inherit; - font-size: inherit; - font-style: inherit; -} - -input[type="file"]:focus, -input[type="radio"]:focus, -input[type="checkbox"]:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -input[type="number"]::-webkit-outer-spin-button, -input[type="number"]::-webkit-inner-spin-button { - height: auto; -} - -.form-control:-moz-placeholder { - color: #999999; -} - -.form-control::-moz-placeholder { - color: #999999; -} - -.form-control:-ms-input-placeholder { - color: #999999; -} - -.form-control::-webkit-input-placeholder { - color: #999999; -} - -.form-control { - display: block; - width: 100%; - height: 34px; - padding: 6px 12px; - font-size: 14px; - line-height: 1.428571429; - color: #555555; - vertical-align: middle; - background-color: #ffffff; - border: 1px solid #cccccc; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -webkit-transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; - transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; -} - -.form-control:focus { - border-color: #66afe9; - outline: 0; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); -} - -.form-control[disabled], -.form-control[readonly], -fieldset[disabled] .form-control { - cursor: not-allowed; - background-color: #eeeeee; -} - -textarea.form-control { - height: auto; -} - -.form-group { - margin-bottom: 15px; -} - -.radio, -.checkbox { - display: block; - min-height: 20px; - padding-left: 20px; - margin-top: 10px; - margin-bottom: 10px; - vertical-align: middle; -} - -.radio label, -.checkbox label { - display: inline; - margin-bottom: 0; - font-weight: normal; - cursor: pointer; -} - -.radio input[type="radio"], -.radio-inline input[type="radio"], -.checkbox input[type="checkbox"], -.checkbox-inline input[type="checkbox"] { - float: left; - margin-left: -20px; -} - -.radio + .radio, -.checkbox + .checkbox { - margin-top: -5px; -} - -.radio-inline, -.checkbox-inline { - display: inline-block; - padding-left: 20px; - margin-bottom: 0; - font-weight: normal; - vertical-align: middle; - cursor: pointer; -} - 
-.radio-inline + .radio-inline, -.checkbox-inline + .checkbox-inline { - margin-top: 0; - margin-left: 10px; -} - -input[type="radio"][disabled], -input[type="checkbox"][disabled], -.radio[disabled], -.radio-inline[disabled], -.checkbox[disabled], -.checkbox-inline[disabled], -fieldset[disabled] input[type="radio"], -fieldset[disabled] input[type="checkbox"], -fieldset[disabled] .radio, -fieldset[disabled] .radio-inline, -fieldset[disabled] .checkbox, -fieldset[disabled] .checkbox-inline { - cursor: not-allowed; -} - -.input-sm { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} - -select.input-sm { - height: 30px; - line-height: 30px; -} - -textarea.input-sm { - height: auto; -} - -.input-lg { - height: 45px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.33; - border-radius: 6px; -} - -select.input-lg { - height: 45px; - line-height: 45px; -} - -textarea.input-lg { - height: auto; -} - -.has-warning .help-block, -.has-warning .control-label { - color: #c09853; -} - -.has-warning .form-control { - border-color: #c09853; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.has-warning .form-control:focus { - border-color: #a47e3c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e; -} - -.has-warning .input-group-addon { - color: #c09853; - background-color: #fcf8e3; - border-color: #c09853; -} - -.has-error .help-block, -.has-error .control-label { - color: #b94a48; -} - -.has-error .form-control { - border-color: #b94a48; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.has-error .form-control:focus { - border-color: #953b39; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #d59392; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #d59392; -} - -.has-error .input-group-addon { - color: #b94a48; - background-color: #f2dede; - border-color: #b94a48; -} - -.has-success .help-block, -.has-success .control-label { - color: #468847; -} - -.has-success .form-control { - border-color: #468847; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.has-success .form-control:focus { - border-color: #356635; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7aba7b; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7aba7b; -} - -.has-success .input-group-addon { - color: #468847; - background-color: #dff0d8; - border-color: #468847; -} - -.form-control-static { - padding-top: 7px; - margin-bottom: 0; -} - -.help-block { - display: block; - margin-top: 5px; - margin-bottom: 10px; - color: #737373; -} - -@media (min-width: 768px) { - .form-inline .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .form-control { - display: inline-block; - } - .form-inline .radio, - .form-inline .checkbox { - display: inline-block; - padding-left: 0; - margin-top: 0; - margin-bottom: 0; - } - .form-inline .radio input[type="radio"], - .form-inline .checkbox input[type="checkbox"] { - float: none; - margin-left: 0; - } -} - -.form-horizontal .control-label, -.form-horizontal .radio, -.form-horizontal .checkbox, -.form-horizontal .radio-inline, -.form-horizontal .checkbox-inline { - padding-top: 7px; - margin-top: 0; - margin-bottom: 0; -} - -.form-horizontal 
.form-group { - margin-right: -15px; - margin-left: -15px; -} - -.form-horizontal .form-group:before, -.form-horizontal .form-group:after { - display: table; - content: " "; -} - -.form-horizontal .form-group:after { - clear: both; -} - -.form-horizontal .form-group:before, -.form-horizontal .form-group:after { - display: table; - content: " "; -} - -.form-horizontal .form-group:after { - clear: both; -} - -@media (min-width: 768px) { - .form-horizontal .control-label { - text-align: right; - } -} - -.btn { - display: inline-block; - padding: 6px 12px; - margin-bottom: 0; - font-size: 14px; - font-weight: normal; - line-height: 1.428571429; - text-align: center; - white-space: nowrap; - vertical-align: middle; - cursor: pointer; - border: 1px solid transparent; - border-radius: 4px; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - -o-user-select: none; - user-select: none; -} - -.btn:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -.btn:hover, -.btn:focus { - color: #333333; - text-decoration: none; -} - -.btn:active, -.btn.active { - background-image: none; - outline: 0; - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} - -.btn.disabled, -.btn[disabled], -fieldset[disabled] .btn { - pointer-events: none; - cursor: not-allowed; - opacity: 0.65; - filter: alpha(opacity=65); - -webkit-box-shadow: none; - box-shadow: none; -} - -.btn-default { - color: #333333; - background-color: #ffffff; - border-color: #cccccc; -} - -.btn-default:hover, -.btn-default:focus, -.btn-default:active, -.btn-default.active, -.open .dropdown-toggle.btn-default { - color: #333333; - background-color: #ebebeb; - border-color: #adadad; -} - -.btn-default:active, -.btn-default.active, -.open .dropdown-toggle.btn-default { - background-image: none; -} - -.btn-default.disabled, -.btn-default[disabled], -fieldset[disabled] .btn-default, -.btn-default.disabled:hover, -.btn-default[disabled]:hover, -fieldset[disabled] .btn-default:hover, -.btn-default.disabled:focus, -.btn-default[disabled]:focus, -fieldset[disabled] .btn-default:focus, -.btn-default.disabled:active, -.btn-default[disabled]:active, -fieldset[disabled] .btn-default:active, -.btn-default.disabled.active, -.btn-default[disabled].active, -fieldset[disabled] .btn-default.active { - background-color: #ffffff; - border-color: #cccccc; -} - -.btn-primary { - color: #ffffff; - background-color: #428bca; - border-color: #357ebd; -} - -.btn-primary:hover, -.btn-primary:focus, -.btn-primary:active, -.btn-primary.active, -.open .dropdown-toggle.btn-primary { - color: #ffffff; - background-color: #3276b1; - border-color: #285e8e; -} - -.btn-primary:active, -.btn-primary.active, -.open .dropdown-toggle.btn-primary { - background-image: none; -} - -.btn-primary.disabled, -.btn-primary[disabled], -fieldset[disabled] .btn-primary, -.btn-primary.disabled:hover, -.btn-primary[disabled]:hover, -fieldset[disabled] .btn-primary:hover, -.btn-primary.disabled:focus, -.btn-primary[disabled]:focus, -fieldset[disabled] .btn-primary:focus, -.btn-primary.disabled:active, -.btn-primary[disabled]:active, -fieldset[disabled] .btn-primary:active, -.btn-primary.disabled.active, -.btn-primary[disabled].active, -fieldset[disabled] .btn-primary.active { - background-color: #428bca; - border-color: #357ebd; -} - -.btn-warning { - color: #ffffff; - background-color: #f0ad4e; - border-color: #eea236; -} - -.btn-warning:hover, 
-.btn-warning:focus, -.btn-warning:active, -.btn-warning.active, -.open .dropdown-toggle.btn-warning { - color: #ffffff; - background-color: #ed9c28; - border-color: #d58512; -} - -.btn-warning:active, -.btn-warning.active, -.open .dropdown-toggle.btn-warning { - background-image: none; -} - -.btn-warning.disabled, -.btn-warning[disabled], -fieldset[disabled] .btn-warning, -.btn-warning.disabled:hover, -.btn-warning[disabled]:hover, -fieldset[disabled] .btn-warning:hover, -.btn-warning.disabled:focus, -.btn-warning[disabled]:focus, -fieldset[disabled] .btn-warning:focus, -.btn-warning.disabled:active, -.btn-warning[disabled]:active, -fieldset[disabled] .btn-warning:active, -.btn-warning.disabled.active, -.btn-warning[disabled].active, -fieldset[disabled] .btn-warning.active { - background-color: #f0ad4e; - border-color: #eea236; -} - -.btn-danger { - color: #ffffff; - background-color: #d9534f; - border-color: #d43f3a; -} - -.btn-danger:hover, -.btn-danger:focus, -.btn-danger:active, -.btn-danger.active, -.open .dropdown-toggle.btn-danger { - color: #ffffff; - background-color: #d2322d; - border-color: #ac2925; -} - -.btn-danger:active, -.btn-danger.active, -.open .dropdown-toggle.btn-danger { - background-image: none; -} - -.btn-danger.disabled, -.btn-danger[disabled], -fieldset[disabled] .btn-danger, -.btn-danger.disabled:hover, -.btn-danger[disabled]:hover, -fieldset[disabled] .btn-danger:hover, -.btn-danger.disabled:focus, -.btn-danger[disabled]:focus, -fieldset[disabled] .btn-danger:focus, -.btn-danger.disabled:active, -.btn-danger[disabled]:active, -fieldset[disabled] .btn-danger:active, -.btn-danger.disabled.active, -.btn-danger[disabled].active, -fieldset[disabled] .btn-danger.active { - background-color: #d9534f; - border-color: #d43f3a; -} - -.btn-success { - color: #ffffff; - background-color: #5cb85c; - border-color: #4cae4c; -} - -.btn-success:hover, -.btn-success:focus, -.btn-success:active, -.btn-success.active, -.open .dropdown-toggle.btn-success { - color: #ffffff; - background-color: #47a447; - border-color: #398439; -} - -.btn-success:active, -.btn-success.active, -.open .dropdown-toggle.btn-success { - background-image: none; -} - -.btn-success.disabled, -.btn-success[disabled], -fieldset[disabled] .btn-success, -.btn-success.disabled:hover, -.btn-success[disabled]:hover, -fieldset[disabled] .btn-success:hover, -.btn-success.disabled:focus, -.btn-success[disabled]:focus, -fieldset[disabled] .btn-success:focus, -.btn-success.disabled:active, -.btn-success[disabled]:active, -fieldset[disabled] .btn-success:active, -.btn-success.disabled.active, -.btn-success[disabled].active, -fieldset[disabled] .btn-success.active { - background-color: #5cb85c; - border-color: #4cae4c; -} - -.btn-info { - color: #ffffff; - background-color: #5bc0de; - border-color: #46b8da; -} - -.btn-info:hover, -.btn-info:focus, -.btn-info:active, -.btn-info.active, -.open .dropdown-toggle.btn-info { - color: #ffffff; - background-color: #39b3d7; - border-color: #269abc; -} - -.btn-info:active, -.btn-info.active, -.open .dropdown-toggle.btn-info { - background-image: none; -} - -.btn-info.disabled, -.btn-info[disabled], -fieldset[disabled] .btn-info, -.btn-info.disabled:hover, -.btn-info[disabled]:hover, -fieldset[disabled] .btn-info:hover, -.btn-info.disabled:focus, -.btn-info[disabled]:focus, -fieldset[disabled] .btn-info:focus, -.btn-info.disabled:active, -.btn-info[disabled]:active, -fieldset[disabled] .btn-info:active, -.btn-info.disabled.active, -.btn-info[disabled].active, -fieldset[disabled] 
[Removed lines from the bundled Bootstrap 3.x stylesheet, collapsed here because the extraction mangled the per-line diff markers: button variants, sizes, and block buttons; .btn-link; the .fade and .collapse helpers; the Glyphicons Halflings @font-face and icon classes; carets and dropdown menus; button groups and toolbars; input groups; navs, tabs, and pills; the navbar and its default and inverse themes; breadcrumbs; pagination and pager; labels and badges; jumbotron; thumbnails; alerts; progress bars and their striped variants; media objects; list groups; panels; wells; the close button; modals; tooltips; and the beginning of the popover rules. The range contains only stock Bootstrap rules, no HBase-specific styling.]
#999999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} - -.popover.right .arrow:after { - bottom: -10px; - left: 1px; - border-right-color: #ffffff; - border-left-width: 0; - content: " "; -} - -.popover.bottom .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-bottom-color: #999999; - border-bottom-color: rgba(0, 0, 0, 0.25); - border-top-width: 0; -} - -.popover.bottom .arrow:after { - top: 1px; - margin-left: -10px; - border-bottom-color: #ffffff; - border-top-width: 0; - content: " "; -} - -.popover.left .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-left-color: #999999; - border-left-color: rgba(0, 0, 0, 0.25); - border-right-width: 0; -} - -.popover.left .arrow:after { - right: 1px; - bottom: -10px; - border-left-color: #ffffff; - border-right-width: 0; - content: " "; -} - -.carousel { - position: relative; -} - -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} - -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} - -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - height: auto; - max-width: 100%; - line-height: 1; -} - -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} - -.carousel-inner > .active { - left: 0; -} - -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} - -.carousel-inner > .next { - left: 100%; -} - -.carousel-inner > .prev { - left: -100%; -} - -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} - -.carousel-inner > .active.left { - left: -100%; -} - -.carousel-inner > .active.right { - left: 100%; -} - -.carousel-control { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 15%; - font-size: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); - opacity: 0.5; - filter: alpha(opacity=50); -} - -.carousel-control.left { - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.5) 0), color-stop(rgba(0, 0, 0, 0.0001) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); -} - -.carousel-control.right { - right: 0; - left: auto; - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.0001) 0), color-stop(rgba(0, 0, 0, 0.5) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); -} - -.carousel-control:hover, -.carousel-control:focus { - color: #ffffff; - text-decoration: none; - opacity: 0.9; - filter: alpha(opacity=90); -} - -.carousel-control .icon-prev, -.carousel-control .icon-next, -.carousel-control 
.glyphicon-chevron-left, -.carousel-control .glyphicon-chevron-right { - position: absolute; - top: 50%; - left: 50%; - z-index: 5; - display: inline-block; -} - -.carousel-control .icon-prev, -.carousel-control .icon-next { - width: 20px; - height: 20px; - margin-top: -10px; - margin-left: -10px; - font-family: serif; -} - -.carousel-control .icon-prev:before { - content: '\2039'; -} - -.carousel-control .icon-next:before { - content: '\203a'; -} - -.carousel-indicators { - position: absolute; - bottom: 10px; - left: 50%; - z-index: 15; - width: 60%; - padding-left: 0; - margin-left: -30%; - text-align: center; - list-style: none; -} - -.carousel-indicators li { - display: inline-block; - width: 10px; - height: 10px; - margin: 1px; - text-indent: -999px; - cursor: pointer; - border: 1px solid #ffffff; - border-radius: 10px; -} - -.carousel-indicators .active { - width: 12px; - height: 12px; - margin: 0; - background-color: #ffffff; -} - -.carousel-caption { - position: absolute; - right: 15%; - bottom: 20px; - left: 15%; - z-index: 10; - padding-top: 20px; - padding-bottom: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); -} - -.carousel-caption .btn { - text-shadow: none; -} - -@media screen and (min-width: 768px) { - .carousel-control .icon-prev, - .carousel-control .icon-next { - width: 30px; - height: 30px; - margin-top: -15px; - margin-left: -15px; - font-size: 30px; - } - .carousel-caption { - right: 20%; - left: 20%; - padding-bottom: 30px; - } - .carousel-indicators { - bottom: 20px; - } -} - -.clearfix:before, -.clearfix:after { - display: table; - content: " "; -} - -.clearfix:after { - clear: both; -} - -.pull-right { - float: right !important; -} - -.pull-left { - float: left !important; -} - -.hide { - display: none !important; -} - -.show { - display: block !important; -} - -.invisible { - visibility: hidden; -} - -.text-hide { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.affix { - position: fixed; -} - -@-ms-viewport { - width: device-width; -} - -@media screen and (max-width: 400px) { - @-ms-viewport { - width: 320px; - } -} - -.hidden { - display: none !important; - visibility: hidden !important; -} - -.visible-xs { - display: none !important; -} - -tr.visible-xs { - display: none !important; -} - -th.visible-xs, -td.visible-xs { - display: none !important; -} - -@media (max-width: 767px) { - .visible-xs { - display: block !important; - } - tr.visible-xs { - display: table-row !important; - } - th.visible-xs, - td.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-xs.visible-sm { - display: block !important; - } - tr.visible-xs.visible-sm { - display: table-row !important; - } - th.visible-xs.visible-sm, - td.visible-xs.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-xs.visible-md { - display: block !important; - } - tr.visible-xs.visible-md { - display: table-row !important; - } - th.visible-xs.visible-md, - td.visible-xs.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-xs.visible-lg { - display: block !important; - } - tr.visible-xs.visible-lg { - display: table-row !important; - } - th.visible-xs.visible-lg, - td.visible-xs.visible-lg { - display: table-cell !important; - } -} - -.visible-sm { - display: none !important; -} - -tr.visible-sm { - display: none !important; -} - 
-th.visible-sm, -td.visible-sm { - display: none !important; -} - -@media (max-width: 767px) { - .visible-sm.visible-xs { - display: block !important; - } - tr.visible-sm.visible-xs { - display: table-row !important; - } - th.visible-sm.visible-xs, - td.visible-sm.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm { - display: block !important; - } - tr.visible-sm { - display: table-row !important; - } - th.visible-sm, - td.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-sm.visible-md { - display: block !important; - } - tr.visible-sm.visible-md { - display: table-row !important; - } - th.visible-sm.visible-md, - td.visible-sm.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-sm.visible-lg { - display: block !important; - } - tr.visible-sm.visible-lg { - display: table-row !important; - } - th.visible-sm.visible-lg, - td.visible-sm.visible-lg { - display: table-cell !important; - } -} - -.visible-md { - display: none !important; -} - -tr.visible-md { - display: none !important; -} - -th.visible-md, -td.visible-md { - display: none !important; -} - -@media (max-width: 767px) { - .visible-md.visible-xs { - display: block !important; - } - tr.visible-md.visible-xs { - display: table-row !important; - } - th.visible-md.visible-xs, - td.visible-md.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-md.visible-sm { - display: block !important; - } - tr.visible-md.visible-sm { - display: table-row !important; - } - th.visible-md.visible-sm, - td.visible-md.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md { - display: block !important; - } - tr.visible-md { - display: table-row !important; - } - th.visible-md, - td.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-md.visible-lg { - display: block !important; - } - tr.visible-md.visible-lg { - display: table-row !important; - } - th.visible-md.visible-lg, - td.visible-md.visible-lg { - display: table-cell !important; - } -} - -.visible-lg { - display: none !important; -} - -tr.visible-lg { - display: none !important; -} - -th.visible-lg, -td.visible-lg { - display: none !important; -} - -@media (max-width: 767px) { - .visible-lg.visible-xs { - display: block !important; - } - tr.visible-lg.visible-xs { - display: table-row !important; - } - th.visible-lg.visible-xs, - td.visible-lg.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-lg.visible-sm { - display: block !important; - } - tr.visible-lg.visible-sm { - display: table-row !important; - } - th.visible-lg.visible-sm, - td.visible-lg.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-lg.visible-md { - display: block !important; - } - tr.visible-lg.visible-md { - display: table-row !important; - } - th.visible-lg.visible-md, - td.visible-lg.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-lg { - display: block !important; - } - tr.visible-lg { - display: table-row !important; - } - th.visible-lg, - td.visible-lg { - display: table-cell !important; - } -} - -.hidden-xs { - display: block !important; -} - -tr.hidden-xs { - display: table-row 
!important; -} - -th.hidden-xs, -td.hidden-xs { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-xs { - display: none !important; - } - tr.hidden-xs { - display: none !important; - } - th.hidden-xs, - td.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-xs.hidden-sm { - display: none !important; - } - tr.hidden-xs.hidden-sm { - display: none !important; - } - th.hidden-xs.hidden-sm, - td.hidden-xs.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-xs.hidden-md { - display: none !important; - } - tr.hidden-xs.hidden-md { - display: none !important; - } - th.hidden-xs.hidden-md, - td.hidden-xs.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-xs.hidden-lg { - display: none !important; - } - tr.hidden-xs.hidden-lg { - display: none !important; - } - th.hidden-xs.hidden-lg, - td.hidden-xs.hidden-lg { - display: none !important; - } -} - -.hidden-sm { - display: block !important; -} - -tr.hidden-sm { - display: table-row !important; -} - -th.hidden-sm, -td.hidden-sm { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-sm.hidden-xs { - display: none !important; - } - tr.hidden-sm.hidden-xs { - display: none !important; - } - th.hidden-sm.hidden-xs, - td.hidden-sm.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-sm { - display: none !important; - } - tr.hidden-sm { - display: none !important; - } - th.hidden-sm, - td.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-sm.hidden-md { - display: none !important; - } - tr.hidden-sm.hidden-md { - display: none !important; - } - th.hidden-sm.hidden-md, - td.hidden-sm.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-sm.hidden-lg { - display: none !important; - } - tr.hidden-sm.hidden-lg { - display: none !important; - } - th.hidden-sm.hidden-lg, - td.hidden-sm.hidden-lg { - display: none !important; - } -} - -.hidden-md { - display: block !important; -} - -tr.hidden-md { - display: table-row !important; -} - -th.hidden-md, -td.hidden-md { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-md.hidden-xs { - display: none !important; - } - tr.hidden-md.hidden-xs { - display: none !important; - } - th.hidden-md.hidden-xs, - td.hidden-md.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-md.hidden-sm { - display: none !important; - } - tr.hidden-md.hidden-sm { - display: none !important; - } - th.hidden-md.hidden-sm, - td.hidden-md.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-md { - display: none !important; - } - tr.hidden-md { - display: none !important; - } - th.hidden-md, - td.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-md.hidden-lg { - display: none !important; - } - tr.hidden-md.hidden-lg { - display: none !important; - } - th.hidden-md.hidden-lg, - td.hidden-md.hidden-lg { - display: none !important; - } -} - -.hidden-lg { - display: block !important; -} - -tr.hidden-lg { - display: table-row !important; -} - -th.hidden-lg, -td.hidden-lg { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-lg.hidden-xs { - display: none !important; - } - 
tr.hidden-lg.hidden-xs { - display: none !important; - } - th.hidden-lg.hidden-xs, - td.hidden-lg.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-lg.hidden-sm { - display: none !important; - } - tr.hidden-lg.hidden-sm { - display: none !important; - } - th.hidden-lg.hidden-sm, - td.hidden-lg.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-lg.hidden-md { - display: none !important; - } - tr.hidden-lg.hidden-md { - display: none !important; - } - th.hidden-lg.hidden-md, - td.hidden-lg.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-lg { - display: none !important; - } - tr.hidden-lg { - display: none !important; - } - th.hidden-lg, - td.hidden-lg { - display: none !important; - } -} - -.visible-print { - display: none !important; -} - -tr.visible-print { - display: none !important; -} - -th.visible-print, -td.visible-print { - display: none !important; -} - -@media print { - .visible-print { - display: block !important; - } - tr.visible-print { - display: table-row !important; - } - th.visible-print, - td.visible-print { - display: table-cell !important; - } - .hidden-print { - display: none !important; - } - tr.hidden-print { - display: none !important; - } - th.hidden-print, - td.hidden-print { - display: none !important; - } -} \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css old mode 100755 new mode 100644 index a553c4f5e08a..5b96335ff6a0 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css @@ -1,9 +1,6 @@ /*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - *//*! 
normalize.css v2.1.0 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}hr{height:0;-moz-box-sizing:content-box;box-sizing:content-box}mark{color:#000;background:#ff0}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:0}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid #c0c0c0}legend{padding:0;border:0}button,input,select,textarea{margin:0;font-family:inherit;font-size:100%}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{padding:0;box-sizing:border-box}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:2cm .5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*,*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}button,input,select[multiple],textarea{background-image:none}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}img{vertical-align:middle}.img-responsive{display:block;height:auto;max-width:100%}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid 
#ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0 0 0 0);border:0}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16.099999999999998px;font-weight:200;line-height:1.4}@media(min-width:768px){.lead{font-size:21px}}small{font-size:85%}cite{font-style:normal}.text-muted{color:#999}.text-primary{color:#428bca}.text-warning{color:#c09853}.text-danger{color:#b94a48}.text-success{color:#468847}.text-info{color:#3a87ad}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-weight:500;line-height:1.1}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{margin-top:20px;margin-bottom:10px}h4,h5,h6{margin-top:10px;margin-bottom:10px}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}h1 small,.h1 small{font-size:24px}h2 small,.h2 small{font-size:18px}h3 small,.h3 small,h4 small,.h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:bold}dd{margin-left:0}@media(min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{font-size:17.5px;font-weight:300;line-height:1.25}blockquote p:last-child{margin-bottom:0}blockquote small{display:block;line-height:1.428571429;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:1.428571429}code,pre{font-family:Monaco,Menlo,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;white-space:nowrap;background-color:#f9f2f4;border-radius:4px}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre 
code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.row{margin-right:-15px;margin-left:-15px}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11{float:left}.col-xs-1{width:8.333333333333332%}.col-xs-2{width:16.666666666666664%}.col-xs-3{width:25%}.col-xs-4{width:33.33333333333333%}.col-xs-5{width:41.66666666666667%}.col-xs-6{width:50%}.col-xs-7{width:58.333333333333336%}.col-xs-8{width:66.66666666666666%}.col-xs-9{width:75%}.col-xs-10{width:83.33333333333334%}.col-xs-11{width:91.66666666666666%}.col-xs-12{width:100%}@media(min-width:768px){.container{max-width:750px}.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11{float:left}.col-sm-1{width:8.333333333333332%}.col-sm-2{width:16.666666666666664%}.col-sm-3{width:25%}.col-sm-4{width:33.33333333333333%}.col-sm-5{width:41.66666666666667%}.col-sm-6{width:50%}.col-sm-7{width:58.333333333333336%}.col-sm-8{width:66.66666666666666%}.col-sm-9{width:75%}.col-sm-10{width:83.33333333333334%}.col-sm-11{width:91.66666666666666%}.col-sm-12{width:100%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-2{left:16.666666666666664%}.col-sm-push-3{left:25%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-6{left:50%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-9{left:75%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-11{left:91.66666666666666%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-3{right:25%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-6{right:50%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-9{right:75%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-11{right:91.66666666666666%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-11{margin-left:91.66666666666666%}}@media(min-width:992px){.container{max-width:970px}.col-md-1,.col-md-2,.col-md-3,.col-m
d-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11{float:left}.col-md-1{width:8.333333333333332%}.col-md-2{width:16.666666666666664%}.col-md-3{width:25%}.col-md-4{width:33.33333333333333%}.col-md-5{width:41.66666666666667%}.col-md-6{width:50%}.col-md-7{width:58.333333333333336%}.col-md-8{width:66.66666666666666%}.col-md-9{width:75%}.col-md-10{width:83.33333333333334%}.col-md-11{width:91.66666666666666%}.col-md-12{width:100%}.col-md-push-0{left:auto}.col-md-push-1{left:8.333333333333332%}.col-md-push-2{left:16.666666666666664%}.col-md-push-3{left:25%}.col-md-push-4{left:33.33333333333333%}.col-md-push-5{left:41.66666666666667%}.col-md-push-6{left:50%}.col-md-push-7{left:58.333333333333336%}.col-md-push-8{left:66.66666666666666%}.col-md-push-9{left:75%}.col-md-push-10{left:83.33333333333334%}.col-md-push-11{left:91.66666666666666%}.col-md-pull-0{right:auto}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-3{right:25%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-6{right:50%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-9{right:75%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-11{right:91.66666666666666%}.col-md-offset-0{margin-left:0}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-2{margin-left:16.666666666666664%}.col-md-offset-3{margin-left:25%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-6{margin-left:50%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-9{margin-left:75%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-11{margin-left:91.66666666666666%}}@media(min-width:1200px){.container{max-width:1170px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11{float:left}.col-lg-1{width:8.333333333333332%}.col-lg-2{width:16.666666666666664%}.col-lg-3{width:25%}.col-lg-4{width:33.33333333333333%}.col-lg-5{width:41.66666666666667%}.col-lg-6{width:50%}.col-lg-7{width:58.333333333333336%}.col-lg-8{width:66.66666666666666%}.col-lg-9{width:75%}.col-lg-10{width:83.33333333333334%}.col-lg-11{width:91.66666666666666%}.col-lg-12{width:100%}.col-lg-push-0{left:auto}.col-lg-push-1{left:8.333333333333332%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-3{left:25%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-6{left:50%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-9{left:75%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-11{left:91.66666666666666%}.col-lg-pull-0{right:auto}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-3{right:25%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-6{right:50%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-9{right:75%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-offset-0{margin-left:0}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-4{margin-left:33.33333333333333%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-8{margin-le
ft:66.66666666666666%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-11{margin-left:91.66666666666666%}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table thead>tr>th,.table tbody>tr>th,.table tfoot>tr>th,.table thead>tr>td,.table tbody>tr>td,.table tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table caption+thead tr:first-child th,.table colgroup+thead tr:first-child th,.table thead:first-child tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed thead>tr>th,.table-condensed tbody>tr>th,.table-condensed tfoot>tr>th,.table-condensed thead>tr>td,.table-condensed tbody>tr>td,.table-condensed tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*="col-"]{display:table-column;float:none}table td[class*="col-"],table th[class*="col-"]{display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8;border-color:#d6e9c6}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td{background-color:#d0e9c6;border-color:#c9e2b3}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede;border-color:#eed3d7}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td{background-color:#ebcccc;border-color:#e6c1c7}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3;border-color:#fbeed5}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:h
over>td{background-color:#faf2cc;border-color:#f8e5be}@media(max-width:768px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:scroll;overflow-y:hidden;border:1px solid #ddd}.table-responsive>.table{margin-bottom:0;background-color:#fff}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>thead>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>thead>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:bold}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type="file"]{display:block}select[multiple],select[size]{height:auto}select optgroup{font-family:inherit;font-size:inherit;font-style:inherit}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}input[type="number"]::-webkit-outer-spin-button,input[type="number"]::-webkit-inner-spin-button{height:auto}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;vertical-align:middle;background-color:#fff;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6)}.form-control[disabled],.form-control[readonly],fieldset[disabled] 
.form-control{cursor:not-allowed;background-color:#eee}textarea.form-control{height:auto}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;padding-left:20px;margin-top:10px;margin-bottom:10px;vertical-align:middle}.radio label,.checkbox label{display:inline;margin-bottom:0;font-weight:normal;cursor:pointer}.radio input[type="radio"],.radio-inline input[type="radio"],.checkbox input[type="checkbox"],.checkbox-inline input[type="checkbox"]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:normal;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type="radio"][disabled],input[type="checkbox"][disabled],.radio[disabled],.radio-inline[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type="radio"],fieldset[disabled] input[type="checkbox"],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm{height:auto}.input-lg{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:45px;line-height:45px}textarea.input-lg{height:auto}.has-warning .help-block,.has-warning .control-label{color:#c09853}.has-warning .form-control{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-warning .form-control:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.has-warning .input-group-addon{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.has-error .help-block,.has-error .control-label{color:#b94a48}.has-error .form-control{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-error .form-control:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.has-error .input-group-addon{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.has-success .help-block,.has-success .control-label{color:#468847}.has-success .form-control{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-success .form-control:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.has-success .input-group-addon{color:#468847;background-color:#dff0d8;border-color:#468847}.form-control-static{padding-top:7px;margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media(min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block}.form-inline .radio,.form-inline .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:none;margin-left:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal 
.radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}@media(min-width:768px){.form-horizontal .control-label{text-align:right}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:normal;line-height:1.428571429;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;border:1px solid transparent;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{pointer-events:none;cursor:not-allowed;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] 
.btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-link{font-weight:normal;color:#428bca;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999;text-decoration:none}.btn-lg{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm,.btn-xs{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs{padding:1px 
5px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;transition:height .35s ease}@font-face{font-family:'Glyphicons Halflings';src:url('../fonts/glyphicons-halflings-regular.eot');src:url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'),url('../fonts/glyphicons-halflings-regular.woff') format('woff'),url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'),url('../fonts/glyphicons-halflings-regular.svg#glyphicons-halflingsregular') format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';-webkit-font-smoothing:antialiased;font-style:normal;font-weight:normal;line-height:1}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-print:before{content:"\e045"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e0
57"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:befor
e{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-briefcase:before{content:"\1f4bc"}.glyphicon-calendar:before{content:"\1f4c5"}.glyphicon-pushpin:before{content:"\1f4cc"}.glyphicon-paperclip:before{content:"\1f4ce"}.glyphicon-camera:before{content:"\1f4f7"}.glyphicon-lock:before{content:"\1f512"}.glyphicon-bell:before{content:"\1f514"}.glyphicon-bookmark:before{content:"\1f516"}.glyphicon-fire:before{content:"\1f525"}.glyphicon-wrench:before{content:"\1f527"}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid #000;border-right:4px solid transparent;border-bottom:0 dotted;border-left:4px solid transparent;content:""}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,0.175);box-shadow:0 6px 12px rgba(0,0,0,0.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 
20px;clear:both;font-weight:normal;line-height:1.428571429;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{color:#fff;text-decoration:none;background-color:#428bca}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#428bca;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.428571429;color:#999}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0 dotted;border-bottom:4px solid #000;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media(min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}}.btn-default .caret{border-top-color:#333}.btn-primary .caret,.btn-success .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret{border-top-color:#fff}.dropup .btn-default .caret{border-bottom-color:#333}.dropup .btn-primary .caret,.dropup .btn-success .caret,.dropup .btn-warning .caret,.dropup .btn-danger .caret,.dropup .btn-info .caret{border-bottom-color:#fff}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar .btn-group{float:left}.btn-toolbar>.btn+.btn,.btn-toolbar>.btn-group+.btn,.btn-toolbar>.btn+.btn-group,.btn-toolbar>.btn-group+.btn-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group-xs>.btn{padding:5px 10px;padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-sm>.btn{padding:5px 
10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-right-radius:0;border-bottom-left-radius:4px;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child>.btn:last-child,.btn-group-vertical>.btn-group:first-child>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;border-collapse:separate;table-layout:fixed}.btn-group-justified .btn{display:table-cell;float:none;width:1%}[data-toggle="buttons"]>.btn>input[type="radio"],[data-toggle="buttons"]>.btn>input[type="checkbox"]{display:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group.col{float:none;padding-right:0;padding-left:0}.input-group .form-control{width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:45px;line-height:45px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group 
.form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:normal;line-height:1;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type="radio"],.input-group-addon input[type="checkbox"]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-4px}.input-group-btn>.btn:hover,.input-group-btn>.btn:active{z-index:2}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.428571429;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}}.nav-tabs.nav-justified>li>a{margin-right:0;border-bottom:1px solid #ddd}.nav-tabs.nav-justified>.active>a{border-bottom-color:#fff}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:5px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-justified>li{display:table-cell;width:1%}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-bottom:1px solid 
#ddd}.nav-tabs-justified>.active>a{border-bottom-color:#fff}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.nav .caret{border-top-color:#428bca;border-bottom-color:#428bca}.nav a:hover .caret{border-top-color:#2a6496;border-bottom-color:#2a6496}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;z-index:1000;min-height:50px;margin-bottom:20px;border:1px solid transparent}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}@media(min-width:768px){.navbar{border-radius:4px}}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}@media(min-width:768px){.navbar-header{float:left}}.navbar-collapse{max-height:340px;padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;box-shadow:inset 0 1px 0 rgba(255,255,255,0.1);-webkit-overflow-scrolling:touch}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse.in{overflow-y:auto}@media(min-width:768px){.navbar-collapse{width:auto;border-top:0;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-collapse .navbar-nav.navbar-left:first-child{margin-left:-15px}.navbar-collapse .navbar-nav.navbar-right:last-child{margin-right:-15px}.navbar-collapse .navbar-text:last-child{margin-right:0}}.container>.navbar-header,.container>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media(min-width:768px){.container>.navbar-header,.container>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{border-width:0 0 1px}@media(min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;border-width:0 0 1px}@media(min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;z-index:1030}.navbar-fixed-bottom{bottom:0;margin-bottom:0}.navbar-brand{float:left;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media(min-width:768px){.navbar>.container .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;border:1px solid transparent;border-radius:4px}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media(min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media(max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 
25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media(min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}@media(min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}@media(min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.navbar-form .radio input[type="radio"],.navbar-form .checkbox input[type="checkbox"]{float:none;margin-left:0}}@media(max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media(min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-nav.pull-right>li>.dropdown-menu,.navbar-nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-text{float:left;margin-top:15px;margin-bottom:15px}@media(min-width:768px){.navbar-text{margin-right:15px;margin-left:15px}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#ccc}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e6e6e6}.navbar-default .navbar-nav>.dropdown>a:hover .caret,.navbar-default .navbar-nav>.dropdown>a:focus .caret{border-top-color:#333;border-bottom-color:#333}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.open>a .caret,.navbar-default .navbar-nav>.open>a:hover .caret,.navbar-default .navbar-nav>.open>a:focus .caret{border-top-color:#555;border-bottom-color:#555}.navbar-default .navbar-nav>.dropdown>a .caret{border-top-color:#777;border-bottom-color:#777}@media(max-width:767px){.navbar-default .navbar-nav .open 
.dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#999}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .navbar-nav>li>a{color:#999}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.dropdown>a:hover .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-nav>.dropdown>a .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .navbar-nav>.open>a .caret,.navbar-inverse .navbar-nav>.open>a:hover .caret,.navbar-inverse .navbar-nav>.open>a:focus .caret{border-top-color:#fff;border-bottom-color:#fff}@media(max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#999}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#999}.pagination{display:inline-block;padding-left:0;margin:20px 
0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.428571429;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:4px;border-top-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{background-color:#eee}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;cursor:default;background-color:#428bca;border-color:#428bca}.pagination>.disabled>span,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:6px;border-top-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:3px;border-top-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}.label[href]:hover,.label[href]:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.label-default{background-color:#999}.label-default[href]:hover,.label-default[href]:focus{background-color:#808080}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#999;border-radius:10px}.badge:empty{display:none}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}.btn .badge{position:relative;top:-1px}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;font-size:21px;font-weight:200;line-height:2.1428571435;color:inherit;background-color:#eee}.jumbotron h1{line-height:1;color:inherit}.jumbotron p{line-height:1.4}.container .jumbotron{border-radius:6px}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-right:60px;padding-left:60px}.jumbotron h1{font-size:63px}}.thumbnail{display:inline-block;display:block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img{display:block;height:auto;max-width:100%}a.thumbnail:hover,a.thumbnail:focus{border-color:#428bca}.thumbnail>img{margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:bold}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable{padding-right:35px}.alert-dismissable .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#356635}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#2d6987}.alert-warning{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.alert-warning hr{border-top-color:#f8e5be}.alert-warning .alert-link{color:#a47e3c}.alert-danger{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-danger hr{border-top-color:#e6c1c7}.alert-danger .alert-link{color:#953b39}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 
50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-size:40px 40px}.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 
75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-right-radius:4px;border-top-left-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{text-decoration:none;background-color:#f5f5f5}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.05);box-shadow:0 1px 1px rgba(0,0,0,0.05)}.panel-body{padding:15px}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0}.panel>.list-group .list-group-item:first-child{border-top-right-radius:0;border-top-left-radius:0}.panel>.list-group .list-group-item:last-child{border-bottom:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.panel>.table{margin-bottom:0}.panel>.panel-body+.table{border-top:1px solid #ddd}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:3px;border-top-left-radius:3px}.panel-title{margin-top:0;margin-bottom:0;font-size:16px}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group .panel{margin-bottom:0;overflow:hidden;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-warning{border-color:#fbeed5}.panel-warning>.panel-heading{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#fbeed5}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#fbeed5}.panel-danger{border-color:#eed3d7}.panel-danger>.panel-heading{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#eed3d7}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#eed3d7}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:bold;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}body.modal-open,.modal-open .navbar-fixed-top,.modal-open .navbar-fixed-bottom{margin-right:15px}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:auto;overflow-y:scroll}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in 
.modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{z-index:1050;width:auto;padding:10px;margin-right:auto;margin-left:auto}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,0.5);box-shadow:0 3px 9px rgba(0,0,0,0.5);background-clip:padding-box}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1030;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{min-height:16.428571429px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{padding:19px 20px 20px;margin-top:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media screen and (min-width:768px){.modal-dialog{right:auto;left:50%;width:600px;padding-top:30px;padding-bottom:30px}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,0.5);box-shadow:0 5px 15px rgba(0,0,0,0.5)}}.tooltip{position:absolute;z-index:1030;display:block;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px 
solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top .arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0;content:" "}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0;content:" "}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0;content:" "}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0;content:" "}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;height:auto;max-width:100%;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6);opacity:.5;filter:alpha(opacity=50)}.carousel-control.left{background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.5)),to(rgba(0,0,0,0.0001)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.5) 0),color-stop(rgba(0,0,0,0.0001) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000',endColorstr='#00000000',GradientType=1)}.carousel-control.right{right:0;left:auto;background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.0001)),to(rgba(0,0,0,0.5)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.0001) 0),color-stop(rgba(0,0,0,0.5) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000',endColorstr='#80000000',GradientType=1)}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control 
.glyphicon-chevron-right{position:absolute;top:50%;left:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after{display:table;content:" "}.clearfix:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.affix{position:fixed}@-ms-viewport{width:device-width}@media screen and (max-width:400px){@-ms-viewport{width:320px}}.hidden{display:none!important;visibility:hidden!important}.visible-xs{display:none!important}tr.visible-xs{display:none!important}th.visible-xs,td.visible-xs{display:none!important}@media(max-width:767px){.visible-xs{display:block!important}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-xs.visible-sm{display:block!important}tr.visible-xs.visible-sm{display:table-row!important}th.visible-xs.visible-sm,td.visible-xs.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-xs.visible-md{display:block!important}tr.visible-xs.visible-md{display:table-row!important}th.visible-xs.visible-md,td.visible-xs.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-xs.visible-lg{display:block!important}tr.visible-xs.visible-lg{display:table-row!important}th.visible-xs.visible-lg,td.visible-xs.visible-lg{display:table-cell!important}}.visible-sm{display:none!important}tr.visible-sm{display:none!important}th.visible-sm,td.visible-sm{display:none!important}@media(max-width:767px){.visible-sm.visible-xs{display:block!important}tr.visible-sm.visible-xs{display:table-row!important}th.visible-sm.visible-xs,td.visible-sm.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-sm{display:block!important}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media(min-width:992px) and 
(max-width:1199px){.visible-sm.visible-md{display:block!important}tr.visible-sm.visible-md{display:table-row!important}th.visible-sm.visible-md,td.visible-sm.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-sm.visible-lg{display:block!important}tr.visible-sm.visible-lg{display:table-row!important}th.visible-sm.visible-lg,td.visible-sm.visible-lg{display:table-cell!important}}.visible-md{display:none!important}tr.visible-md{display:none!important}th.visible-md,td.visible-md{display:none!important}@media(max-width:767px){.visible-md.visible-xs{display:block!important}tr.visible-md.visible-xs{display:table-row!important}th.visible-md.visible-xs,td.visible-md.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-md.visible-sm{display:block!important}tr.visible-md.visible-sm{display:table-row!important}th.visible-md.visible-sm,td.visible-md.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-md{display:block!important}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-md.visible-lg{display:block!important}tr.visible-md.visible-lg{display:table-row!important}th.visible-md.visible-lg,td.visible-md.visible-lg{display:table-cell!important}}.visible-lg{display:none!important}tr.visible-lg{display:none!important}th.visible-lg,td.visible-lg{display:none!important}@media(max-width:767px){.visible-lg.visible-xs{display:block!important}tr.visible-lg.visible-xs{display:table-row!important}th.visible-lg.visible-xs,td.visible-lg.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-lg.visible-sm{display:block!important}tr.visible-lg.visible-sm{display:table-row!important}th.visible-lg.visible-sm,td.visible-lg.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-lg.visible-md{display:block!important}tr.visible-lg.visible-md{display:table-row!important}th.visible-lg.visible-md,td.visible-lg.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-lg{display:block!important}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}.hidden-xs{display:block!important}tr.hidden-xs{display:table-row!important}th.hidden-xs,td.hidden-xs{display:table-cell!important}@media(max-width:767px){.hidden-xs{display:none!important}tr.hidden-xs{display:none!important}th.hidden-xs,td.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-xs.hidden-sm{display:none!important}tr.hidden-xs.hidden-sm{display:none!important}th.hidden-xs.hidden-sm,td.hidden-xs.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-xs.hidden-md{display:none!important}tr.hidden-xs.hidden-md{display:none!important}th.hidden-xs.hidden-md,td.hidden-xs.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-xs.hidden-lg{display:none!important}tr.hidden-xs.hidden-lg{display:none!important}th.hidden-xs.hidden-lg,td.hidden-xs.hidden-lg{display:none!important}}.hidden-sm{display:block!important}tr.hidden-sm{display:table-row!important}th.hidden-sm,td.hidden-sm{display:table-cell!important}@media(max-width:767px){.hidden-sm.hidden-xs{display:none!important}tr.hidden-sm.hidden-xs{display:none!important}th.hidden-sm.hidden-xs,td.hidden-sm.hidden-xs{display:none!important}}@media(min-width:768px) and 
(max-width:991px){.hidden-sm{display:none!important}tr.hidden-sm{display:none!important}th.hidden-sm,td.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-sm.hidden-md{display:none!important}tr.hidden-sm.hidden-md{display:none!important}th.hidden-sm.hidden-md,td.hidden-sm.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-sm.hidden-lg{display:none!important}tr.hidden-sm.hidden-lg{display:none!important}th.hidden-sm.hidden-lg,td.hidden-sm.hidden-lg{display:none!important}}.hidden-md{display:block!important}tr.hidden-md{display:table-row!important}th.hidden-md,td.hidden-md{display:table-cell!important}@media(max-width:767px){.hidden-md.hidden-xs{display:none!important}tr.hidden-md.hidden-xs{display:none!important}th.hidden-md.hidden-xs,td.hidden-md.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-md.hidden-sm{display:none!important}tr.hidden-md.hidden-sm{display:none!important}th.hidden-md.hidden-sm,td.hidden-md.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}tr.hidden-md{display:none!important}th.hidden-md,td.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-md.hidden-lg{display:none!important}tr.hidden-md.hidden-lg{display:none!important}th.hidden-md.hidden-lg,td.hidden-md.hidden-lg{display:none!important}}.hidden-lg{display:block!important}tr.hidden-lg{display:table-row!important}th.hidden-lg,td.hidden-lg{display:table-cell!important}@media(max-width:767px){.hidden-lg.hidden-xs{display:none!important}tr.hidden-lg.hidden-xs{display:none!important}th.hidden-lg.hidden-xs,td.hidden-lg.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-lg.hidden-sm{display:none!important}tr.hidden-lg.hidden-sm{display:none!important}th.hidden-lg.hidden-sm,td.hidden-lg.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-lg.hidden-md{display:none!important}tr.hidden-lg.hidden-md{display:none!important}th.hidden-lg.hidden-md,td.hidden-lg.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-lg{display:none!important}tr.hidden-lg{display:none!important}th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print{display:none!important}tr.visible-print{display:none!important}th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}.hidden-print{display:none!important}tr.hidden-print{display:none!important}th.hidden-print,td.hidden-print{display:none!important}}
\ No newline at end of file
+ * Bootstrap v3.4.1 (https://getbootstrap.com/)
+ * Copyright 2011-2019 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*!
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;-moz-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:"Glyphicons Halflings";src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format("embedded-opentype"),url(../fonts/glyphicons-halflings-regular.woff2) format("woff2"),url(../fonts/glyphicons-halflings-regular.woff) format("woff"),url(../fonts/glyphicons-halflings-regular.ttf) format("truetype"),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format("svg")}.glyphicon{position:relative;top:1px;display:inline-block;font-family:"Glyphicons Halflings";font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none;margin-left:-5px}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote 
small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:"\2014 \00A0"}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:""}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:"\00A0 \2014"}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.row-no-gutters{margin-right:0;margin-left:0}.row-no-gutters 
[class*=col-]{padding-right:0;padding-left:0}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table th[class*=col-]{position:static;display:table-cell;float:none}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid 
#ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-appearance:none;-moz-appearance:none;appearance:none}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s,-webkit-box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px 
rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=datetime-local].form-control,input[type=month].form-control,input[type=time].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],.input-group-sm input[type=time],input[type=date].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm,input[type=time].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],.input-group-lg input[type=time],input[type=date].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg,input[type=time].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 
16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm .form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;margin-bottom:0;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;background-image:none;border:1px solid transparent;padding:6px 12px;font-size:14px;line-height:1.42857143;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);opacity:.65;-webkit-box-shadow:none;box-shadow:none}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;background-image:none;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;background-image:none;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;background-image:none;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;background-image:none;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;background-image:none;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;background-image:none;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-right:15px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-right:-15px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);margin-top:8px;margin-bottom:8px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0%;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{padding:0;cursor:pointer;background:0 
0;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out;transition:transform .3s ease-out,-webkit-transform .3s ease-out,-o-transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5);outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:12px;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right 
.tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-style:normal;font-weight:400;line-height:1.42857143;line-break:auto;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;font-size:14px;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover>.arrow{border-width:11px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:-webkit-transform .6s ease-in-out;transition:transform .6s ease-in-out;transition:transform .6s ease-in-out,-webkit-transform .6s ease-in-out,-o-transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);left:0}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);left:0}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);left:0}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;outline:0;filter:alpha(opacity=90);opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:"\2039"}.carousel-control .icon-next:before{content:"\203a"}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} +/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css index 0014ae737a0c..2661c8d62229 100644 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css @@ -39,6 +39,11 @@ section { margin-bottom: 3em; } margin-top: 1.2em; } +table#tab_Procedures td:nth-child(-n+7) { + word-break: normal; + overflow-wrap: normal; +} + /* Region Server page styling */ /* striped tables styling */ @@ -48,17 +53,17 @@ table.table-striped td { } /* sortable tables styling */ -table.tablesorter thead tr .header { +table.tablesorter thead tr .tablesorter-header { background-image: url(bg.gif); background-repeat: no-repeat; background-position: center right; cursor: pointer; padding-right: 30px; } -table.tablesorter thead tr .headerSortUp { +table.tablesorter thead tr .tablesorter-headerAsc { background-image: url(asc.gif); } -table.tablesorter thead tr .headerSortDown { +table.tablesorter thead tr .tablesorter-headerDesc { background-image: url(desc.gif); } diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot old mode 100755 new mode 100644 index 87eaa434234e..b93a4953fff6 Binary files a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot and b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot differ diff --git 
a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg old mode 100755 new mode 100644 index 5fee0685496c..94fb5490a2ed --- a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg +++ b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg @@ -6,223 +6,283 @@ [SVG glyph markup elided: the changed <glyph> element lines were stripped during extraction, leaving only bare +/- diff markers] \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf old mode 100755 new mode 100644 index be784dc1d5bc..1413fc609ab6 Binary files a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf and b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf differ diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff old mode 100755 new mode 100644 index 2cc3e4852a5a..9e612858f802 Binary files a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff and b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff differ diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 new file mode 100644 index 000000000000..64539b54c375 Binary files /dev/null and b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 differ diff --git a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js b/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js deleted file mode 100755 index 2c64257146c4..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js +++ /dev/null @@ -1,1999 +0,0 @@ -/** -* bootstrap.js v3.0.0 by @fat and @mdo -* Copyright 2013 Twitter Inc.
-* http://www.apache.org/licenses/LICENSE-2.0 -*/ -if (!jQuery) { throw new Error("Bootstrap requires jQuery") } - -/* ======================================================================== - * Bootstrap: transition.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#transitions - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/) - // ============================================================ - - function transitionEnd() { - var el = document.createElement('bootstrap') - - var transEndEventNames = { - 'WebkitTransition' : 'webkitTransitionEnd' - , 'MozTransition' : 'transitionend' - , 'OTransition' : 'oTransitionEnd otransitionend' - , 'transition' : 'transitionend' - } - - for (var name in transEndEventNames) { - if (el.style[name] !== undefined) { - return { end: transEndEventNames[name] } - } - } - } - - // http://blog.alexmaccaw.com/css-transitions - $.fn.emulateTransitionEnd = function (duration) { - var called = false, $el = this - $(this).one($.support.transition.end, function () { called = true }) - var callback = function () { if (!called) $($el).trigger($.support.transition.end) } - setTimeout(callback, duration) - return this - } - - $(function () { - $.support.transition = transitionEnd() - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: alert.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#alerts - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ======================================================================== */ - - -+function ($) { "use strict"; - - // ALERT CLASS DEFINITION - // ====================== - - var dismiss = '[data-dismiss="alert"]' - var Alert = function (el) { - $(el).on('click', dismiss, this.close) - } - - Alert.prototype.close = function (e) { - var $this = $(this) - var selector = $this.attr('data-target') - - if (!selector) { - selector = $this.attr('href') - selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 - } - - var $parent = $(selector) - - if (e) e.preventDefault() - - if (!$parent.length) { - $parent = $this.hasClass('alert') ? $this : $this.parent() - } - - $parent.trigger(e = $.Event('close.bs.alert')) - - if (e.isDefaultPrevented()) return - - $parent.removeClass('in') - - function removeElement() { - $parent.trigger('closed.bs.alert').remove() - } - - $.support.transition && $parent.hasClass('fade') ? - $parent - .one($.support.transition.end, removeElement) - .emulateTransitionEnd(150) : - removeElement() - } - - - // ALERT PLUGIN DEFINITION - // ======================= - - var old = $.fn.alert - - $.fn.alert = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.alert') - - if (!data) $this.data('bs.alert', (data = new Alert(this))) - if (typeof option == 'string') data[option].call($this) - }) - } - - $.fn.alert.Constructor = Alert - - - // ALERT NO CONFLICT - // ================= - - $.fn.alert.noConflict = function () { - $.fn.alert = old - return this - } - - - // ALERT DATA-API - // ============== - - $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: button.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#buttons - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // BUTTON PUBLIC CLASS DEFINITION - // ============================== - - var Button = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Button.DEFAULTS, options) - } - - Button.DEFAULTS = { - loadingText: 'loading...' - } - - Button.prototype.setState = function (state) { - var d = 'disabled' - var $el = this.$element - var val = $el.is('input') ? 'val' : 'html' - var data = $el.data() - - state = state + 'Text' - - if (!data.resetText) $el.data('resetText', $el[val]()) - - $el[val](data[state] || this.options[state]) - - // push to event loop to allow forms to submit - setTimeout(function () { - state == 'loadingText' ? 
- $el.addClass(d).attr(d, d) : - $el.removeClass(d).removeAttr(d); - }, 0) - } - - Button.prototype.toggle = function () { - var $parent = this.$element.closest('[data-toggle="buttons"]') - - if ($parent.length) { - var $input = this.$element.find('input') - .prop('checked', !this.$element.hasClass('active')) - .trigger('change') - if ($input.prop('type') === 'radio') $parent.find('.active').removeClass('active') - } - - this.$element.toggleClass('active') - } - - - // BUTTON PLUGIN DEFINITION - // ======================== - - var old = $.fn.button - - $.fn.button = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.button') - var options = typeof option == 'object' && option - - if (!data) $this.data('bs.button', (data = new Button(this, options))) - - if (option == 'toggle') data.toggle() - else if (option) data.setState(option) - }) - } - - $.fn.button.Constructor = Button - - - // BUTTON NO CONFLICT - // ================== - - $.fn.button.noConflict = function () { - $.fn.button = old - return this - } - - - // BUTTON DATA-API - // =============== - - $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) { - var $btn = $(e.target) - if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') - $btn.button('toggle') - e.preventDefault() - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: carousel.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#carousel - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ======================================================================== */ - - -+function ($) { "use strict"; - - // CAROUSEL CLASS DEFINITION - // ========================= - - var Carousel = function (element, options) { - this.$element = $(element) - this.$indicators = this.$element.find('.carousel-indicators') - this.options = options - this.paused = - this.sliding = - this.interval = - this.$active = - this.$items = null - - this.options.pause == 'hover' && this.$element - .on('mouseenter', $.proxy(this.pause, this)) - .on('mouseleave', $.proxy(this.cycle, this)) - } - - Carousel.DEFAULTS = { - interval: 5000 - , pause: 'hover' - , wrap: true - } - - Carousel.prototype.cycle = function (e) { - e || (this.paused = false) - - this.interval && clearInterval(this.interval) - - this.options.interval - && !this.paused - && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) - - return this - } - - Carousel.prototype.getActiveIndex = function () { - this.$active = this.$element.find('.item.active') - this.$items = this.$active.parent().children() - - return this.$items.index(this.$active) - } - - Carousel.prototype.to = function (pos) { - var that = this - var activeIndex = this.getActiveIndex() - - if (pos > (this.$items.length - 1) || pos < 0) return - - if (this.sliding) return this.$element.one('slid', function () { that.to(pos) }) - if (activeIndex == pos) return this.pause().cycle() - - return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos])) - } - - Carousel.prototype.pause = function (e) { - e || (this.paused = true) - - if (this.$element.find('.next, .prev').length && $.support.transition.end) { - this.$element.trigger($.support.transition.end) - this.cycle(true) - } - - this.interval = clearInterval(this.interval) - - return this - } - - Carousel.prototype.next = function () { - if (this.sliding) return - return this.slide('next') - } - - Carousel.prototype.prev = function () { - if (this.sliding) return - return this.slide('prev') - } - - Carousel.prototype.slide = function (type, next) { - var $active = this.$element.find('.item.active') - var $next = next || $active[type]() - var isCycling = this.interval - var direction = type == 'next' ? 'left' : 'right' - var fallback = type == 'next' ? 
'first' : 'last' - var that = this - - if (!$next.length) { - if (!this.options.wrap) return - $next = this.$element.find('.item')[fallback]() - } - - this.sliding = true - - isCycling && this.pause() - - var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction }) - - if ($next.hasClass('active')) return - - if (this.$indicators.length) { - this.$indicators.find('.active').removeClass('active') - this.$element.one('slid', function () { - var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()]) - $nextIndicator && $nextIndicator.addClass('active') - }) - } - - if ($.support.transition && this.$element.hasClass('slide')) { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $next.addClass(type) - $next[0].offsetWidth // force reflow - $active.addClass(direction) - $next.addClass(direction) - $active - .one($.support.transition.end, function () { - $next.removeClass([type, direction].join(' ')).addClass('active') - $active.removeClass(['active', direction].join(' ')) - that.sliding = false - setTimeout(function () { that.$element.trigger('slid') }, 0) - }) - .emulateTransitionEnd(600) - } else { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $active.removeClass('active') - $next.addClass('active') - this.sliding = false - this.$element.trigger('slid') - } - - isCycling && this.cycle() - - return this - } - - - // CAROUSEL PLUGIN DEFINITION - // ========================== - - var old = $.fn.carousel - - $.fn.carousel = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.carousel') - var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option) - var action = typeof option == 'string' ? option : options.slide - - if (!data) $this.data('bs.carousel', (data = new Carousel(this, options))) - if (typeof option == 'number') data.to(option) - else if (action) data[action]() - else if (options.interval) data.pause().cycle() - }) - } - - $.fn.carousel.Constructor = Carousel - - - // CAROUSEL NO CONFLICT - // ==================== - - $.fn.carousel.noConflict = function () { - $.fn.carousel = old - return this - } - - - // CAROUSEL DATA-API - // ================= - - $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) { - var $this = $(this), href - var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7 - var options = $.extend({}, $target.data(), $this.data()) - var slideIndex = $this.attr('data-slide-to') - if (slideIndex) options.interval = false - - $target.carousel(options) - - if (slideIndex = $this.attr('data-slide-to')) { - $target.data('bs.carousel').to(slideIndex) - } - - e.preventDefault() - }) - - $(window).on('load', function () { - $('[data-ride="carousel"]').each(function () { - var $carousel = $(this) - $carousel.carousel($carousel.data()) - }) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: collapse.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#collapse - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // COLLAPSE PUBLIC CLASS DEFINITION - // ================================ - - var Collapse = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Collapse.DEFAULTS, options) - this.transitioning = null - - if (this.options.parent) this.$parent = $(this.options.parent) - if (this.options.toggle) this.toggle() - } - - Collapse.DEFAULTS = { - toggle: true - } - - Collapse.prototype.dimension = function () { - var hasWidth = this.$element.hasClass('width') - return hasWidth ? 'width' : 'height' - } - - Collapse.prototype.show = function () { - if (this.transitioning || this.$element.hasClass('in')) return - - var startEvent = $.Event('show.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var actives = this.$parent && this.$parent.find('> .panel > .in') - - if (actives && actives.length) { - var hasData = actives.data('bs.collapse') - if (hasData && hasData.transitioning) return - actives.collapse('hide') - hasData || actives.data('bs.collapse', null) - } - - var dimension = this.dimension() - - this.$element - .removeClass('collapse') - .addClass('collapsing') - [dimension](0) - - this.transitioning = 1 - - var complete = function () { - this.$element - .removeClass('collapsing') - .addClass('in') - [dimension]('auto') - this.transitioning = 0 - this.$element.trigger('shown.bs.collapse') - } - - if (!$.support.transition) return complete.call(this) - - var scrollSize = $.camelCase(['scroll', dimension].join('-')) - - this.$element - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - [dimension](this.$element[0][scrollSize]) - } - - Collapse.prototype.hide = function () { - if (this.transitioning || !this.$element.hasClass('in')) return - - var startEvent = $.Event('hide.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var dimension = this.dimension() - - this.$element - [dimension](this.$element[dimension]()) - [0].offsetHeight - - this.$element - .addClass('collapsing') - .removeClass('collapse') - .removeClass('in') - - this.transitioning = 1 - - var complete = function () { - this.transitioning = 0 - this.$element - .trigger('hidden.bs.collapse') - .removeClass('collapsing') - .addClass('collapse') - } - - if (!$.support.transition) return complete.call(this) - - this.$element - [dimension](0) - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - } - - Collapse.prototype.toggle = function () { - this[this.$element.hasClass('in') ? 
'hide' : 'show']() - } - - - // COLLAPSE PLUGIN DEFINITION - // ========================== - - var old = $.fn.collapse - - $.fn.collapse = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.collapse') - var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option) - - if (!data) $this.data('bs.collapse', (data = new Collapse(this, options))) - if (typeof option == 'string') data[option]() - }) - } - - $.fn.collapse.Constructor = Collapse - - - // COLLAPSE NO CONFLICT - // ==================== - - $.fn.collapse.noConflict = function () { - $.fn.collapse = old - return this - } - - - // COLLAPSE DATA-API - // ================= - - $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) { - var $this = $(this), href - var target = $this.attr('data-target') - || e.preventDefault() - || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7 - var $target = $(target) - var data = $target.data('bs.collapse') - var option = data ? 'toggle' : $this.data() - var parent = $this.attr('data-parent') - var $parent = parent && $(parent) - - if (!data || !data.transitioning) { - if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed') - $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed') - } - - $target.collapse(option) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: dropdown.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#dropdowns - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // DROPDOWN CLASS DEFINITION - // ========================= - - var backdrop = '.dropdown-backdrop' - var toggle = '[data-toggle=dropdown]' - var Dropdown = function (element) { - var $el = $(element).on('click.bs.dropdown', this.toggle) - } - - Dropdown.prototype.toggle = function (e) { - var $this = $(this) - - if ($this.is('.disabled, :disabled')) return - - var $parent = getParent($this) - var isActive = $parent.hasClass('open') - - clearMenus() - - if (!isActive) { - if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) { - // if mobile we we use a backdrop because click events don't delegate - $('