diff --git a/bin/hbase b/bin/hbase index 0206bbd2f0cd..959bdd1d773e 100755 --- a/bin/hbase +++ b/bin/hbase @@ -492,9 +492,19 @@ add_jdk11_deps_to_classpath() { done } -enable_trace() { - agent_jar=$(find lib/trace -type f -name "opentelemetry-javaagent-*") - HBASE_OPTS="$HBASE_OPTS -javaagent:$agent_jar $HBASE_TRACE_OPTS" +add_opentelemetry_agent() { + if [ -e "${OPENTELEMETRY_JAVAAGENT_PATH}" ] ; then + agent_jar="${OPENTELEMETRY_JAVAAGENT_PATH}" + elif ! agent_jar=$(find -L "${HBASE_HOME}/lib/trace" -type f -name "opentelemetry-javaagent-*" 2>/dev/null); then + # must be dev environment + f="${HBASE_HOME}/hbase-build-configuration/target/cached_classpath.txt" + if [ ! -f "${f}" ]; then + echo "As this is a development environment, we need ${f} to be generated from maven (command: mvn install -DskipTests)" + exit 1 + fi + agent_jar=$(tr ':' '\n' < "${f}" | grep opentelemetry-javaagent) + fi + HBASE_OPTS="$HBASE_OPTS -javaagent:$agent_jar" } #Add the development env class path stuff @@ -557,6 +567,7 @@ elif [ "$COMMAND" = 'jshell' ] ; then CLASS='jdk.internal.jshell.tool.JShellToolProvider' # set default values for HBASE_JSHELL_ARGS read -r -a JSHELL_ARGS <<< "${HBASE_JSHELL_ARGS:-"--startup DEFAULT --startup PRINTING --startup ${HBASE_HOME}/bin/hbase_startup.jsh"}" + HBASE_OPTS="$HBASE_OPTS $HBASE_JSHELL_OPTS" elif [ "$COMMAND" = "hbck" ] ; then # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck. case "${1}" in @@ -801,8 +812,10 @@ elif [ "${DEBUG}" = "true" ]; then fi if [[ -n "${HBASE_TRACE_OPTS}" ]]; then - echo "Attach opentelemetry agent to enable trace" - enable_trace + if [ "${DEBUG}" = "true" ]; then + echo "Attaching opentelemetry agent" + fi + add_opentelemetry_agent fi # Have JVM dump heap if we run out of memory. Files will be 'launch directory' diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh index f660f166efa7..ed8d8120eb93 100644 --- a/conf/hbase-env.sh +++ b/conf/hbase-env.sh @@ -143,10 +143,32 @@ # export GREP="${GREP-grep}" # export SED="${SED-sed}" -# Uncomment to enable trace, you can change the options to use other exporters such as jaeger or -# zipkin. See https://github.com/open-telemetry/opentelemetry-java-instrumentation on how to +# Tracing +# Uncomment some combination of these lines to enable tracing. You should change the options to use +# the exporters appropriate to your environment. See +# https://github.com/open-telemetry/opentelemetry-java-instrumentation for details on how to # configure exporters and other components through system properties. -# export HBASE_TRACE_OPTS="-Dotel.resource.attributes=service.name=HBase -Dotel.traces.exporter=logging otel.metrics.exporter=none" +# +# The presence HBASE_TRACE_OPTS indicates that tracing should be enabled, and serves as site-wide +# settings. +# export HBASE_TRACE_OPTS="-Dotel.traces.exporter=none -Dotel.metrics.exporter=none" +# +# Per-process configuration variables allow for fine-grained configuration control. 
+# export HBASE_SHELL_OPTS="${HBASE_SHELL_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-shell" +# export HBASE_JSHELL_OPTS="${HBASE_JSHELL_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-jshell" +# export HBASE_HBCK_OPTS="${HBASE_HBCK_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-hbck" +# export HBASE_MASTER_OPTS="${HBASE_MASTER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-master" +# export HBASE_REGIONSERVER_OPTS="${HBASE_REGIONSERVER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-regionserver" +# export HBASE_THRIFT_OPTS="${HBASE_THRIFT_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-thrift" +# export HBASE_REST_OPTS="${HBASE_REST_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-rest" +# export HBASE_ZOOKEEPER_OPTS="${HBASE_ZOOKEEPER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-zookeeper" +# export HBASE_PE_OPTS="${HBASE_PE_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-performanceevaluation" +# export HBASE_LTT_OPTS="${HBASE_LTT_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-loadtesttool" +# export HBASE_CANARY_OPTS="${HBASE_CANARY_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-canary" +# export HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-hbtop" +# +# Manually specify a value for OPENTELEMETRY_JAVAAGENT_PATH to override the autodiscovery mechanism +# export OPENTELEMETRY_JAVAAGENT_PATH="" # Additional argments passed to jshell invocation # export HBASE_JSHELL_ARGS="--startup DEFAULT --startup PRINTING --startup hbase_startup.jsh" diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 8194787b2e67..15f192c01611 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -24,7 +24,7 @@ pipeline { pollSCM('@daily') } options { - buildDiscarder(logRotator(numToKeepStr: '15')) + buildDiscarder(logRotator(numToKeepStr: '20')) timeout (time: 16, unit: 'HOURS') timestamps() skipDefaultCheckout() @@ -54,6 +54,8 @@ pipeline { SHALLOW_CHECKS = 'all,-shadedjars,-unit' // run by the 'yetus general check' DEEP_CHECKS = 'compile,htmlout,javac,maven,mvninstall,shadedjars,unit' // run by 'yetus jdkX (HadoopY) checks' ASF_NIGHTLIES = 'https://nightlies.apache.org' + ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" + ASF_NIGHTLIES_BASE = "${ASF_NIGHTLIES_BASE_ORI.replaceAll(' ', '%20')}" } parameters { booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release. 
@@ -91,6 +93,7 @@ pipeline { "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-yetus" \ --keys 'https://www.apache.org/dist/yetus/KEYS' \ + --verify-tar-gz \ "${WORKSPACE}/yetus-${YETUS_RELEASE}-bin.tar.gz" \ "yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz" mv "yetus-${YETUS_RELEASE}-bin.tar.gz" yetus.tar.gz @@ -137,6 +140,7 @@ pipeline { "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-hadoop-2" \ --keys 'http://www.apache.org/dist/hadoop/common/KEYS' \ + --verify-tar-gz \ "${WORKSPACE}/hadoop-${HADOOP2_VERSION}-bin.tar.gz" \ "hadoop/common/hadoop-${HADOOP2_VERSION}/hadoop-${HADOOP2_VERSION}.tar.gz" for stale in $(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | grep -v ${HADOOP2_VERSION}); do @@ -164,6 +168,7 @@ pipeline { "${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ --working-dir "${WORKSPACE}/downloads-hadoop-3" \ --keys 'http://www.apache.org/dist/hadoop/common/KEYS' \ + --verify-tar-gz \ "${WORKSPACE}/hadoop-${HADOOP3_VERSION}-bin.tar.gz" \ "hadoop/common/hadoop-${HADOOP3_VERSION}/hadoop-${HADOOP3_VERSION}.tar.gz" for stale in $(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | grep -v ${HADOOP3_VERSION}); do @@ -202,6 +207,7 @@ pipeline { SET_JAVA_HOME = '/usr/lib/jvm/java-8' OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}" OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}" + ASF_NIGHTLIES_GENERAL_CHECK_BASE="${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" } steps { // Must do prior to anything else, since if one of them timesout we'll stash the commentfile @@ -210,7 +216,7 @@ pipeline { rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" echo '(x) {color:red}-1 general checks{color}' >"${OUTPUT_DIR}/commentfile" echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." 
>> "${OUTPUT_DIR}/commentfile" -''' + ''' unstash 'yetus' // since we have a new node definition we need to re-do the scm checkout dir('component') { @@ -222,24 +228,59 @@ pipeline { "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" ls -lh "${OUTPUT_DIR_RELATIVE}/machine" -''' - // TODO roll this into the hbase_nightly_yetus script - sh '''#!/usr/bin/env bash - set -e - declare -i status=0 - if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then - echo '(/) {color:green}+1 general checks{color}' > "${OUTPUT_DIR}/commentfile" - else - echo '(x) {color:red}-1 general checks{color}' > "${OUTPUT_DIR}/commentfile" - status=1 - fi - echo "-- For more information [see general report|${BUILD_URL}General_20Nightly_20Build_20Report/]" >> "${OUTPUT_DIR}/commentfile" - exit "${status}" ''' + // TODO roll this into the hbase_nightly_yetus script + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 general checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 general checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see general report|${BUILD_URL}General_20Nightly_20Build_20Report/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } post { always { stash name: 'general-result', includes: "${OUTPUT_DIR_RELATIVE}/commentfile" + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "${env.OUTPUT_DIR_RELATIVE}/*-site/*,${env.OUTPUT_DIR_RELATIVE}/*-site/**/*" + ) + ] + ) + ]) + sh '''#!/bin/bash -e + if [ -d "${OUTPUT_DIR}/branch-site" ]; then + echo "Remove ${OUTPUT_DIR}/branch-site for saving space" + rm -rf "${OUTPUT_DIR}/branch-site" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}/branch-site" > "${OUTPUT_DIR}/branch-site.html" + else + echo "No branch-site, skipping" + fi + if [ -d "${OUTPUT_DIR}/patch-site" ]; then + echo "Remove ${OUTPUT_DIR}/patch-site for saving space" + rm -rf "${OUTPUT_DIR}/patch-site" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}/patch-site" > "${OUTPUT_DIR}/patch-site.html" + else + echo "No patch-site, skipping" + fi + ''' // Has to be relative to WORKSPACE. archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/*" archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/**/*" @@ -278,7 +319,7 @@ pipeline { rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" echo '(x) {color:red}-1 jdk7 checks{color}' >"${OUTPUT_DIR}/commentfile" echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." 
>> "${OUTPUT_DIR}/commentfile" -''' + ''' unstash 'yetus' dir('component') { checkout scm @@ -289,19 +330,29 @@ pipeline { "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" ls -lh "${OUTPUT_DIR_RELATIVE}/machine" -''' - sh '''#!/usr/bin/env bash - set -e - declare -i status=0 - if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then - echo '(/) {color:green}+1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile" - else - echo '(x) {color:red}-1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile" - status=1 - fi - echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_20Nightly_20Build_20Report/]" >> "${OUTPUT_DIR}/commentfile" - exit "${status}" ''' + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_20Nightly_20Build_20Report/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } post { always { @@ -330,12 +381,12 @@ pipeline { ] ) ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt + // remove the big test logs zip file, store the nightlies url in test_logs.html sh '''#!/bin/bash -e if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space" rm -rf "${OUTPUT_DIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${OUTPUT_DIR_RELATIVE}/test_logs.zip" > "${OUTPUT_DIR}/test_logs.txt" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi @@ -378,7 +429,7 @@ pipeline { rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' >"${OUTPUT_DIR}/commentfile" echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." 
>> "${OUTPUT_DIR}/commentfile" -''' + ''' unstash 'yetus' dir('component') { checkout scm @@ -389,19 +440,29 @@ pipeline { "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" ls -lh "${OUTPUT_DIR_RELATIVE}/machine" -''' - sh '''#!/usr/bin/env bash - set -e - declare -i status=0 - if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then - echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' > "${OUTPUT_DIR}/commentfile" - else - echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' > "${OUTPUT_DIR}/commentfile" - status=1 - fi - echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}JDK8_20Nightly_20Build_20Report_20_28Hadoop2_29/]" >> "${OUTPUT_DIR}/commentfile" - exit "${status}" ''' + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}JDK8_20Nightly_20Build_20Report_20_28Hadoop2_29/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } post { always { @@ -430,12 +491,12 @@ pipeline { ] ) ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt + // remove the big test logs zip file, store the nightlies url in test_logs.html sh '''#!/bin/bash -e if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space" rm -rf "${OUTPUT_DIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${OUTPUT_DIR_RELATIVE}/test_logs.zip" > "${OUTPUT_DIR}/test_logs.txt" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi @@ -482,7 +543,7 @@ pipeline { rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' >"${OUTPUT_DIR}/commentfile" echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." 
>> "${OUTPUT_DIR}/commentfile" -''' + ''' unstash 'yetus' dir('component') { checkout scm @@ -493,19 +554,29 @@ pipeline { "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" ls -lh "${OUTPUT_DIR_RELATIVE}/machine" -''' - sh '''#!/usr/bin/env bash - set -e - declare -i status=0 - if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then - echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" - else - echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" - status=1 - fi - echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}JDK8_20Nightly_20Build_20Report_20_28Hadoop3_29/]" >> "${OUTPUT_DIR}/commentfile" - exit "${status}" ''' + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}JDK8_20Nightly_20Build_20Report_20_28Hadoop3_29/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } post { always { @@ -534,12 +605,12 @@ pipeline { ] ) ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt + // remove the big test logs zip file, store the nightlies url in test_logs.html sh '''#!/bin/bash -e if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space" rm -rf "${OUTPUT_DIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${OUTPUT_DIR_RELATIVE}/test_logs.zip" > "${OUTPUT_DIR}/test_logs.txt" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi @@ -588,7 +659,7 @@ pipeline { rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}" echo '(x) {color:red}-1 jdk11 hadoop3 checks{color}' >"${OUTPUT_DIR}/commentfile" echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." 
>> "${OUTPUT_DIR}/commentfile" -''' + ''' unstash 'yetus' dir('component') { checkout scm @@ -599,19 +670,29 @@ pipeline { "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine" echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'" ls -lh "${OUTPUT_DIR_RELATIVE}/machine" -''' - sh '''#!/usr/bin/env bash - set -e - declare -i status=0 - if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then - echo '(/) {color:green}+1 jdk11 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" - else - echo '(x) {color:red}-1 jdk11 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" - status=1 - fi - echo "-- For more information [see jdk11 report|${BUILD_URL}JDK11_20Nightly_20Build_20Report_20_28Hadoop3_29/]" >> "${OUTPUT_DIR}/commentfile" - exit "${status}" ''' + script { + def ret = sh( + returnStatus: true, + script: '''#!/usr/bin/env bash + set -e + declare -i status=0 + if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then + echo '(/) {color:green}+1 jdk11 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + else + echo '(x) {color:red}-1 jdk11 hadoop3 checks{color}' > "${OUTPUT_DIR}/commentfile" + status=1 + fi + echo "-- For more information [see jdk11 report|${BUILD_URL}JDK11_20Nightly_20Build_20Report_20_28Hadoop3_29/]" >> "${OUTPUT_DIR}/commentfile" + exit "${status}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } post { always { @@ -630,7 +711,7 @@ pipeline { else echo "No archiver directory, skipping compressing." fi -''' + ''' sshPublisher(publishers: [ sshPublisherDesc(configName: 'Nightlies', transfers: [ @@ -640,16 +721,16 @@ pipeline { ] ) ]) - // remove the big test logs zip file, store the nightlies url in test_logs.txt + // remove the big test logs zip file, store the nightlies url in test_logs.html sh '''#!/bin/bash -e if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space" rm -rf "${OUTPUT_DIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${OUTPUT_DIR_RELATIVE}/test_logs.zip" > "${OUTPUT_DIR}/test_logs.txt" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi -''' + ''' // Has to be relative to WORKSPACE. archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/*" archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/**/*" @@ -692,14 +773,14 @@ pipeline { rm -rf ".m2-for-src" && mkdir ".m2-for-src" echo "(x) {color:red}-1 source release artifact{color}\n-- Something went wrong with this stage, [check relevant console output|${BUILD_URL}/console]." >output-srctarball/commentfile echo "(x) {color:red}-1 client integration test{color}\n-- Something went wrong with this stage, [check relevant console output|${BUILD_URL}/console]." >output-integration/commentfile -''' + ''' sh '''#!/usr/bin/env bash set -e rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine" "${BASEDIR}/dev-support/gather_machine_environment.sh" "output-srctarball/machine" echo "got the following saved stats in 'output-srctarball/machine'" ls -lh "output-srctarball/machine" -''' + ''' sh """#!/bin/bash -e echo "Checking the steps for an RM to make a source artifact, then a binary artifact." 
if "${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh" \ @@ -714,7 +795,7 @@ pipeline { echo '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.' >output-srctarball/commentfile exit 1 fi -""" + """ echo "unpacking the hbase bin tarball into 'hbase-install' and the client tarball into 'hbase-client'" sh '''#!/bin/bash -e if [ 2 -ne $(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | wc -l) ]; then @@ -725,7 +806,7 @@ pipeline { tar --strip-component=1 -xzf "${install_artifact}" -C "hbase-install" client_artifact=$(ls -1 "${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | sort | tail -n 1) tar --strip-component=1 -xzf "${client_artifact}" -C "hbase-client" -''' + ''' unstash 'hadoop-2' sh '''#!/bin/bash -xe if [[ "${BRANCH}" = branch-2* ]] || [[ "${BRANCH}" = branch-1* ]]; then @@ -749,7 +830,7 @@ pipeline { else echo "Skipping to run against Hadoop 2 for branch ${BRANCH}" fi -''' + ''' unstash 'hadoop-3' sh '''#!/bin/bash -e if [[ "${BRANCH}" = branch-1* ]]; then @@ -790,13 +871,31 @@ pipeline { fi echo "(/) {color:green}+1 client integration test{color}" >output-integration/commentfile fi -''' - - + ''' } post { always { stash name: 'srctarball-result', includes: "output-srctarball/commentfile,output-integration/commentfile" + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "output-srctarball/hbase-src.tar.gz" + ) + ] + ) + ]) + // remove the big src tarball, store the nightlies url in hbase-src.html + sh '''#!/bin/bash -e + SRC_TAR="${WORKSPACE}/output-srctarball/hbase-src.tar.gz" + if [ -f "${SRC_TAR}" ]; then + echo "Remove ${SRC_TAR} for saving space" + rm -rf "${SRC_TAR}" + python ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/output-srctarball" > "${WORKSPACE}/output-srctarball/hbase-src.html" + else + echo "No hbase-src.tar.gz, skipping" + fi + ''' archiveArtifacts artifacts: 'output-srctarball/*' archiveArtifacts artifacts: 'output-srctarball/**/*' archiveArtifacts artifacts: 'output-integration/*' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index 56ba85552ed4..c9dee54ae64c 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -53,6 +53,8 @@ pipeline { WORKDIR_REL_JDK8_HADOOP3_CHECK = 'yetus-jdk8-hadoop3-check' WORKDIR_REL_JDK11_HADOOP3_CHECK = 'yetus-jdk11-hadoop3-check' ASF_NIGHTLIES = 'https://nightlies.apache.org' + ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" + ASF_NIGHTLIES_BASE = "${ASF_NIGHTLIES_BASE_ORI.replaceAll(' ', '%20')}" } parameters { @@ -83,6 +85,7 @@ pipeline { BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + ASF_NIGHTLIES_GENERAL_CHECK_BASE="${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" } steps { dir("${SOURCEDIR}") { @@ -102,20 +105,56 @@ pipeline { passwordVariable: 'GITHUB_PASSWORD', usernameVariable: 'GITHUB_USER' )]) { - sh label: 'test-patch', script: '''#!/bin/bash -e - hostname -a ; pwd ; ls -la - printenv 2>&1 | sort - echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" - "${YETUS_DRIVER}" - ''' + script { + def ret = sh( + label: 'test-patch', + returnStatus: true, + script: '''#!/bin/bash -e + hostname -a ; pwd ; ls -la + printenv 2>&1 | sort + echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" + 
"${YETUS_DRIVER}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } } } post { always { + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "${env.WORKDIR_REL}/${env.PATCH_REL}/*-site/*,${env.WORKDIR_REL}/${env.PATCH_REL}/*-site/**/*" + ) + ] + ) + ]) + sh '''#!/bin/bash -e + if [ -d "${PATCHDIR}/branch-site" ]; then + echo "Remove ${PATCHDIR}/branch-site for saving space" + rm -rf "${PATCHDIR}/branch-site" + python ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}/branch-site" > "${PATCHDIR}/branch-site.html" + else + echo "No branch-site, skipping" + fi + if [ -d "${PATCHDIR}/patch-site" ]; then + echo "Remove ${PATCHDIR}/patch-site for saving space" + rm -rf "${PATCHDIR}/patch-site" + python ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}/patch-site" > "${PATCHDIR}/patch-site.html" + else + echo "No patch-site, skipping" + fi + ''' // Has to be relative to WORKSPACE. - archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit" - archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/**/*", excludes: "${WORKDIR_REL}/${PATCH_REL}/precommit/**/*" + archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/*" + archiveArtifacts artifacts: "${WORKDIR_REL}/${PATCH_REL}/**/*" publishHTML target: [ allowMissing: true, keepAll: true, @@ -189,12 +228,23 @@ pipeline { passwordVariable: 'GITHUB_PASSWORD', usernameVariable: 'GITHUB_USER' )]) { - sh label: 'test-patch', script: '''#!/bin/bash -e - hostname -a ; pwd ; ls -la - printenv 2>&1 | sort - echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" - "${YETUS_DRIVER}" - ''' + script { + def ret = sh( + label: 'test-patch', + returnStatus: true, + script: '''#!/bin/bash -e + hostname -a ; pwd ; ls -la + printenv 2>&1 | sort + echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" + "${YETUS_DRIVER}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. + currentBuild.result = 'UNSTABLE' + } + } } } } @@ -228,7 +278,7 @@ pipeline { if [ -f "${PATCHDIR}/test_logs.zip" ]; then echo "Remove ${PATCHDIR}/test_logs.zip for saving space" rm -rf "${PATCHDIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${WORKDIR_REL}/${PATCH_REL}/test_logs.zip" > "${PATCHDIR}/test_logs.txt" + python ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" > "${PATCHDIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi @@ -309,12 +359,23 @@ pipeline { passwordVariable: 'GITHUB_PASSWORD', usernameVariable: 'GITHUB_USER' )]) { - sh label: 'test-patch', script: '''#!/bin/bash -e - hostname -a ; pwd ; ls -la - printenv 2>&1 | sort - echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" - "${YETUS_DRIVER}" - ''' + script { + def ret = sh( + label: 'test-patch', + returnStatus: true, + script: '''#!/bin/bash -e + hostname -a ; pwd ; ls -la + printenv 2>&1 | sort + echo "[INFO] Launching Yetus via ${YETUS_DRIVER}" + "${YETUS_DRIVER}" + ''' + ) + if (ret != 0) { + // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of + // test output. See HBASE-26339 for more details. 
+ currentBuild.result = 'UNSTABLE' + } + } } } } @@ -348,7 +409,7 @@ pipeline { if [ -f "${PATCHDIR}/test_logs.zip" ]; then echo "Remove ${PATCHDIR}/test_logs.zip for saving space" rm -rf "${PATCHDIR}/test_logs.zip" - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/${WORKDIR_REL}/${PATCH_REL}/test_logs.zip" > "${PATCHDIR}/test_logs.txt" + python ${SOURCEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${WORKDIR_REL}/${PATCH_REL}" > "${PATCHDIR}/test_logs.html" else echo "No test_logs.zip, skipping" fi diff --git a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile index 3d206dc83365..f9f0af416d52 100644 --- a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile +++ b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile @@ -83,7 +83,9 @@ FROM ubuntu:18.04 # into the container rather than launching a new docker container. RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && DEBIAN_FRONTEND=noninteractive apt-get -qq -y install --no-install-recommends \ - openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.3 && mkdir /run/sshd \ + openssh-server=1:7.6p1-* \ + gnupg2=2.2.4-* \ + && mkdir /run/sshd \ && echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/dev-support/design-docs/NamedQueue_Framework_Design_HBASE-24528_HBASE-22978_HBASE-24718.pdf b/dev-support/design-docs/NamedQueue_Framework_Design_HBASE-24528_HBASE-22978_HBASE-24718.pdf new file mode 100644 index 000000000000..3da6bc7151e3 Binary files /dev/null and b/dev-support/design-docs/NamedQueue_Framework_Design_HBASE-24528_HBASE-22978_HBASE-24718.pdf differ diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 25e3fdeef841..22f88faad93f 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -28,6 +28,9 @@ pipeline { timeout (time: 15, unit: 'MINUTES') timestamps() } + environment { + ASF_NIGHTLIES = 'https://nightlies.apache.org' + } parameters { booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.') } @@ -40,12 +43,26 @@ pipeline { set -x fi declare -a flaky_args - flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase%20Nightly/job/${BRANCH_NAME}" --is-yetus True --max-builds 10) - flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase-Flaky-Tests/job/${BRANCH_NAME}" --is-yetus False --max-builds 30) + flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase%20Nightly/job/${BRANCH_NAME}" --is-yetus True --max-builds 20) + flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase-Flaky-Tests/job/${BRANCH_NAME}" --is-yetus False --max-builds 50) docker build -t hbase-dev-support dev-support docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase -u `id -u`:`id -g` --workdir=/hbase hbase-dev-support \ python dev-support/flaky-tests/report-flakies.py --mvn -v -o output "${flaky_args[@]}" -''' + ''' + sshPublisher(publishers: [ + sshPublisherDesc(configName: 'Nightlies', + transfers: [ + sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}", + sourceFiles: "output/dashboard.html" + ) + ] + ) + ]) + sh ''' + if [ -f "output/dashboard.html" ]; then + ./dev-support/gen_redirect_html.py "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}/output/dashboard.html" > output/dashboard.html + fi + ''' } } } @@ 
-53,15 +70,6 @@ pipeline { always { // Has to be relative to WORKSPACE. archiveArtifacts artifacts: "output/*" - publishHTML target: [ - allowMissing: true, - keepAll: true, - alwaysLinkToLastBuild: true, - // Has to be relative to WORKSPACE - reportDir: "output", - reportFiles: 'dashboard.html', - reportName: 'Flaky Test Report' - ] } } } diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index 594000e1146f..a681d3ca0e43 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -25,7 +25,7 @@ pipeline { } options { // this should roughly match how long we tell the flaky dashboard to look at - buildDiscarder(logRotator(numToKeepStr: '30')) + buildDiscarder(logRotator(numToKeepStr: '50')) timeout (time: 2, unit: 'HOURS') timestamps() } @@ -85,9 +85,9 @@ pipeline { ) ]) sh '''#!/bin/bash -e - echo "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" > "test_logs.txt" + ./dev-support/gen_redirect_html.py "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" > test_logs.html ''' - archiveArtifacts artifacts: 'includes.txt,test_logs.txt,target/machine/*' + archiveArtifacts artifacts: 'includes.txt,test_logs.html,target/machine/*' } } } diff --git a/dev-support/gen_redirect_html.py b/dev-support/gen_redirect_html.py new file mode 100755 index 000000000000..0e73a5716563 --- /dev/null +++ b/dev-support/gen_redirect_html.py @@ -0,0 +1,37 @@ +#!/usr/bin/python +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from string import Template + +if len(sys.argv) != 2 : + print "usage: %s " % sys.argv[0] + exit(1) + +url = sys.argv[1].replace(" ", "%20") +template = Template(""" + + + + +

+Redirecting. If not work, please click <a href="${url}">this link</a>.

+ +""") + +output = template.substitute(url = url) +print output diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 6eb38e342284..6a5f1de9f877 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -115,6 +115,10 @@ function personality_parse_args delete_parameter "${i}" SKIP_ERRORPRONE=true ;; + --asf-nightlies-general-check-base=*) + delete_parameter "${i}" + ASF_NIGHTLIES_GENERAL_CHECK_BASE=${i#*=} + ;; esac done } @@ -414,7 +418,11 @@ function refguide_rebuild fi add_vote_table 0 refguide "${repostatus} has no errors when building the reference guide. See footer for rendered docs, which you should manually inspect." - add_footer_table refguide "@@BASE@@/${repostatus}-site/book.html" + if [[ -n "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}" ]]; then + add_footer_table refguide "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}/${repostatus}-site/book.html" + else + add_footer_table refguide "@@BASE@@/${repostatus}-site/book.html" + fi return 0 } @@ -591,9 +599,9 @@ function hadoopcheck_rebuild elif [[ "${PATCH_BRANCH}" = branch-2.* ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-2.3+ rules." if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.10.0" + hbase_hadoop2_versions="2.10.1" else - hbase_hadoop2_versions="2.10.0" + hbase_hadoop2_versions="2.10.0 2.10.1" fi else yetus_info "Setting Hadoop 2 versions to null on master/feature branch rules since we do not support hadoop 2 for hbase 3.x any more." @@ -612,16 +620,16 @@ function hadoopcheck_rebuild elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.2 3.2.1" + hbase_hadoop3_versions="3.1.2 3.2.2" else - hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1" + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.2.2" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0" + hbase_hadoop3_versions="3.1.2 3.2.2 3.3.1" else - hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.3.0" + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.2.2 3.3.0 3.3.1" fi fi diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh index 65b5270f0bf8..3fb1ac7bfce3 100755 --- a/dev-support/hbase_nightly_yetus.sh +++ b/dev-support/hbase_nightly_yetus.sh @@ -96,6 +96,11 @@ if [[ ! 
-d "${OUTPUT_DIR}" ]]; then exit 1 fi +# pass asf nightlies url in +if [[ -n "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}" ]]; then + YETUS_ARGS=("--asf-nightlies-general-check-base=${ASF_NIGHTLIES_GENERAL_CHECK_BASE}" "${YETUS_ARGS[@]}") +fi + if [[ true != "${USE_YETUS_PRERELEASE}" ]]; then YETUS_ARGS=("--shelldocs=${WORKSPACE}/yetus-${YETUS_RELEASE}/bin/shelldocs" "${YETUS_ARGS[@]}") TESTPATCHBIN="${WORKSPACE}/yetus-${YETUS_RELEASE}/bin/test-patch" diff --git a/dev-support/jenkins-scripts/cache-apache-project-artifact.sh b/dev-support/jenkins-scripts/cache-apache-project-artifact.sh index 5653b05cb4e2..ddd65b69a0fb 100755 --- a/dev-support/jenkins-scripts/cache-apache-project-artifact.sh +++ b/dev-support/jenkins-scripts/cache-apache-project-artifact.sh @@ -21,6 +21,7 @@ function usage { echo "Usage: ${0} [options] /path/to/download/file.tar.gz download/fragment/eg/project/subdir/some-artifact-version.tar.gz" echo "" echo " --force for a redownload even if /path/to/download/file.tar.gz exists." + echo " --verify-tar-gz Only use a cached file if it can be parsed as a gzipped tarball." echo " --working-dir /path/to/use Path for writing tempfiles. must exist." echo " defaults to making a directory via mktemp that we clean." echo " --keys url://to/project/KEYS where to get KEYS. needed to check signature on download." @@ -35,6 +36,7 @@ fi # Get arguments declare done_if_cached="true" +declare verify_tar_gz="false" declare working_dir declare cleanup="true" declare keys @@ -42,6 +44,7 @@ while [ $# -gt 0 ] do case "$1" in --force) shift; done_if_cached="false";; + --verify-tar-gz) shift; verify_tar_gz="true";; --working-dir) shift; working_dir=$1; cleanup="false"; shift;; --keys) shift; keys=$1; shift;; --) shift; break;; @@ -58,9 +61,18 @@ fi target="$1" artifact="$2" -if [ -f "${target}" ] && [ "true" = "${done_if_cached}" ]; then - echo "Reusing existing download of '${artifact}'." - exit 0 +if [ -f "${target}" ] && [ -s "${target}" ] && [ -r "${target}" ] && [ "true" = "${done_if_cached}" ]; then + if [ "false" = "${verify_tar_gz}" ]; then + echo "Reusing existing download of '${artifact}'." + exit 0 + fi + if ! tar tzf "${target}" > /dev/null 2>&1; then + echo "Cached artifact is not a well formed gzipped tarball; clearing the cached file at '${target}'." + rm -rf "${target}" + else + echo "Reusing existing download of '${artifact}', which is a well formed gzipped tarball." 
+ exit 0 + fi fi if [ -z "${working_dir}" ]; then diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 5bb2b1b755a4..9652d1e471fa 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -143,6 +143,10 @@ fi YETUS_ARGS+=("--sentinel") # use emoji vote so it is easier to find the broken line YETUS_ARGS+=("--github-use-emoji-vote") +# pass asf nightlies url in +if [[ -n "${ASF_NIGHTLIES_GENERAL_CHECK_BASE}" ]]; then + YETUS_ARGS+=("--asf-nightlies-general-check-base=${ASF_NIGHTLIES_GENERAL_CHECK_BASE}") +fi echo "Launching yetus with command line:" echo "${TESTPATCHBIN} ${YETUS_ARGS[*]}" diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java index c69f17c5263f..f15ca92321a0 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java @@ -84,12 +84,9 @@ class BalancerClusterState { int[] regionIndexToServerIndex; // regionIndex -> serverIndex int[] initialRegionIndexToServerIndex; // regionIndex -> serverIndex (initial cluster state) int[] regionIndexToTableIndex; // regionIndex -> tableIndex - int[][] numRegionsPerServerPerTable; // serverIndex -> tableIndex -> # regions + int[][] numRegionsPerServerPerTable; // tableIndex -> serverIndex -> tableIndex -> # regions int[] numRegionsPerTable; // tableIndex -> region count double[] meanRegionsPerTable; // mean region count per table - double[] regionSkewByTable; // skew on RS per by table - double[] minRegionSkewByTable; // min skew on RS per by table - double[] maxRegionSkewByTable; // max skew on RS per by table int[] regionIndexToPrimaryIndex; // regionIndex -> regionIndex of the primary boolean hasRegionReplicas = false; // whether there is regions with replicas @@ -179,6 +176,7 @@ public String getRack(ServerName server) { serversPerHostList.get(hostIndex).add(serverIndex); String rack = this.rackManager.getRack(sn); + if (!racksToIndex.containsKey(rack)) { racksToIndex.put(rack, numRacks++); serversPerRackList.add(new ArrayList<>()); @@ -187,6 +185,7 @@ public String getRack(ServerName server) { serversPerRackList.get(rackIndex).add(serverIndex); } + LOG.debug("Hosts are {} racks are {}", hostsToIndex, racksToIndex); // Count how many regions there are. 
for (Map.Entry> entry : clusterState.entrySet()) { numRegions += entry.getValue().size(); @@ -281,10 +280,16 @@ public String getRack(ServerName server) { regionIndex++; } + if (LOG.isDebugEnabled()) { + for (int i = 0; i < numServers; i++) { + LOG.debug("server {} has {} regions", i, regionsPerServer[i].length); + } + } for (int i = 0; i < serversPerHostList.size(); i++) { serversPerHost[i] = new int[serversPerHostList.get(i).size()]; for (int j = 0; j < serversPerHost[i].length; j++) { serversPerHost[i][j] = serversPerHostList.get(i).get(j); + LOG.debug("server {} is on host {}",serversPerHostList.get(i).get(j), i); } if (serversPerHost[i].length > 1) { multiServersPerHost = true; @@ -295,44 +300,34 @@ public String getRack(ServerName server) { serversPerRack[i] = new int[serversPerRackList.get(i).size()]; for (int j = 0; j < serversPerRack[i].length; j++) { serversPerRack[i][j] = serversPerRackList.get(i).get(j); + LOG.info("server {} is on rack {}",serversPerRackList.get(i).get(j), i); } } numTables = tables.size(); - LOG.debug("Number of tables={}", numTables); - numRegionsPerServerPerTable = new int[numServers][numTables]; + LOG.debug("Number of tables={}, number of hosts={}, number of racks={}", numTables, + numHosts, numRacks); + numRegionsPerServerPerTable = new int[numTables][numServers]; numRegionsPerTable = new int[numTables]; - for (int i = 0; i < numServers; i++) { - for (int j = 0; j < numTables; j++) { + for (int i = 0; i < numTables; i++) { + for (int j = 0; j < numServers; j++) { numRegionsPerServerPerTable[i][j] = 0; } } for (int i = 0; i < regionIndexToServerIndex.length; i++) { if (regionIndexToServerIndex[i] >= 0) { - numRegionsPerServerPerTable[regionIndexToServerIndex[i]][regionIndexToTableIndex[i]]++; + numRegionsPerServerPerTable[regionIndexToTableIndex[i]][regionIndexToServerIndex[i]]++; numRegionsPerTable[regionIndexToTableIndex[i]]++; } } // Avoid repeated computation for planning meanRegionsPerTable = new double[numTables]; - regionSkewByTable = new double[numTables]; - maxRegionSkewByTable = new double[numTables]; - minRegionSkewByTable = new double[numTables]; for (int i = 0; i < numTables; i++) { meanRegionsPerTable[i] = Double.valueOf(numRegionsPerTable[i]) / numServers; - minRegionSkewByTable[i] += DoubleArrayCost.getMinSkew(numRegionsPerTable[i], numServers); - maxRegionSkewByTable[i] += DoubleArrayCost.getMaxSkew(numRegionsPerTable[i], numServers); - } - - for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) { - for (int tableIdx = 0; tableIdx < aNumRegionsPerServerPerTable.length; tableIdx++) { - regionSkewByTable[tableIdx] += Math.abs(aNumRegionsPerServerPerTable[tableIdx] - - meanRegionsPerTable[tableIdx]); - } } for (int i = 0; i < regions.length; i++) { @@ -680,14 +675,9 @@ void regionMoved(int region, int oldServer, int newServer) { } int tableIndex = regionIndexToTableIndex[region]; if (oldServer >= 0) { - numRegionsPerServerPerTable[oldServer][tableIndex]--; - // update regionSkewPerTable for the move from old server - regionSkewByTable[tableIndex] += getSkewChangeFor(oldServer, tableIndex, -1); + numRegionsPerServerPerTable[tableIndex][oldServer]--; } - numRegionsPerServerPerTable[newServer][tableIndex]++; - - // update regionSkewPerTable for the move to new server - regionSkewByTable[tableIndex] += getSkewChangeFor(newServer, tableIndex, 1); + numRegionsPerServerPerTable[tableIndex][newServer]++; // update for servers int primary = regionIndexToPrimaryIndex[region]; @@ -792,6 +782,10 @@ boolean contains(int[] arr, int val) { 
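The hunk above transposes numRegionsPerServerPerTable so that it is indexed as [tableIndex][serverIndex] (the intent of the inline comment is "tableIndex -> serverIndex -> # regions"), letting a whole table's per-server distribution be read as one row. The following standalone sketch mirrors that bookkeeping; class and method names are hypothetical and it is not BalancerClusterState itself.

/**
 * Illustrative sketch of the transposed layout: one row per table, one cell per server.
 * Hypothetical names; not part of the patch.
 */
public final class PerTableRegionCounts {
  private final int[][] counts; // [tableIndex][serverIndex] -> number of regions

  PerTableRegionCounts(int numTables, int numServers) {
    this.counts = new int[numTables][numServers];
  }

  /** Mirrors the constructor loop: credit each assigned region to its table/server cell. */
  void recordAssignment(int tableIndex, int serverIndex) {
    counts[tableIndex][serverIndex]++;
  }

  /** Mirrors regionMoved(): only two cells in the affected table's row change. */
  void recordMove(int tableIndex, int oldServer, int newServer) {
    if (oldServer >= 0) {
      counts[tableIndex][oldServer]--;
    }
    counts[tableIndex][newServer]++;
  }

  /** A table's per-server distribution can now be handed out as a single row. */
  int[] distributionForTable(int tableIndex) {
    return counts[tableIndex];
  }
}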
private Comparator numRegionsComparator = Comparator.comparingInt(this::getNumRegions); + public Comparator getNumRegionsComparator() { + return numRegionsComparator; + } + int getLowestLocalityRegionOnServer(int serverIndex) { if (regionFinder != null) { float lowestLocality = 1.0f; @@ -857,18 +851,9 @@ public String toString() { .append(Arrays.toString(serverIndicesSortedByRegionCount)).append(", regionsPerServer=") .append(Arrays.deepToString(regionsPerServer)); - desc.append(", regionSkewByTable=").append(Arrays.toString(regionSkewByTable)) - .append(", numRegions=").append(numRegions).append(", numServers=").append(numServers) + desc.append(", numRegions=").append(numRegions).append(", numServers=").append(numServers) .append(", numTables=").append(numTables).append(", numMovedRegions=").append(numMovedRegions) .append('}'); return desc.toString(); } - - private double getSkewChangeFor(int serverIndex, int tableIndex, int regionCountChange) { - double curSkew = Math.abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - - meanRegionsPerTable[tableIndex]); - double oldSkew = Math.abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - - regionCountChange - meanRegionsPerTable[tableIndex]); - return curSkew - oldSkew; - } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 4827e8e99044..01cb20041d8e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -608,7 +608,7 @@ protected abstract List balanceTable(TableName tableName, }); return result; } else { - LOG.info("Start Generate Balance plan for cluster."); + LOG.debug("Start Generate Balance plan for cluster."); return balanceTable(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable)); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFromRegionLoadFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFromRegionLoadFunction.java index 06eb07d67e06..199aa10a75fa 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFromRegionLoadFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFromRegionLoadFunction.java @@ -79,4 +79,4 @@ protected double getRegionLoadCost(Collection regionLoadList } protected abstract double getCostFromRl(BalancerRegionLoad rl); -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java index 2735b6956f22..977c6b14ec0a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java @@ -83,6 +83,16 @@ protected void regionMoved(int region, int oldServer, int newServer) { protected abstract double cost(); + /** + * Add the cost of this cost function to the weight of the candidate generator that is optimized + * for this cost function. By default it is the RandomCandiateGenerator for a cost function. + * Called once per init or after postAction. + * @param weights the weights for every generator. 
+ */ + public void updateWeight(double[] weights) { + weights[StochasticLoadBalancer.GeneratorType.RANDOM.ordinal()] += cost(); + } + /** * Scale the value between 0 and 1. * @param min Min value diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java index a535d829cf1d..29afd59084f7 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java @@ -66,17 +66,21 @@ void applyCostsChange(Consumer consumer) { } private static double computeCost(double[] stats) { + if (stats == null || stats.length == 0) { + return 0; + } double totalCost = 0; double total = getSum(stats); double count = stats.length; double mean = total / count; - for (int i = 0; i < stats.length; i++) { double n = stats[i]; - double diff = Math.abs(mean - n); + double diff = (mean - n) * (mean - n); totalCost += diff; } + // No need to compute standard deviation with division by cluster size when scaling. + totalCost = Math.sqrt(totalCost); return CostFunction.scale(getMinSkew(total, count), getMaxSkew(total, count), totalCost); } @@ -94,18 +98,22 @@ private static double getSum(double[] stats) { * @param total is total number of regions */ public static double getMinSkew(double total, double numServers) { + if (numServers == 0) { + return 0; + } double mean = total / numServers; // It's possible that there aren't enough regions to go around double min; if (numServers > total) { - min = ((numServers - total) * mean + (1 - mean) * total) ; + min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total) ; } else { // Some will have 1 more than everything else. int numHigh = (int) (total - (Math.floor(mean) * numServers)); int numLow = (int) (numServers - numHigh); - min = numHigh * (Math.ceil(mean) - mean) + numLow * (mean - Math.floor(mean)); + min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean) + + numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean)); } - return min; + return Math.sqrt(min); } /** @@ -114,7 +122,10 @@ public static double getMinSkew(double total, double numServers) { * a zero sum cost for this to make sense. 
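Restating the DoubleArrayCost change above as formulas (the only assumption is that CostFunction.scale performs a linear rescale between its min and max arguments): for per-server values $x_1,\dots,x_n$ with total $T=\sum_i x_i$ and mean $\mu=T/n$, the cost moves from a sum of absolute deviations to the root of the sum of squared deviations,

$$ C=\sqrt{\sum_{i=1}^{n}(x_i-\mu)^2}, $$

reported as $(C-C_{\min})/(C_{\max}-C_{\min})$ with

$$ C_{\max}=\sqrt{(T-\mu)^2+(n-1)\,\mu^2},\qquad C_{\min}=\sqrt{n_{hi}\,(\lceil\mu\rceil-\mu)^2+n_{lo}\,(\mu-\lfloor\mu\rfloor)^2}\quad(T\ge n), $$

where $n_{hi}=T-n\lfloor\mu\rfloor$ servers must carry one extra region and $n_{lo}=n-n_{hi}$; for $T<n$ the patch uses $(n-T)\,\mu^2+(1-\mu)^2\,T$ under the root instead. Skipping the division by $n$ that a true standard deviation would apply is harmless here, because the measured cost, $C_{\min}$ and $C_{\max}$ would all shrink by the same factor and the scaled ratio is unchanged, which is what the "No need to compute standard deviation with division by cluster size when scaling" comment is saying.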
*/ public static double getMaxSkew(double total, double numServers) { + if (numServers == 0) { + return 0; + } double mean = total / numServers; - return (total - mean) + (numServers - 1) * mean; + return Math.sqrt((total - mean) * (total - mean) + (numServers - 1) * mean * mean); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index a7eb623df24a..acccc321ae3c 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -90,6 +90,14 @@ protected List createCandidateGenerators() { return fnPickers; } + /** + * @return any candidate generator in random + */ + @Override + protected CandidateGenerator getRandomGenerator() { + return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size())); + } + /** * Round robin assignment: Segregate the regions into two types: * diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java index 595e1857e251..8604f4a47f7f 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.balancer; +import java.util.concurrent.ThreadLocalRandom; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -34,27 +35,53 @@ BalanceAction generate(BalancerClusterState cluster) { private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisServer) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; - int index = 0; - while (servers[index] == null || servers[index] == thisServer) { - index++; - if (index == servers.length) { - return -1; + int selectedIndex = -1; + double currentLargestRandom = -1; + for (int i = 0; i < servers.length; i++) { + if (servers[i] == null || servers[i] == thisServer) { + continue; + } + if (selectedIndex != -1 + && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0) { + // Exhausted servers of the same region count + break; + } + // we don't know how many servers have the same region count, we will randomly select one + // using a simplified inline reservoir sampling by assignmening a random number to stream + // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) + double currentRandom = ThreadLocalRandom.current().nextDouble(); + if (currentRandom > currentLargestRandom) { + selectedIndex = i; + currentLargestRandom = currentRandom; } } - return servers[index]; + return selectedIndex == -1 ? 
-1 : servers[selectedIndex]; } private int pickMostLoadedServer(final BalancerClusterState cluster, int thisServer) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; - int index = servers.length - 1; - while (servers[index] == null || servers[index] == thisServer) { - index--; - if (index < 0) { - return -1; + int selectedIndex = -1; + double currentLargestRandom = -1; + for (int i = servers.length - 1; i >= 0; i--) { + if (servers[i] == null || servers[i] == thisServer) { + continue; + } + if (selectedIndex != -1 && cluster.getNumRegionsComparator().compare(servers[i], + servers[selectedIndex]) != 0) { + // Exhausted servers of the same region count + break; + } + // we don't know how many servers have the same region count, we will randomly select one + // using a simplified inline reservoir sampling by assignmening a random number to stream + // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) + double currentRandom = ThreadLocalRandom.current().nextDouble(); + if (currentRandom > currentLargestRandom) { + selectedIndex = i; + currentLargestRandom = currentRandom; } } - return servers[index]; + return selectedIndex == -1? -1 : servers[selectedIndex]; } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java index f7650b347b50..678c9a3e9adf 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java @@ -88,4 +88,8 @@ private double getWeightedLocality(int region, int entity) { return cluster.getOrComputeWeightedLocality(region, entity, type); } -} \ No newline at end of file + @Override + public final void updateWeight(double[] weights) { + weights[StochasticLoadBalancer.GeneratorType.LOCALITY.ordinal()] += cost(); + } +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java index 071f1350724e..21018368f4dd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java @@ -34,7 +34,7 @@ class MoveCostFunction extends CostFunction { static final float DEFAULT_MOVE_COST = 7; static final float DEFAULT_MOVE_COST_OFFPEAK = 3; private static final int DEFAULT_MAX_MOVES = 600; - private static final float DEFAULT_MAX_MOVE_PERCENT = 0.25f; + private static final float DEFAULT_MAX_MOVE_PERCENT = 1.0f; private final float maxMovesPercent; private final ClusterInfoProvider provider; @@ -79,4 +79,4 @@ protected double cost() { return scale(0, Math.min(cluster.numRegions, maxMoves), moveCost); } -} \ No newline at end of file +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java index 4f66ba465c39..442bbc9b7bcf 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java @@ -19,17 +19,12 @@ import org.apache.hadoop.conf.Configuration; 
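The comment in LoadCandidateGenerator above describes picking uniformly among all servers that share the lowest (or highest) region count by assigning each candidate a random draw and keeping the largest, i.e. a one-pass reservoir-style tie break over a sorted array. Below is a standalone, simplified sketch of just that idea; it is not the patch's class, which additionally skips null slots and compares via the cluster's region-count comparator.

import java.util.concurrent.ThreadLocalRandom;

/** Simplified illustration of the one-pass tie-breaking pick; hypothetical class name. */
public final class ReservoirTieBreak {
  /** Returns the index of one of the smallest values in {@code sorted}, chosen uniformly. */
  static int pickAmongSmallest(int[] sorted) {
    int selected = -1;
    double best = -1;
    for (int i = 0; i < sorted.length; i++) {
      if (selected != -1 && sorted[i] != sorted[selected]) {
        break; // left the run of equal (smallest) values
      }
      // each element of the run gets an independent uniform draw; keeping the
      // largest draw selects uniformly among the run without knowing its length
      double r = ThreadLocalRandom.current().nextDouble();
      if (r > best) {
        selected = i;
        best = r;
      }
    }
    return selected;
  }

  public static void main(String[] args) {
    int[] regionCounts = { 3, 3, 3, 7, 9 }; // already sorted ascending
    System.out.println("picked index " + pickAmongSmallest(regionCounts));
  }
}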
import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Compute the cost of a potential cluster state from skew in number of regions on a cluster. */ @InterfaceAudience.Private class RegionCountSkewCostFunction extends CostFunction { - - private static final Logger LOG = LoggerFactory.getLogger(RegionCountSkewCostFunction.class); - static final String REGION_COUNT_SKEW_COST_KEY = "hbase.master.balancer.stochastic.regionCountCost"; static final float DEFAULT_REGION_COUNT_SKEW_COST = 500; @@ -50,14 +45,6 @@ void prepare(BalancerClusterState cluster) { costs[i] = cluster.regionsPerServer[i].length; } }); - LOG.debug("{} sees a total of {} servers and {} regions.", getClass().getSimpleName(), - cluster.numServers, cluster.numRegions); - if (LOG.isTraceEnabled()) { - for (int i = 0; i < cluster.numServers; i++) { - LOG.trace("{} sees server '{}' has {} regions", getClass().getSimpleName(), - cluster.servers[i], cluster.regionsPerServer[i].length); - } - } } @Override @@ -72,4 +59,9 @@ protected void regionMoved(int region, int oldServer, int newServer) { costs[newServer] = cluster.regionsPerServer[newServer].length; }); } -} \ No newline at end of file + + @Override + public final void updateWeight(double[] weights) { + weights[StochasticLoadBalancer.GeneratorType.LOAD.ordinal()] += cost(); + } +} diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java index 520eb6c764fa..cd4012a0e8ef 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java @@ -74,6 +74,11 @@ protected double cost() { return scale(0, maxCost, totalCost); } + @Override + public final void updateWeight(double[] weights) { + weights[StochasticLoadBalancer.GeneratorType.RACK.ordinal()] += cost(); + } + /** * For each primary region, it computes the total number of replicas in the array (numReplicas) * and returns a sum of numReplicas-1 squared. For example, if the server hosts regions a, b, c, diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index eaa923e74fcf..ff6d031cef33 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -129,14 +129,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private int numRegionLoadsToRemember = 15; private float minCostNeedBalance = 0.025f; - private List candidateGenerators; private List costFunctions; // FindBugs: Wants this protected; IS2_INCONSISTENT_SYNC // To save currently configed sum of multiplier. 
Defaulted at 1 for cases that carry high cost - private float sumMultiplier = 1.0f; + private float sumMultiplier; // to save and report costs to JMX private double curOverallCost = 0d; private double[] tempFunctionCosts; private double[] curFunctionCosts; + private double[] weightsOfGenerators; // Keep locality based picker and cost function to alert them // when new services are offered @@ -146,6 +146,12 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private RegionReplicaHostCostFunction regionReplicaHostCostFunction; private RegionReplicaRackCostFunction regionReplicaRackCostFunction; + protected List candidateGenerators; + + public enum GeneratorType { + RANDOM, LOAD, LOCALITY, RACK + } + /** * The constructor that pass a MetricsStochasticBalancer to BaseLoadBalancer to replace its * default MetricsBalancer @@ -204,10 +210,11 @@ protected float getDefaultSlop() { protected List createCandidateGenerators() { List candidateGenerators = new ArrayList(4); - candidateGenerators.add(new RandomCandidateGenerator()); - candidateGenerators.add(new LoadCandidateGenerator()); - candidateGenerators.add(localityCandidateGenerator); - candidateGenerators.add(new RegionReplicaRackCandidateGenerator()); + candidateGenerators.add(GeneratorType.RANDOM.ordinal(), new RandomCandidateGenerator()); + candidateGenerators.add(GeneratorType.LOAD.ordinal(), new LoadCandidateGenerator()); + candidateGenerators.add(GeneratorType.LOCALITY.ordinal(), localityCandidateGenerator); + candidateGenerators.add(GeneratorType.RACK.ordinal(), + new RegionReplicaRackCandidateGenerator()); return candidateGenerators; } @@ -248,11 +255,12 @@ protected void loadConf(Configuration conf) { curFunctionCosts = new double[costFunctions.size()]; tempFunctionCosts = new double[costFunctions.size()]; - LOG.info("Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps, - ", stepsPerRegion=" + stepsPerRegion + - ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + ", CostFunctions=" + - Arrays.toString(getCostFunctionNames()) + " etc."); - } + LOG.info( + "Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + + ", stepsPerRegion=" + stepsPerRegion + + ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + + ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + + " , sum of multiplier of cost functions = " + sumMultiplier + " etc."); } @Override public void updateClusterMetrics(ClusterMetrics st) { @@ -310,14 +318,7 @@ void updateMetricsSize(int size) { private boolean areSomeRegionReplicasColocated(BalancerClusterState c) { regionReplicaHostCostFunction.prepare(c); - if (Math.abs(regionReplicaHostCostFunction.cost()) > CostFunction.COST_EPSILON) { - return true; - } - regionReplicaRackCostFunction.prepare(c); - if (Math.abs(regionReplicaRackCostFunction.cost()) > CostFunction.COST_EPSILON) { - return true; - } - return false; + return (Math.abs(regionReplicaHostCostFunction.cost()) > CostFunction.COST_EPSILON); } private String getBalanceReason(double total, double sumMultiplier) { @@ -345,33 +346,25 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) { return false; } if (areSomeRegionReplicasColocated(cluster)) { - LOG.info("Running balancer because at least one server hosts replicas of the same region."); + LOG.info("Running balancer because at least one server hosts replicas of the same region." 
+ + " function cost={}", functionCost()); return true; } if (idleRegionServerExist(cluster)){ - LOG.info("Running balancer because cluster has idle server(s)."); + LOG.info("Running balancer because cluster has idle server(s)."+ + " function cost={}", functionCost()); return true; } - sumMultiplier = 0.0f; double total = 0.0; for (CostFunction c : costFunctions) { - float multiplier = c.getMultiplier(); - double cost = c.cost(); if (!c.isNeeded()) { LOG.trace("{} not needed", c.getClass().getSimpleName()); continue; } - total += cost * multiplier; - sumMultiplier += multiplier; + total += c.cost() * c.getMultiplier(); } - if (sumMultiplier <= 0) { - LOG.error("At least one cost function needs a multiplier > 0. For example, set " - + "hbase.master.balancer.stochastic.regionCountCost to a positive value or default"); - return false; - } - boolean balanced = (total / sumMultiplier < minCostNeedBalance); if (balanced) { @@ -394,8 +387,33 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) { @RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") BalanceAction nextAction(BalancerClusterState cluster) { - return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size())) - .generate(cluster); + return getRandomGenerator().generate(cluster); + } + + /** + * Select the candidate generator to use based on the cost of cost functions. The chance of + * selecting a candidate generator is propotional to the share of cost of all cost functions among + * all cost functions that benefit from it. + */ + protected CandidateGenerator getRandomGenerator() { + double sum = 0; + for (int i = 0; i < weightsOfGenerators.length; i++) { + sum += weightsOfGenerators[i]; + weightsOfGenerators[i] = sum; + } + if (sum == 0) { + return candidateGenerators.get(0); + } + for (int i = 0; i < weightsOfGenerators.length; i++) { + weightsOfGenerators[i] /= sum; + } + double rand = ThreadLocalRandom.current().nextDouble(); + for (int i = 0; i < weightsOfGenerators.length; i++) { + if (rand <= weightsOfGenerators[i]) { + return candidateGenerators.get(i); + } + } + return candidateGenerators.get(candidateGenerators.size() - 1); } @RestrictedApi(explanation = "Should only be called in tests", link = "", @@ -435,6 +453,18 @@ protected List balanceTable(TableName tableName, Map 0. 
For example, set " + + "hbase.master.balancer.stochastic.regionCountCost to a positive value or default"); + return null; + } + double currentCost = computeCost(cluster, Double.MAX_VALUE); curOverallCost = currentCost; System.arraycopy(tempFunctionCosts, 0, curFunctionCosts, 0, curFunctionCosts.length); @@ -476,7 +506,7 @@ protected List balanceTable(TableName tableName, Map balanceTable(TableName tableName, Map @@ -510,9 +540,9 @@ protected List balanceTable(TableName tableName, Map= minCostNeedBalance) { + builder.append(", need balance"); } } else { builder.append("not needed"); @@ -687,17 +717,28 @@ private void updateRegionLoad() { @RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void initCosts(BalancerClusterState cluster) { + // Initialize the weights of generator every time + weightsOfGenerators = new double[this.candidateGenerators.size()]; for (CostFunction c : costFunctions) { c.prepare(cluster); + c.updateWeight(weightsOfGenerators); } } + /** + * Update both the costs of costfunctions and the weights of candidate generators + */ @RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") - void updateCostsWithAction(BalancerClusterState cluster, BalanceAction action) { + void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction action) { + // Reset all the weights to 0 + for (int i = 0; i < weightsOfGenerators.length; i++) { + weightsOfGenerators[i] = 0; + } for (CostFunction c : costFunctions) { if (c.isNeeded()) { c.postAction(action); + c.updateWeight(weightsOfGenerators); } } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java index efc22cf1876b..d1e7cd217343 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java @@ -29,17 +29,43 @@ class TableSkewCostFunction extends CostFunction { private static final String TABLE_SKEW_COST_KEY = "hbase.master.balancer.stochastic.tableSkewCost"; private static final float DEFAULT_TABLE_SKEW_COST = 35; + DoubleArrayCost[] costsPerTable; TableSkewCostFunction(Configuration conf) { this.setMultiplier(conf.getFloat(TABLE_SKEW_COST_KEY, DEFAULT_TABLE_SKEW_COST)); } + @Override + void prepare(BalancerClusterState cluster) { + super.prepare(cluster); + costsPerTable = new DoubleArrayCost[cluster.numTables]; + for (int tableIdx = 0; tableIdx < cluster.numTables; tableIdx++) { + costsPerTable[tableIdx] = new DoubleArrayCost(); + costsPerTable[tableIdx].prepare(cluster.numServers); + final int tableIndex = tableIdx; + costsPerTable[tableIdx].applyCostsChange(costs -> { + // Keep a cached deep copy for change-only recomputation + for (int i = 0; i < cluster.numServers; i++) { + costs[i] = cluster.numRegionsPerServerPerTable[tableIndex][i]; + } + }); + } + } + + @Override + protected void regionMoved(int region, int oldServer, int newServer) { + int tableIdx = cluster.regionIndexToTableIndex[region]; + costsPerTable[tableIdx].applyCostsChange(costs -> { + costs[oldServer] = cluster.numRegionsPerServerPerTable[tableIdx][oldServer]; + costs[newServer] = cluster.numRegionsPerServerPerTable[tableIdx][newServer]; + }); + } + @Override protected double cost() { 
double cost = 0; for (int tableIdx = 0; tableIdx < cluster.numTables; tableIdx++) { - cost += scale(cluster.minRegionSkewByTable[tableIdx], - cluster.maxRegionSkewByTable[tableIdx], cluster.regionSkewByTable[tableIdx]); + cost += costsPerTable[tableIdx].cost(); } return cost; } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java index 56d5f10f6b7e..8a077b793ccb 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java @@ -39,13 +39,13 @@ public class StochasticBalancerTestBase extends BalancerTestBase { protected static StochasticLoadBalancer loadBalancer; - protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = new DummyMetricsStochasticBalancer(); + protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = new + DummyMetricsStochasticBalancer(); @BeforeClass public static void beforeAllTests() throws Exception { conf = HBaseConfiguration.create(); conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f); conf.setFloat("hbase.regions.slop", 0.0f); conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true); @@ -59,7 +59,17 @@ protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerSe boolean assertFullyBalancedForReplicas) { Map> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); - testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas); + testWithCluster(serverMap, null, assertFullyBalanced, + assertFullyBalancedForReplicas); + } + + protected void testWithClusterWithIteration(int numNodes, int numRegions, int numRegionsPerServer, + int replication, int numTables, boolean assertFullyBalanced, + boolean assertFullyBalancedForReplicas) { + Map> serverMap = + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + testWithClusterWithIteration(serverMap, null, assertFullyBalanced, + assertFullyBalancedForReplicas); } protected void testWithCluster(Map> serverMap, @@ -80,7 +90,7 @@ protected void testWithCluster(Map> serverMap, List balancedCluster = reconcile(list, plans, serverMap); // Print out the cluster loads to make debugging easier. - LOG.info("Mock Balance : " + printMock(balancedCluster)); + LOG.info("Mock after Balance : " + printMock(balancedCluster)); if (assertFullyBalanced) { assertClusterAsBalanced(balancedCluster); @@ -95,4 +105,41 @@ protected void testWithCluster(Map> serverMap, } } } + + protected void testWithClusterWithIteration(Map> serverMap, + RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) { + List list = convertToList(serverMap); + LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); + + loadBalancer.setRackManager(rackManager); + // Run the balancer. 
+ Map>> LoadOfAllTable =
+ (Map) mockClusterServersWithTables(serverMap);
+ List plans = loadBalancer.balanceCluster(LoadOfAllTable);
+ assertNotNull("Initial cluster balance should produce plans.", plans);
+
+ List balancedCluster = null;
+ // Run through iterations until done, otherwise the run will be killed when the test times out
+ while (plans != null && (assertFullyBalanced || assertFullyBalancedForReplicas)) {
+ // Apply the plan to the mock cluster.
+ balancedCluster = reconcile(list, plans, serverMap);
+
+ // Print out the cluster loads to make debugging easier.
+ LOG.info("Mock after balance: " + printMock(balancedCluster));
+
+ LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
+ plans = loadBalancer.balanceCluster(LoadOfAllTable);
+ }
+
+ // Print out the cluster loads to make debugging easier.
+ LOG.info("Mock Final balance: " + printMock(balancedCluster));
+
+ if (assertFullyBalanced) {
+ assertNull("Given a requirement to be fully balanced, second attempt at plans should "
+ + "produce none.", plans);
+ }
+ if (assertFullyBalancedForReplicas) {
+ assertRegionReplicaPlacement(serverMap, rackManager);
+ }
+ }
}
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java
index 9ab37110ab64..41dbb552db6a 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java
@@ -24,7 +24,6 @@ public class StochasticBalancerTestBase2 extends StochasticBalancerTestBase {
@Before
public void before() {
- conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000L);
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index 0c94a20e3586..5dc3fa81e1b3 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -355,8 +355,6 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception {
// now move region1 from servers[0] to servers[2]
cluster.doAction(new MoveRegionAction(0, 0, 2));
- // check that the regionSkewByTable for "table" has increased to 2
- assertEquals(2, cluster.regionSkewByTable[0], 0.01);
// now repeat check whether moving region1 from servers[1] to servers[2]
// would lower availability
assertTrue(cluster.wouldLowerAvailability(hri1, servers[2]));
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java
index d3bef06759e2..38834a8c9fa4 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java
@@ -62,6 +62,6 @@ public void testComputeCost() {
}
costs[100] = 100;
});
- assertEquals(0.5, cost.cost(), 0.01);
+ assertEquals(0.0708, cost.cost(), 0.01);
}
}
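Editor's note: the pickMostLoadedServer() change in LoadCandidateGenerator above breaks ties between equally loaded servers with single-element reservoir sampling: each candidate draws a random key and the largest key wins, so no second pass is needed to count the ties. A minimal standalone sketch of that trick, not HBase code, with hypothetical names and assuming the candidate array is sorted by region count:

```java
import java.util.concurrent.ThreadLocalRandom;

public final class ReservoirTieBreakSketch {

  /**
   * Picks one index among the entries that share the highest value, uniformly at random,
   * by walking the sorted array from the end and keeping the candidate with the largest
   * random key (single-element reservoir sampling).
   */
  static int pickAmongTies(int[] sortedRegionCounts) {
    int selectedIndex = -1;
    double currentLargestRandom = -1;
    for (int i = sortedRegionCounts.length - 1; i >= 0; i--) {
      if (selectedIndex != -1 && sortedRegionCounts[i] != sortedRegionCounts[selectedIndex]) {
        break; // exhausted the candidates that share the highest region count
      }
      double currentRandom = ThreadLocalRandom.current().nextDouble();
      if (currentRandom > currentLargestRandom) {
        selectedIndex = i;
        currentLargestRandom = currentRandom;
      }
    }
    return selectedIndex;
  }

  public static void main(String[] args) {
    // Indexes 2, 3 and 4 tie for the maximum; each should win roughly a third of the time.
    int[] counts = { 2, 5, 9, 9, 9 };
    int[] hits = new int[counts.length];
    for (int i = 0; i < 90_000; i++) {
      hits[pickAmongTies(counts)]++;
    }
    System.out.println(java.util.Arrays.toString(hits));
  }
}
```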
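Editor's note: getRandomGenerator() in the StochasticLoadBalancer diff above picks a candidate generator with probability proportional to the weight that the cost functions have accumulated for it through updateWeight(). A self-contained sketch of that cumulative-weight selection, again with hypothetical class and method names and made-up example weights:

```java
import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public final class WeightedGeneratorPickSketch {

  /**
   * Returns an index chosen with probability proportional to weights[i]: build a running
   * (cumulative) sum, draw a uniform random number scaled to the total, and return the first
   * bucket whose cumulative sum covers it. Falls back to index 0 when no weight has been
   * recorded yet, mirroring the fallback in getRandomGenerator().
   */
  static int pick(double[] weights) {
    double[] cumulative = new double[weights.length];
    double sum = 0;
    for (int i = 0; i < weights.length; i++) {
      sum += weights[i];
      cumulative[i] = sum;
    }
    if (sum == 0) {
      return 0;
    }
    double rand = ThreadLocalRandom.current().nextDouble() * sum;
    for (int i = 0; i < cumulative.length; i++) {
      if (rand <= cumulative[i]) {
        return i;
      }
    }
    return weights.length - 1; // guard against floating-point rounding
  }

  public static void main(String[] args) {
    // Example weights in GeneratorType order (RANDOM, LOAD, LOCALITY, RACK): LOAD dominates,
    // so the LOAD generator should be chosen roughly 80% of the time.
    double[] weights = { 0.05, 0.80, 0.10, 0.05 };
    int[] hits = new int[weights.length];
    for (int i = 0; i < 100_000; i++) {
      hits[pick(weights)]++;
    }
    System.out.println(Arrays.toString(hits));
  }
}
```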
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index ddb8b067466a..9ad080b1a3d4 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -417,13 +417,13 @@ public void testMoveCost() throws Exception { cluster.setNumRegions(10000); cluster.setNumMovedRegions(250); cost = costFunction.cost(); - assertEquals(0.1f, cost, 0.001); + assertEquals(0.025f, cost, 0.001); cluster.setNumMovedRegions(1250); cost = costFunction.cost(); - assertEquals(0.5f, cost, 0.001); + assertEquals(0.125f, cost, 0.001); cluster.setNumMovedRegions(2500); cost = costFunction.cost(); - assertEquals(1.0f, cost, 0.01); + assertEquals(0.25f, cost, 0.01); } } @@ -464,10 +464,10 @@ public void testCostAfterUndoAction() { final double expectedCost = loadBalancer.computeCost(cluster, Double.MAX_VALUE); BalanceAction action = loadBalancer.nextAction(cluster); cluster.doAction(action); - loadBalancer.updateCostsWithAction(cluster, action); + loadBalancer.updateCostsAndWeightsWithAction(cluster, action); BalanceAction undoAction = action.undoAction(); cluster.doAction(undoAction); - loadBalancer.updateCostsWithAction(cluster, undoAction); + loadBalancer.updateCostsAndWeightsWithAction(cluster, undoAction); final double actualCost = loadBalancer.computeCost(cluster, Double.MAX_VALUE); assertEquals(expectedCost, actualCost, 0); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 94c382443f63..5269fe71d7f0 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -51,7 +51,6 @@ public class TestStochasticLoadBalancerBalanceCluster extends StochasticBalancer */ @Test public void testBalanceCluster() throws Exception { - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.onConfigurationChange(conf); for (int[] mockCluster : clusterStateMocks) { Map> servers = mockClusterServers(mockCluster); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java index 04e852813fc4..5a0dc06e4707 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java @@ -77,10 +77,9 @@ public static void beforeAllTests() throws IOException { RULES_FILE = HTU.getDataTestDir(DEFAULT_RULES_FILE_NAME).toString(); conf.set(HeterogeneousRegionCountCostFunction.HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE, RULES_FILE); - loadBalancer = new StochasticLoadBalancer(); + loadBalancer = new StochasticLoadTestBalancer(); loadBalancer.setClusterInfoProvider(new DummyClusterInfoProvider(conf)); loadBalancer.initialize(); - 
loadBalancer.getCandidateGenerators().add(new FairRandomCandidateGenerator()); } @Test @@ -302,4 +301,14 @@ BalanceAction generate(BalancerClusterState cluster) { return super.generate(cluster); } } + + static class StochasticLoadTestBalancer extends StochasticLoadBalancer { + private FairRandomCandidateGenerator fairRandomCandidateGenerator = + new FairRandomCandidateGenerator(); + + @Override + protected CandidateGenerator getRandomGenerator() { + return fairRandomCandidateGenerator; + } + } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java index a23167db12e9..ba2da0a860a0 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java @@ -39,8 +39,8 @@ public void testLargeCluster() { int numTables = 100; int replication = 1; conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 6 * 60 * 1000); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.onConfigurationChange(conf); - testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + testWithClusterWithIteration(numNodes, numRegions, numRegionsPerServer, replication, numTables, + true, true); } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java index ac6ad4b75ace..58eed9e63796 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.balancer; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.util.ArrayList; @@ -161,8 +162,7 @@ public void testNeedsBalanceForColocatedReplicas() { map.put(s2, regionsOnS2); // add another server so that the cluster has some host on another rack map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1)); - assertTrue( - loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, + assertFalse(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, new BalancerClusterState(map, null, null, new ForTestRackManagerOne()))); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java index 8a71443e7ba6..85576efc229f 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaSameHosts.java @@ -41,7 +41,6 @@ public class TestStochasticLoadBalancerRegionReplicaSameHosts extends Stochastic public void testRegionReplicationOnMidClusterSameHosts() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); 
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.onConfigurationChange(conf); int numHosts = 30; int numRegions = 30 * 30; diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java index 3b2c847c5455..83384ee47189 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.balancer; +import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -37,7 +38,10 @@ public class TestStochasticLoadBalancerRegionReplicaWithRacks extends Stochastic HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaWithRacks.class); private static class ForTestRackManager extends RackManager { + int numRacks; + Map serverIndexes = new HashMap(); + int numServers = 0; public ForTestRackManager(int numRacks) { this.numRacks = numRacks; @@ -45,17 +49,21 @@ public ForTestRackManager(int numRacks) { @Override public String getRack(ServerName server) { - return "rack_" + (server.hashCode() % numRacks); + String key = server.getServerName(); + if (!serverIndexes.containsKey(key)) { + serverIndexes.put(key, numServers++); + } + return "rack_" + serverIndexes.get(key) % numRacks; } } @Test public void testRegionReplicationOnMidClusterWithRacks() { - conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 100000000L); + conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec loadBalancer.onConfigurationChange(conf); - int numNodes = 4; + int numNodes = 5; int numRegions = numNodes * 1; int replication = 3; // 3 replicas per region int numRegionsPerServer = 1; @@ -64,7 +72,25 @@ public void testRegionReplicationOnMidClusterWithRacks() { Map> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); + testWithClusterWithIteration(serverMap, rm, true, true); + } + + @Test + public void testRegionReplicationOnLargeClusterWithRacks() { + conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false); + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 5000L); + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 10 * 1000); // 10 sec + loadBalancer.onConfigurationChange(conf); + int numNodes = 100; + int numRegions = numNodes * 30; + int replication = 3; // 3 replicas per region + int numRegionsPerServer = 28; + int numTables = 1; + int numRacks = 4; // all replicas should be on a different rack + Map> serverMap = + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + RackManager rm = new ForTestRackManager(numRacks); - testWithCluster(serverMap, rm, false, true); + testWithClusterWithIteration(serverMap, rm, true, true); } } diff --git 
a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java index 8adaa6578f94..479e194a6d6c 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java @@ -58,7 +58,7 @@ public void testSmallCluster3() { int numRegionsPerServer = 1; // all servers except one int replication = 1; int numTables = 10; - /* fails because of max moves */ + // fails because of max moves testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, false); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 5dafa76feb05..6c72baa2db1f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -94,7 +94,8 @@ public class KeyValue implements ExtendedCell, Cloneable { /** * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion * of KeyValue only. - * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove for hbase 3.0. + * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, + * remove for hbase 3.0. */ @Deprecated public static final KVComparator COMPARATOR = new KVComparator(); @@ -261,7 +262,7 @@ static boolean isValidType(byte b) { /** * Cannot rely on enum ordinals . They change if item is removed or moved. * Do our own codes. - * @param b + * @param b the kv serialized byte[] to process * @return Type associated with passed code. */ public static Type codeToType(final byte b) { @@ -352,7 +353,7 @@ public KeyValue(final byte[] bytes, final int offset, final int length) { * @param bytes byte array * @param offset offset to start of the KeyValue * @param length length of the KeyValue - * @param ts + * @param ts timestamp */ public KeyValue(final byte[] bytes, final int offset, final int length, long ts) { this(bytes, offset, length, null, 0, 0, null, 0, 0, ts, Type.Maximum, null, 0, 0, null); @@ -364,7 +365,7 @@ public KeyValue(final byte[] bytes, final int offset, final int length, long ts) * Constructs KeyValue structure filled with null value. * Sets type to {@link KeyValue.Type#Maximum} * @param row - row key (arbitrary byte array) - * @param timestamp + * @param timestamp version timestamp */ public KeyValue(final byte [] row, final long timestamp) { this(row, null, null, timestamp, Type.Maximum, null); @@ -373,7 +374,7 @@ public KeyValue(final byte [] row, final long timestamp) { /** * Constructs KeyValue structure filled with null value. 
* @param row - row key (arbitrary byte array) - * @param timestamp + * @param timestamp version timestamp */ public KeyValue(final byte [] row, final long timestamp, Type type) { this(row, null, null, timestamp, type, null); @@ -410,7 +411,7 @@ public KeyValue(final byte [] row, final byte [] family, * @param qualifier column qualifier * @param timestamp version timestamp * @param type key type - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type) { @@ -424,7 +425,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param qualifier column qualifier * @param timestamp version timestamp * @param value column value - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte[] value) { @@ -439,7 +440,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param timestamp version timestamp * @param value column value * @param tags tags - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte[] value, @@ -455,7 +456,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param timestamp version timestamp * @param value column value * @param tags tags non-empty list of tags or null - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte[] value, @@ -475,7 +476,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param timestamp version timestamp * @param type key type * @param value column value - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, @@ -494,7 +495,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param timestamp version timestamp * @param type key type * @param value column value - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, @@ -511,7 +512,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param timestamp version timestamp * @param type key type * @param value column value - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, @@ -532,7 +533,7 @@ public KeyValue(final byte[] row, final byte[] family, * @param value column value * @param voffset value offset * @param vlength value length - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(byte [] row, byte [] family, byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, @@ -544,17 +545,17 @@ public KeyValue(byte [] row, byte [] family, } /** - * @param row - * @param family - * @param qualifier - * @param qoffset - * @param qlength - * @param timestamp - * @param type - * 
@param value - * @param voffset - * @param vlength - * @param tags + * @param row row key + * @param family family name + * @param qualifier qualifier name + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param timestamp version timestamp + * @param type key type + * @param value column value + * @param voffset value offset + * @param vlength value length + * @param tags tags */ public KeyValue(byte [] row, byte [] family, byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, @@ -570,7 +571,7 @@ public KeyValue(byte [] row, byte [] family, *

* Column is split into two fields, family and qualifier. * @param row row key - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte [] row, final int roffset, final int rlength, final byte [] family, final int foffset, final int flength, @@ -605,22 +606,18 @@ public KeyValue(final byte [] row, final int roffset, final int rlength, * @param vlength value length * @param tags non-empty list of tags or null * @throws IllegalArgumentException an illegal value was passed or there is insufficient space - * remaining in the buffer + * remaining in the buffer */ - public KeyValue(byte [] buffer, final int boffset, - final byte [] row, final int roffset, final int rlength, - final byte [] family, final int foffset, final int flength, - final byte [] qualifier, final int qoffset, final int qlength, - final long timestamp, final Type type, - final byte [] value, final int voffset, final int vlength, - final Tag[] tags) { - this.bytes = buffer; - this.length = writeByteArray(buffer, boffset, - row, roffset, rlength, - family, foffset, flength, qualifier, qoffset, qlength, - timestamp, type, value, voffset, vlength, tags); - this.offset = boffset; - } + public KeyValue(byte[] buffer, final int boffset, final byte[] row, final int roffset, + final int rlength, final byte[] family, final int foffset, final int flength, + final byte[] qualifier, final int qoffset, final int qlength, final long timestamp, + final Type type, final byte[] value, final int voffset, final int vlength, final Tag[] tags) { + this.bytes = buffer; + this.length = + writeByteArray(buffer, boffset, row, roffset, rlength, family, foffset, flength, qualifier, + qoffset, qlength, timestamp, type, value, voffset, vlength, tags); + this.offset = boffset; + } /** * Constructs KeyValue structure filled with specified values. 
@@ -641,7 +638,7 @@ public KeyValue(byte [] buffer, final int boffset, * @param voffset value offset * @param vlength value length * @param tags tags - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final byte [] row, final int roffset, final int rlength, final byte [] family, final int foffset, final int flength, @@ -657,21 +654,21 @@ public KeyValue(final byte [] row, final int roffset, final int rlength, } /** - * @param row - * @param roffset - * @param rlength - * @param family - * @param foffset - * @param flength - * @param qualifier - * @param qoffset - * @param qlength - * @param timestamp - * @param type - * @param value - * @param voffset - * @param vlength - * @param tags + * @param row row key + * @param roffset row offset + * @param rlength row length + * @param family family name + * @param foffset fammily offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param timestamp version timestamp + * @param type key type + * @param value column value + * @param voffset value offset + * @param vlength value length + * @param tags input tags */ public KeyValue(final byte [] row, final int roffset, final int rlength, final byte [] family, final int foffset, final int flength, @@ -697,7 +694,7 @@ public KeyValue(final byte [] row, final int roffset, final int rlength, * @param timestamp version timestamp * @param type key type * @param vlength value length - * @throws IllegalArgumentException + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final int rlength, final int flength, @@ -718,8 +715,8 @@ public KeyValue(final int rlength, * @param timestamp version timestamp * @param type key type * @param vlength value length - * @param tagsLength - * @throws IllegalArgumentException + * @param tagsLength length of the tags + * @throws IllegalArgumentException an illegal value was passed */ public KeyValue(final int rlength, final int flength, @@ -755,12 +752,12 @@ public KeyValue(Cell c) { /** * Create an empty byte[] representing a KeyValue * All lengths are preset and can be filled in later. - * @param rlength - * @param flength - * @param qlength - * @param timestamp - * @param type - * @param vlength + * @param rlength row length + * @param flength family length + * @param qlength qualifier length + * @param timestamp version timestamp + * @param type key type + * @param vlength value length * @return The newly created byte array. */ private static byte[] createEmptyByteArray(final int rlength, int flength, @@ -875,7 +872,7 @@ static void checkParameters(final byte [] row, final int rlength, * @return The number of useful bytes in the buffer. * * @throws IllegalArgumentException an illegal value was passed or there is insufficient space - * remaining in the buffer + * remaining in the buffer */ public static int writeByteArray(byte [] buffer, final int boffset, final byte [] row, final int roffset, final int rlength, @@ -1101,7 +1098,7 @@ private int calculateHashForKey(Cell cell) { /** * Clones a KeyValue. This creates a copy, re-allocating the buffer. * @return Fully copied clone of this KeyValue - * @throws CloneNotSupportedException + * @throws CloneNotSupportedException if cloning of keyValue is not supported */ @Override public KeyValue clone() throws CloneNotSupportedException { @@ -1189,7 +1186,9 @@ public Map toStringMap() { * @return Key as a String. 
*/ public static String keyToString(final byte [] b, final int o, final int l) { - if (b == null) return ""; + if (b == null) { + return ""; + } int rowlength = Bytes.toShort(b, o); String row = Bytes.toStringBinary(b, o + Bytes.SIZEOF_SHORT, rowlength); int columnoffset = o + Bytes.SIZEOF_SHORT + 1 + rowlength; @@ -1581,10 +1580,10 @@ public KeyValue createKeyOnly(boolean lenAsVal) { } /** - * @param b - * @param delimiter + * @param b the kv serialized byte[] to process + * @param delimiter input delimeter to fetch index from start * @return Index of delimiter having started from start of b - * moving rightward. + * moving rightward. */ public static int getDelimiter(final byte [] b, int offset, final int length, final int delimiter) { @@ -1603,8 +1602,10 @@ public static int getDelimiter(final byte [] b, int offset, final int length, /** * Find index of passed delimiter walking from end of buffer backwards. - * @param b - * @param delimiter + * @param b the kv serialized byte[] to process + * @param offset the offset in the byte[] + * @param length the length in the byte[] + * @param delimiter input delimeter to fetch index from end * @return Index of delimiter */ public static int getDelimiterInReverse(final byte [] b, final int offset, @@ -1765,8 +1766,8 @@ public int compare(byte[] l, int loff, int llen, byte[] r, int roff, int rlen) { /** * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. - * @param left - * @param right + * @param left left cell to compare row key + * @param right right cell to compare row key * @return 0 if equal, <0 if left smaller, >0 if right smaller */ protected int compareRowKey(final Cell left, final Cell right) { @@ -1776,12 +1777,12 @@ protected int compareRowKey(final Cell left, final Cell right) { /** * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are * full KVs laid out in a flat byte[]s. - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength + * @param left the left kv serialized byte[] to be compared with + * @param loffset the offset in the left byte[] + * @param llength the length in the left byte[] + * @param right the right kv serialized byte[] to be compared with + * @param roffset the offset in the right byte[] + * @param rlength the length in the right byte[] * @return 0 if equal, <0 if left smaller, >0 if right smaller */ public int compareFlatKey(byte[] left, int loffset, int llength, @@ -1877,8 +1878,8 @@ public int compareTimestamps(final Cell left, final Cell right) { } /** - * @param left - * @param right + * @param left left cell to compare rows for + * @param right right cell to compare rows for * @return Result comparing rows. */ public int compareRows(final Cell left, final Cell right) { @@ -1888,12 +1889,12 @@ public int compareRows(final Cell left, final Cell right) { /** * Get the b[],o,l for left and right rowkey portions and compare. 
- * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength + * @param left the left kv serialized byte[] to be compared with + * @param loffset the offset in the left byte[] + * @param llength the length in the left byte[] + * @param right the right kv serialized byte[] to be compared with + * @param roffset the offset in the right byte[] + * @param rlength the length in the right byte[] * @return 0 if equal, <0 if left smaller, >0 if right smaller */ public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, @@ -1906,20 +1907,17 @@ int compareColumns(final Cell left, final short lrowlength, final Cell right, return CellComparatorImpl.COMPARATOR.compareColumns(left, right); } - protected int compareColumns( - byte [] left, int loffset, int llength, final int lfamilylength, - byte [] right, int roffset, int rlength, final int rfamilylength) { + protected int compareColumns(byte[] left, int loffset, int llength, final int lfamilylength, + byte[] right, int roffset, int rlength, final int rfamilylength) { // Compare family portion first. - int diff = Bytes.compareTo(left, loffset, lfamilylength, - right, roffset, rfamilylength); + int diff = Bytes.compareTo(left, loffset, lfamilylength, right, roffset, rfamilylength); if (diff != 0) { return diff; } // Compare qualifier portion - return Bytes.compareTo(left, loffset + lfamilylength, - llength - lfamilylength, - right, roffset + rfamilylength, rlength - rfamilylength); - } + return Bytes.compareTo(left, loffset + lfamilylength, llength - lfamilylength, right, + roffset + rfamilylength, rlength - rfamilylength); + } static int compareTimestamps(final long ltimestamp, final long rtimestamp) { // The below older timestamps sorting ahead of newer timestamps looks @@ -1936,13 +1934,13 @@ static int compareTimestamps(final long ltimestamp, final long rtimestamp) { /** * Overridden - * @param commonPrefix - * @param left - * @param loffset - * @param llength - * @param right - * @param roffset - * @param rlength + * @param commonPrefix location of expected common prefix + * @param left the left kv serialized byte[] to be compared with + * @param loffset the offset in the left byte[] + * @param llength the length in the left byte[] + * @param right the right kv serialized byte[] to be compared with + * @param roffset the offset in the byte[] + * @param rlength the length in the right byte[] * @return 0 if equal, <0 if left smaller, >0 if right smaller */ @Override // SamePrefixComparator @@ -2083,8 +2081,8 @@ protected int compareColumns(final byte[] left, final int loffset, final int lqu } /** * Compares the row and column of two keyvalues for equality - * @param left - * @param right + * @param left left cell to compare row and column + * @param right right cell to compare row and column * @return True if same row and column. */ public boolean matchingRowColumn(final Cell left, @@ -2121,8 +2119,8 @@ public boolean matchingRowColumn(final Cell left, /** * Compares the row of two keyvalues for equality - * @param left - * @param right + * @param left left cell to compare row + * @param right right cell to compare row * @return True if rows match. 
*/ public boolean matchingRows(final Cell left, final Cell right) { @@ -2132,10 +2130,10 @@ public boolean matchingRows(final Cell left, final Cell right) { } /** - * @param left - * @param lrowlength - * @param right - * @param rrowlength + * @param left left cell to compare row + * @param lrowlength left row length + * @param right right cell to compare row + * @param rrowlength right row length * @return True if rows match. */ private boolean matchingRows(final Cell left, final short lrowlength, @@ -2179,8 +2177,8 @@ public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock /** * This is a HFile block index key optimization. - * @param leftKey - * @param rightKey + * @param leftKey byte array for left Key + * @param rightKey byte array for right Key * @return 0 if equal, <0 if left smaller, >0 if right smaller * @deprecated Since 0.99.2; */ @@ -2259,10 +2257,10 @@ protected Object clone() throws CloneNotSupportedException { /** * @param in Where to read bytes from. Creates a byte array to hold the KeyValue - * backing bytes copied from the steam. + * backing bytes copied from the steam. * @return KeyValue created by deserializing from in OR if we find a length - * of zero, we will return null which can be useful marking a stream as done. - * @throws IOException + * of zero, we will return null which can be useful marking a stream as done. + * @throws IOException if any IO error happen */ public static KeyValue create(final DataInput in) throws IOException { return create(in.readInt(), in); @@ -2270,16 +2268,18 @@ public static KeyValue create(final DataInput in) throws IOException { /** * Create a KeyValue reading length from in - * @param length - * @param in + * @param length length of the Key + * @param in Input to read from * @return Created KeyValue OR if we find a length of zero, we will return null which - * can be useful marking a stream as done. - * @throws IOException + * can be useful marking a stream as done. + * @throws IOException if any IO error happen */ public static KeyValue create(int length, final DataInput in) throws IOException { if (length <= 0) { - if (length == 0) return null; + if (length == 0) { + return null; + } throw new IOException("Failed read " + length + " bytes, stream corrupt?"); } @@ -2291,10 +2291,10 @@ public static KeyValue create(int length, final DataInput in) throws IOException /** * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. 
- * @param kv - * @param out + * @param kv the KeyValue on which write is being requested + * @param out OutputStream to write keyValue to * @return Length written on stream - * @throws IOException + * @throws IOException if any IO error happen * @see #create(DataInput) for the inverse function */ public static long write(final KeyValue kv, final DataOutput out) throws IOException { @@ -2310,11 +2310,11 @@ public static long write(final KeyValue kv, final DataOutput out) throws IOExcep * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do * not require a {@link DataOutput}, just take plain {@link OutputStream} * Named oswrite so does not clash with {@link #write(KeyValue, DataOutput)} - * @param kv - * @param out - * @param withTags + * @param kv the KeyValue on which write is being requested + * @param out OutputStream to write keyValue to + * @param withTags boolean value indicating write is with Tags or not * @return Length written on stream - * @throws IOException + * @throws IOException if any IO error happen * @see #create(DataInput) for the inverse function * @see #write(KeyValue, DataOutput) * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean) @@ -2431,9 +2431,9 @@ public int getKeyOffset() { /** * A setter that helps to avoid object creation every time and whenever * there is a need to create new KeyOnlyKeyValue. - * @param key - * @param offset - * @param length + * @param key Key to set + * @param offset Offset of the Key + * @param length length of the Key */ public void setKey(byte[] key, int offset, int length) { this.bytes = key; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java new file mode 100644 index 000000000000..186ad10da4bb --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hbase.io.compress; + +import org.apache.hadoop.conf.Configuration; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This is a marker interface that indicates if a compressor or decompressor + * type can support reinitialization via reinit(Configuration conf). 
+ */ +@InterfaceAudience.Private +public interface CanReinit { + + void reinit(Configuration conf); + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java index d411fd7a9eab..8bff2944cccd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java @@ -22,12 +22,9 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.io.util.BlockIOUtils; -import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionInputStream; @@ -521,37 +518,6 @@ public static String[] getSupportedAlgorithms() { return ret; } - /** - * Decompresses data from the given stream using the configured compression algorithm. It will - * throw an exception if the dest buffer does not have enough space to hold the decompressed data. - * @param dest the output buffer - * @param bufferedBoundedStream a stream to read compressed data from, bounded to the exact amount - * of compressed data - * @param uncompressedSize uncompressed data size, header not included - * @param compressAlgo compression algorithm used - * @throws IOException if any IO error happen - */ - public static void decompress(ByteBuff dest, InputStream bufferedBoundedStream, - int uncompressedSize, Compression.Algorithm compressAlgo) throws IOException { - if (dest.remaining() < uncompressedSize) { - throw new IllegalArgumentException("Output buffer does not have enough space to hold " - + uncompressedSize + " decompressed bytes, available: " + dest.remaining()); - } - - Decompressor decompressor = null; - try { - decompressor = compressAlgo.getDecompressor(); - try (InputStream is = - compressAlgo.createDecompressionStream(bufferedBoundedStream, decompressor, 0)) { - BlockIOUtils.readFullyWithHeapBuffer(is, dest, uncompressedSize); - } - } finally { - if (decompressor != null) { - compressAlgo.returnDecompressor(decompressor); - } - } - } - /** * Load a codec implementation for an algorithm using the supplied configuration. 
* @param conf the configuration to use diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java index becff7665b05..70b959a1172a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java @@ -19,7 +19,9 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class CompressionUtil { +public final class CompressionUtil { + + private CompressionUtil() { } /** * Round up to the next power of two, unless the value would become negative (ints diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java new file mode 100644 index 000000000000..f9508bf481e9 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hbase.io.compress; + +import java.io.ByteArrayOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; + +/** + * A utility class for managing compressor/decompressor dictionary loading and caching of load + * results. Useful for any codec that can support changing dictionaries at runtime, + * such as ZStandard. + */ +@InterfaceAudience.Private +public final class DictionaryCache { + + public static final String DICTIONARY_MAX_SIZE_KEY = "hbase.io.compress.dictionary.max.size"; + public static final int DEFAULT_DICTIONARY_MAX_SIZE = 10 * 1024 * 1024; + public static final String RESOURCE_SCHEME = "resource://"; + + private static final Logger LOG = LoggerFactory.getLogger(DictionaryCache.class); + private static volatile LoadingCache CACHE; + + private DictionaryCache() { } + + /** + * Load a dictionary or return a previously cached load. 
+ * @param conf configuration + * @param path the hadoop Path where the dictionary is located, as a String + * @return the dictionary bytes if successful, null otherwise + */ + public static byte[] getDictionary(final Configuration conf, final String path) + throws IOException { + if (path == null || path.isEmpty()) { + return null; + } + // Create the dictionary loading cache if we haven't already + if (CACHE == null) { + synchronized (DictionaryCache.class) { + if (CACHE == null) { + final int maxSize = conf.getInt(DICTIONARY_MAX_SIZE_KEY, DEFAULT_DICTIONARY_MAX_SIZE); + CACHE = CacheBuilder.newBuilder() + .maximumSize(100) + .expireAfterAccess(10, TimeUnit.MINUTES) + .build( + new CacheLoader<String, byte[]>() { + @Override + public byte[] load(String s) throws Exception { + byte[] bytes; + if (path.startsWith(RESOURCE_SCHEME)) { + bytes = loadFromResource(conf, path, maxSize); + } else { + bytes = loadFromHadoopFs(conf, path, maxSize); + } + LOG.info("Loaded dictionary from {} (size {})", s, bytes.length); + return bytes; + } + }); + } + } + } + + // Get or load the dictionary for the given path + try { + return CACHE.get(path); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + + // Visible for testing + public static byte[] loadFromResource(final Configuration conf, final String s, + final int maxSize) throws IOException { + if (!s.startsWith(RESOURCE_SCHEME)) { + throw new IOException("Path does not start with " + RESOURCE_SCHEME); + } + final String path = s.substring(RESOURCE_SCHEME.length(), s.length()); + LOG.info("Loading resource {}", path); + final InputStream in = DictionaryCache.class.getClassLoader().getResourceAsStream(path); + if (in == null) { + throw new FileNotFoundException("Resource " + path + " not found"); + } + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + final byte[] buffer = new byte[8192]; + int n, len = 0; + do { + n = in.read(buffer); + if (n > 0) { + len += n; + if (len > maxSize) { + throw new IOException("Dictionary " + s + " is too large, limit=" + maxSize); + } + baos.write(buffer, 0, n); + } + } while (n > 0); + } finally { + in.close(); + } + return baos.toByteArray(); + } + + private static byte[] loadFromHadoopFs(final Configuration conf, final String s, + final int maxSize) throws IOException { + final Path path = new Path(s); + final FileSystem fs = FileSystem.get(path.toUri(), conf); + LOG.info("Loading file {}", path); + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + final FSDataInputStream in = fs.open(path); + try { + final byte[] buffer = new byte[8192]; + int n, len = 0; + do { + n = in.read(buffer); + if (n > 0) { + len += n; + if (len > maxSize) { + throw new IOException("Dictionary " + s + " is too large, limit=" + maxSize); + } + baos.write(buffer, 0, n); + } + } while (n > 0); + } finally { + in.close(); + } + return baos.toByteArray(); + } + + // Visible for testing + public static boolean contains(String dictionaryPath) { + if (CACHE != null) { + return CACHE.asMap().containsKey(dictionaryPath); + } + return false; + } + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java index e96b800e7ef0..9fa867762038 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java @@ -18,6 +18,8 @@ import
java.io.IOException; import java.nio.ByteBuffer; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; @@ -29,14 +31,15 @@ public abstract class AbstractDataBlockEncoder implements DataBlockEncoder { @Override - public HFileBlockEncodingContext newDataBlockEncodingContext( + public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, DataBlockEncoding encoding, byte[] header, HFileContext meta) { - return new HFileBlockDefaultEncodingContext(encoding, header, meta); + return new HFileBlockDefaultEncodingContext(conf, encoding, header, meta); } @Override - public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) { - return new HFileBlockDefaultDecodingContext(meta); + public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, + HFileContext meta) { + return new HFileBlockDefaultDecodingContext(conf, meta); } protected void postEncoding(HFileBlockEncodingContext encodingCtx) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index a6aafead500c..98225bd5dfae 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -20,6 +20,8 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -91,6 +93,8 @@ ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext dec /** * Creates a encoder specific encoding context * + * @param conf + * store configuration * @param encoding * encoding strategy used * @param headerBytes @@ -100,18 +104,20 @@ ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext dec * HFile meta data * @return a newly created encoding context */ - HFileBlockEncodingContext newDataBlockEncodingContext( + HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, DataBlockEncoding encoding, byte[] headerBytes, HFileContext meta); /** * Creates an encoder specific decoding context, which will prepare the data * before actual decoding * + * @param conf + * store configuration * @param meta * HFile meta data * @return a newly created decoding context */ - HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta); + HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta); /** * An interface which enable to seek while underlying data is encoded. 
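As a usage sketch for reviewers (illustrative only, not part of the patch): the store Configuration now threaded through newDataBlockEncodingContext/newDataBlockDecodingContext is the same Configuration a dictionary-capable codec can hand to the new DictionaryCache. The property name below comes from the zstd changes later in this diff; the HDFS location is an assumed example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.compress.DictionaryCache;

    public class DictionaryCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed dictionary location; DictionaryCache also accepts resource:// paths
        // that resolve against the classpath.
        conf.set("hbase.io.compress.zstd.dictionary", "hdfs://example-nn/dicts/mytable.zstd.dict");
        // The first call reads and size-checks the file (hbase.io.compress.dictionary.max.size);
        // later calls for the same path are served from the internal LoadingCache.
        byte[] dict = DictionaryCache.getDictionary(conf,
          conf.get("hbase.io.compress.zstd.dictionary"));
        System.out.println("loaded dictionary of " + (dict == null ? 0 : dict.length) + " bytes");
      }
    }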
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java index 0f19254e34a6..62caccc6366c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import org.apache.commons.lang3.NotImplementedException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -57,6 +58,7 @@ public class EncodedDataBlock { private HFileContext meta; private final DataBlockEncoding encoding; + private final Configuration conf; // The is for one situation that there are some cells includes tags and others are not. // isTagsLenZero stores if cell tags length is zero before doing encoding since we need @@ -68,21 +70,23 @@ public class EncodedDataBlock { /** * Create a buffer which will be encoded using dataBlockEncoder. + * @param conf store configuration * @param dataBlockEncoder Algorithm used for compression. * @param encoding encoding type used - * @param rawKVs - * @param meta + * @param rawKVs raw KVs + * @param meta hfile context */ - public EncodedDataBlock(DataBlockEncoder dataBlockEncoder, DataBlockEncoding encoding, - byte[] rawKVs, HFileContext meta) { + public EncodedDataBlock(Configuration conf, DataBlockEncoder dataBlockEncoder, + DataBlockEncoding encoding, byte[] rawKVs, HFileContext meta) { Preconditions.checkNotNull(encoding, "Cannot create encoded data block with null encoder"); this.dataBlockEncoder = dataBlockEncoder; this.encoding = encoding; - encodingCtx = dataBlockEncoder.newDataBlockEncodingContext(encoding, + encodingCtx = dataBlockEncoder.newDataBlockEncodingContext(conf, encoding, HConstants.HFILEBLOCK_DUMMY_HEADER, meta); this.rawKVs = rawKVs; this.meta = meta; + this.conf = conf; } /** @@ -115,7 +119,7 @@ public Cell next() { if (decompressedData == null) { try { decompressedData = dataBlockEncoder.decodeKeyValues(dis, dataBlockEncoder - .newDataBlockDecodingContext(meta)); + .newDataBlockDecodingContext(conf, meta)); } catch (IOException e) { throw new RuntimeException("Problem with data block encoder, " + "most likely it requested more bytes than are available.", e); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java index e321a259c2de..5c7f6edd3cd9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java @@ -20,8 +20,10 @@ import java.io.IOException; import java.io.InputStream; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.io.TagCompressionContext; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Decryptor; @@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.io.util.BlockIOUtils; import org.apache.hadoop.hbase.nio.ByteBuff; import 
org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; /** @@ -41,10 +44,12 @@ */ @InterfaceAudience.Private public class HFileBlockDefaultDecodingContext implements HFileBlockDecodingContext { + private final Configuration conf; private final HFileContext fileContext; private TagCompressionContext tagCompressionContext; - public HFileBlockDefaultDecodingContext(HFileContext fileContext) { + public HFileBlockDefaultDecodingContext(Configuration conf, HFileContext fileContext) { + this.conf = conf; this.fileContext = fileContext; } @@ -87,8 +92,24 @@ public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWit Compression.Algorithm compression = fileContext.getCompression(); if (compression != Compression.Algorithm.NONE) { - Compression.decompress(blockBufferWithoutHeader, dataInputStream, - uncompressedSizeWithoutHeader, compression); + Decompressor decompressor = null; + try { + decompressor = compression.getDecompressor(); + // Some algorithms don't return decompressors and accept null as a valid parameter for + // same when creating decompression streams. We can ignore these cases wrt reinit. + if (decompressor instanceof CanReinit) { + ((CanReinit)decompressor).reinit(conf); + } + try (InputStream is = + compression.createDecompressionStream(dataInputStream, decompressor, 0)) { + BlockIOUtils.readFullyWithHeapBuffer(is, blockBufferWithoutHeader, + uncompressedSizeWithoutHeader); + } + } finally { + if (decompressor != null) { + compression.returnDecompressor(decompressor); + } + } } else { BlockIOUtils.readFullyWithHeapBuffer(dataInputStream, blockBufferWithoutHeader, onDiskSizeWithoutHeader); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java index 169f97915b26..8d9e6824fa43 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.io.InputStream; import java.security.SecureRandom; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.TagCompressionContext; import org.apache.hadoop.hbase.io.compress.Compression; @@ -72,18 +74,26 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte private EncodingState encoderState; /** + * @param conf configuraton * @param encoding encoding used * @param headerBytes dummy header bytes * @param fileContext HFile meta data */ - public HFileBlockDefaultEncodingContext(DataBlockEncoding encoding, byte[] headerBytes, - HFileContext fileContext) { + public HFileBlockDefaultEncodingContext(Configuration conf, DataBlockEncoding encoding, + byte[] headerBytes, HFileContext fileContext) { this.encodingAlgo = encoding; this.fileContext = fileContext; Compression.Algorithm compressionAlgorithm = fileContext.getCompression() == null ? NONE : fileContext.getCompression(); if (compressionAlgorithm != NONE) { - compressor = compressionAlgorithm.getCompressor(); + if (compressor == null) { + compressor = compressionAlgorithm.getCompressor(); + // Some algorithms don't return compressors and accept null as a valid parameter for + // same when creating compression streams. 
We can ignore these cases wrt reinit. + if (compressor != null) { + compressor.reinit(conf); + } + } compressedByteStream = new ByteArrayOutputStream(); try { compressionStream = diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java index a25791e5ce83..f9000ed953ad 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java @@ -181,6 +181,9 @@ public byte getByteAfterPosition(int offset) { * Returns in which sub ByteBuffer, the given element index will be available. */ private int getItemIndex(int elemIndex) { + if (elemIndex < 0) { + throw new IndexOutOfBoundsException(); + } int index = 1; while (elemIndex >= this.itemBeginPos[index]) { index++; @@ -721,15 +724,16 @@ public MultiByteBuff put(byte b) { } /** - * Writes a byte to this MBB at the given index - * @param index - * @param b + * Writes a byte to this MBB at the given index and won't affect the position of any of the + * buffers. * @return this object + * @throws IndexOutOfBoundsException If index is negative or not smaller than the + * {@link MultiByteBuff#limit} */ @Override public MultiByteBuff put(int index, byte b) { checkRefCount(); - int itemIndex = getItemIndex(limit); + int itemIndex = getItemIndex(index); ByteBuffer item = items[itemIndex]; item.put(index - itemBeginPos[itemIndex], b); return this; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java index fb37080a5fb9..74e6daa1ed77 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java @@ -31,14 +31,13 @@ import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Version; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class TraceUtil { - private static final String INSTRUMENTATION_NAME = "io.opentelemetry.contrib.hbase"; - public static final AttributeKey<String> NAMESPACE_KEY = SemanticAttributes.DB_HBASE_NAMESPACE; public static final AttributeKey<String> TABLE_KEY = AttributeKey.stringKey("db.hbase.table"); @@ -68,7 +67,7 @@ private TraceUtil() { } public static Tracer getGlobalTracer() { - return GlobalOpenTelemetry.getTracer(INSTRUMENTATION_NAME); + return GlobalOpenTelemetry.getTracer("org.apache.hbase", Version.version); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java index 46042c2500be..891ad355a35f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java @@ -152,6 +152,21 @@ public static boolean isLocalAddress(InetAddress addr) { return local; } + /** + * Given an InetSocketAddress object, returns a String representation of it. + * This is a utility method for Java 17. The toString() method of InetSocketAddress + * flags an unresolved address with an extra substring in the output, which can result + * in unexpected problems. We should use this utility method to get the string when we are + * not sure whether the input address is resolved or not.
+ * @param address address to convert to a "host:port" String. + * @return the String represent of the given address, like "foo:1234". + */ + public static String inetSocketAddress2String(InetSocketAddress address) { + return address.isUnresolved() ? + address.toString().replace("/", "") : + address.toString(); + } + /** * Interface for AddressSelectionCondition to check if address is acceptable */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index d270d63d635c..eb0cfcec8ad5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -388,8 +388,7 @@ public static long readVLong(ByteBuff in) { * @return Number of bytes written. * @throws IOException on stream error */ - public static int putCompressedInt(OutputStream out, final int value) - throws IOException { + public static int putCompressedInt(OutputStream out, final int value) throws IOException { int i = 0; int tmpvalue = value; do { @@ -404,23 +403,23 @@ public static int putCompressedInt(OutputStream out, final int value) return i; } - /** - * Put in output stream 32 bit integer (Big Endian byte order). - * @param out Where to put integer. - * @param value Value of integer. - * @throws IOException On stream error. - */ - public static void putInt(OutputStream out, final int value) - throws IOException { - // We have writeInt in ByteBufferOutputStream so that it can directly write - // int to underlying - // ByteBuffer in one step. - if (out instanceof ByteBufferWriter) { - ((ByteBufferWriter) out).writeInt(value); - } else { - StreamUtils.writeInt(out, value); - } - } + /** + * Put in output stream 32 bit integer (Big Endian byte order). + * + * @param out Where to put integer. + * @param value Value of integer. + * @throws IOException On stream error. + */ + public static void putInt(OutputStream out, final int value) throws IOException { + // We have writeInt in ByteBufferOutputStream so that it can directly write + // int to underlying + // ByteBuffer in one step. + if (out instanceof ByteBufferWriter) { + ((ByteBufferWriter) out).writeInt(value); + } else { + StreamUtils.writeInt(out, value); + } + } public static byte toByte(ByteBuffer buffer, int offset) { if (UNSAFE_AVAIL) { @@ -566,7 +565,7 @@ public static int intFitsIn(final int value) { /** * Read integer from stream coded in 7 bits and increment position. * @return the integer that has been read - * @throws IOException + * @throws IOException on stream error */ public static int readCompressedInt(InputStream input) throws IOException { @@ -601,7 +600,7 @@ public static int readCompressedInt(ByteBuffer buffer) { * Read long which was written to fitInBytes bytes and increment position. * @param fitInBytes In how many bytes given long is stored. * @return The value of parsed long. - * @throws IOException + * @throws IOException on stream error */ public static long readLong(InputStream in, final int fitInBytes) throws IOException { @@ -682,11 +681,11 @@ public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out) { /** * Copy from one buffer to another from given offset. This will be absolute positional copying and * won't affect the position of any of the buffers. 
- * @param in - * @param out - * @param sourceOffset - * @param destinationOffset - * @param length + * @param in input bytebuffer + * @param out destination bytebuffer + * @param sourceOffset offset of source buffer + * @param destinationOffset offset of destination buffer + * @param length the number of bytes to copy */ public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out, int sourceOffset, int destinationOffset, int length) { @@ -837,9 +836,9 @@ public static byte[] toBytes(ByteBuffer buffer, int startPosition) { /** * Copy the given number of bytes from specified offset into a new byte[] - * @param buffer - * @param offset - * @param length + * @param buffer input bytebuffer to read + * @param offset input offset where Bytes are + * @param length the number of bytes to read * @return a new byte[] containing the bytes in the specified range */ public static byte[] toBytes(ByteBuffer buffer, int offset, int length) { @@ -858,7 +857,9 @@ public static boolean equals(ByteBuffer buf1, int o1, int l1, ByteBuffer buf2, i // Since we're often comparing adjacent sorted data, // it's usual to have equal arrays except for the very last byte // so check that first - if (toByte(buf1, o1 + l1 - 1) != toByte(buf2, o2 + l2 - 1)) return false; + if (toByte(buf1, o1 + l1 - 1) != toByte(buf2, o2 + l2 - 1)) { + return false; + } return compareTo(buf1, o1, l1, buf2, o2, l2) == 0; } @@ -890,7 +891,9 @@ public static boolean equals(ByteBuffer buf1, int o1, int l1, byte[] buf2, int o // Since we're often comparing adjacent sorted data, // it's usual to have equal arrays except for the very last byte // so check that first - if (toByte(buf1, o1 + l1 - 1) != buf2[o2 + l2 - 1]) return false; + if (toByte(buf1, o1 + l1 - 1) != buf2[o2 + l2 - 1]) { + return false; + } return compareTo(buf1, o1, l1, buf2, o2, l2) == 0; } @@ -952,8 +955,8 @@ static int compareToUnsafe(Object obj1, long o1, int l1, Object obj2, long o2, i /** * Reads a short value at the given buffer's offset. - * @param buffer - * @param offset + * @param buffer input byte buffer to read + * @param offset input offset where short is * @return short value at offset */ public static short toShort(ByteBuffer buffer, int offset) { @@ -969,8 +972,8 @@ public static int toInt(ByteBuffer buffer) { /** * Reads an int value at the given buffer's offset. - * @param buffer - * @param offset + * @param buffer input byte buffer to read + * @param offset input offset where int is * @return int value at offset */ public static int toInt(ByteBuffer buffer, int offset) { @@ -1002,8 +1005,8 @@ public static int readAsInt(ByteBuffer buf, int offset, final int length) { /** * Reads a long value at the given buffer's offset. - * @param buffer - * @param offset + * @param buffer input byte buffer to read + * @param offset input offset where Long is * @return long value at offset */ public static long toLong(ByteBuffer buffer, int offset) { @@ -1026,7 +1029,7 @@ public static int putInt(ByteBuffer buffer, int index, int val) { /** * Reads a double value at the given buffer's offset. - * @param buffer + * @param buffer input byte buffer to read * @param offset offset where double is * @return double value at offset */ @@ -1036,8 +1039,8 @@ public static double toDouble(ByteBuffer buffer, int offset) { /** * Reads a BigDecimal value at the given buffer's offset. 
- * @param buffer - * @param offset + * @param buffer input bytebuffer to read + * @param offset input offset * @return BigDecimal value at offset */ public static BigDecimal toBigDecimal(ByteBuffer buffer, int offset, int length) { @@ -1090,10 +1093,10 @@ public static int putLong(ByteBuffer buffer, int index, long val) { /** * Copies the bytes from given array's offset to length part into the given buffer. Puts the bytes * to buffer's current position. This also advances the position in the 'out' buffer by 'length' - * @param out - * @param in - * @param inOffset - * @param length + * @param out output bytebuffer to copy to + * @param in input array to copy from + * @param inOffset input offset to copy from + * @param length the number of bytes to copy */ public static void copyFromArrayToBuffer(ByteBuffer out, byte[] in, int inOffset, int length) { if (out.hasArray()) { @@ -1111,11 +1114,12 @@ public static void copyFromArrayToBuffer(ByteBuffer out, byte[] in, int inOffset /** * Copies bytes from given array's offset to length part into the given buffer. Puts the bytes - * to buffer's given position. This doesn't affact the position of buffer. - * @param out - * @param in - * @param inOffset - * @param length + * to buffer's given position. This doesn't affect the position of buffer. + * @param out output bytebuffer to copy to + * @param outOffset output buffer offset + * @param in input array to copy from + * @param inOffset input offset to copy from + * @param length the number of bytes to copy */ public static void copyFromArrayToBuffer(ByteBuffer out, int outOffset, byte[] in, int inOffset, int length) { @@ -1132,12 +1136,12 @@ public static void copyFromArrayToBuffer(ByteBuffer out, int outOffset, byte[] i /** * Copies specified number of bytes from given offset of 'in' ByteBuffer to - * the array. This doesn't affact the position of buffer. - * @param out - * @param in - * @param sourceOffset - * @param destinationOffset - * @param length + * the array. This doesn't affect the position of buffer. + * @param out output array to copy input bytebuffer to + * @param in input bytebuffer to copy from + * @param sourceOffset offset of source bytebuffer + * @param destinationOffset offset of destination array + * @param length the number of bytes to copy */ public static void copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOffset, int destinationOffset, int length) { @@ -1161,7 +1165,9 @@ public static void copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOf */ public static byte[] copyOfRange(ByteBuffer original, int from, int to) { int newLength = to - from; - if (newLength < 0) throw new IllegalArgumentException(from + " > " + to); + if (newLength < 0) { + throw new IllegalArgumentException(from + " > " + to); + } byte[] copy = new byte[newLength]; ByteBufferUtils.copyFromBufferToArray(copy, original, from, 0, newLength); return copy; @@ -1171,10 +1177,12 @@ public static byte[] copyOfRange(ByteBuffer original, int from, int to) { public static String toStringBinary(final ByteBuffer b, int off, int len) { StringBuilder result = new StringBuilder(); // Just in case we are passed a 'len' that is > buffer length... 
- if (off >= b.capacity()) + if (off >= b.capacity()) { return result.toString(); - if (off + len > b.capacity()) + } + if (off + len > b.capacity()) { len = b.capacity() - off; + } for (int i = off; i < off + len; ++i) { int ch = b.get(i) & 0xFF; if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java index b983fc0f3db6..4e6005637bf7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -257,7 +257,11 @@ public static boolean useUnsafeLayout() { LINKEDLIST_ENTRY = align(OBJECT + (2 * REFERENCE)); //noinspection PointlessArithmeticExpression - BYTE_BUFFER = align(OBJECT + REFERENCE + + BYTE_BUFFER = JVM.getJVMSpecVersion() < 17 ? + align(OBJECT + REFERENCE + + (5 * Bytes.SIZEOF_INT) + + (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG) + align(ARRAY) : + align(OBJECT + 2 * REFERENCE + (5 * Bytes.SIZEOF_INT) + (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG) + align(ARRAY); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java index 6657481ed02b..8bd70e486fbc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java @@ -60,6 +60,21 @@ public class JVM { private static final String JVMVersion = System.getProperty("java.version"); + /** + * The raw String of java specification version. + * "1.8" for java8, "9","10"... for Java 9, 10... + */ + private static final String JVM_SPEC_VERSION_STRING = + System.getProperty("java.specification.version"); + + /** + * The Integer represent of JVM_SPEC_VERSION, for the JVM version comparison. + * Java 8, 9, 10 ... will be noted as 8, 9 10 ... + */ + private static final int JVM_SPEC_VERSION = JVM_SPEC_VERSION_STRING.contains(".") ? + (int) (Float.parseFloat(JVM_SPEC_VERSION_STRING) * 10 % 10) : + Integer.parseInt(JVM_SPEC_VERSION_STRING); + /** * Constructor. Get the running Operating System instance */ @@ -106,6 +121,10 @@ public static boolean isGZIPOutputStreamFinishBroken() { return ibmvendor && JVMVersion.contains("1.6.0"); } + public static int getJVMSpecVersion() { + return JVM_SPEC_VERSION; + } + /** * Load the implementation of UnixOperatingSystemMXBean for Oracle jvm * and runs the desired method. 
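The ClassSize change above keys off the new JVM.getJVMSpecVersion() helper, which parses java.specification.version in both its legacy "1.x" form and the plain major number used since Java 9. A small self-contained check of that expression (class and method names here are illustrative, not from the patch):

    public class SpecVersionCheck {
      // Same expression as the JVM_SPEC_VERSION initializer above:
      // "1.8" maps to 8, while "9", "11", "17" parse directly to 9, 11, 17.
      static int parseSpecVersion(String v) {
        return v.contains(".")
          ? (int) (Float.parseFloat(v) * 10 % 10)
          : Integer.parseInt(v);
      }

      public static void main(String[] args) {
        System.out.println(parseSpecVersion("1.8")); // prints 8
        System.out.println(parseSpecVersion("17"));  // prints 17
      }
    }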
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java index 616bf0b25fef..fddff46e0968 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java @@ -17,12 +17,10 @@ package org.apache.hadoop.hbase.io.compress; import static org.junit.Assert.assertTrue; - import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.util.Arrays; import java.util.Random; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -31,6 +29,8 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.CompressionOutputStream; +import org.apache.hadoop.io.compress.Compressor; +import org.apache.hadoop.io.compress.Decompressor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,11 +39,11 @@ public class CompressionTestBase { protected static final Logger LOG = LoggerFactory.getLogger(CompressionTestBase.class); - static final int LARGE_SIZE = 10 * 1024 * 1024; - static final int VERY_LARGE_SIZE = 100 * 1024 * 1024; - static final int BLOCK_SIZE = 4096; + protected static final int LARGE_SIZE = 10 * 1024 * 1024; + protected static final int VERY_LARGE_SIZE = 100 * 1024 * 1024; + protected static final int BLOCK_SIZE = 4096; - static final byte[] SMALL_INPUT; + protected static final byte[] SMALL_INPUT; static { // 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597 SMALL_INPUT = new byte[1+1+2+3+5+8+13+21+34+55+89+144+233+377+610+987+1597]; @@ -67,15 +67,20 @@ public class CompressionTestBase { Arrays.fill(SMALL_INPUT, off, (off+=1597), (byte)'Q'); } - protected void codecTest(final CompressionCodec codec, final byte[][] input) - throws Exception { + protected void codecTest(final CompressionCodec codec, final byte[][] input) throws Exception { + codecTest(codec, input, null); + } + + protected void codecTest(final CompressionCodec codec, final byte[][] input, + final Integer expectedCompressedSize) throws Exception { // We do this in Compression.java ((Configurable)codec).getConf().setInt("io.file.buffer.size", 32 * 1024); // Compress + long start = EnvironmentEdgeManager.currentTime(); + Compressor compressor = codec.createCompressor(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); - CompressionOutputStream out = codec.createOutputStream(baos); + CompressionOutputStream out = codec.createOutputStream(baos, compressor); int inLen = 0; - long start = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < input.length; i++) { out.write(input[i]); inLen += input[i].length; @@ -85,9 +90,15 @@ protected void codecTest(final CompressionCodec codec, final byte[][] input) final byte[] compressed = baos.toByteArray(); LOG.info("{} compressed {} bytes to {} bytes in {} ms", codec.getClass().getSimpleName(), inLen, compressed.length, end - start); + if (expectedCompressedSize != null) { + assertTrue("Expected compressed size does not match: (expected=" + expectedCompressedSize + + ", actual=" + compressed.length + ")", expectedCompressedSize == compressed.length); + } // Decompress final byte[] plain = new byte[inLen]; - CompressionInputStream in = codec.createInputStream(new 
ByteArrayInputStream(compressed)); + Decompressor decompressor = codec.createDecompressor(); + CompressionInputStream in = codec.createInputStream(new ByteArrayInputStream(compressed), + decompressor); start = EnvironmentEdgeManager.currentTime(); IOUtils.readFully(in, plain, 0, plain.length); in.close(); @@ -113,29 +124,37 @@ protected void codecSmallTest(final CompressionCodec codec) throws Exception { /** * Test with a large input (1MB) divided into blocks of 4KB. */ - protected void codecLargeTest(final CompressionCodec codec, final double sigma) throws Exception { - RandomDistribution.DiscreteRNG zipf = + protected void codecLargeTest(final CompressionCodec codec, final double sigma) + throws Exception { + RandomDistribution.DiscreteRNG rng = new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, sigma); final byte[][] input = new byte[LARGE_SIZE/BLOCK_SIZE][BLOCK_SIZE]; - for (int i = 0; i < input.length; i++) { - for (int j = 0; j < input[i].length; j++) { - input[i][j] = (byte)zipf.nextInt(); - } - } + fill(rng, input); codecTest(codec, input); } /** * Test with a very large input (100MB) as a single input buffer. */ - protected void codecVeryLargeTest(final CompressionCodec codec, final double sigma) throws Exception { - RandomDistribution.DiscreteRNG zipf = + protected void codecVeryLargeTest(final CompressionCodec codec, final double sigma) + throws Exception { + RandomDistribution.DiscreteRNG rng = new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, sigma); final byte[][] input = new byte[1][VERY_LARGE_SIZE]; - for (int i = 0; i < VERY_LARGE_SIZE; i++) { - input[0][i] = (byte)zipf.nextInt(); - } + fill(rng, input); codecTest(codec, input); } + protected static void fill(RandomDistribution.DiscreteRNG rng, byte[][] input) { + for (int i = 0; i < input.length; i++) { + fill(rng, input[i]); + } + } + + protected static void fill(RandomDistribution.DiscreteRNG rng, byte[] input) { + for (int i = 0; i < input.length; i++) { + input[i] = (byte) rng.nextInt(); + } + } + } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java index 563f82a1cd99..b40ac0c22a88 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java @@ -594,4 +594,58 @@ private void doTestPositionalPutByteBuff(ByteBuff srcByteBuff) throws Exception assertTrue(e != null); } } + + @Test + public void testPositionalPutByte() throws Exception { + ByteBuffer bb1 = ByteBuffer.allocate(50); + ByteBuffer bb2 = ByteBuffer.allocate(50); + ByteBuffer bb3 = ByteBuffer.allocate(50); + ByteBuffer bb4 = ByteBuffer.allocate(50); + MultiByteBuff srcMultiByteBuff = new MultiByteBuff(bb1, bb2, bb3, bb4); + for (int i = 1; i <= 200; i++) { + srcMultiByteBuff.put((byte) 0xff); + } + + srcMultiByteBuff.put(20, (byte) 0); + byte val = srcMultiByteBuff.get(20); + assertTrue(val == 0); + + srcMultiByteBuff.put(50, (byte) 0); + val = srcMultiByteBuff.get(50); + assertTrue(val == 0); + + srcMultiByteBuff.put(80, (byte) 0); + val = srcMultiByteBuff.get(80); + assertTrue(val == 0); + + srcMultiByteBuff.put(100, (byte) 0); + val = srcMultiByteBuff.get(100); + assertTrue(val == 0); + + srcMultiByteBuff.put(121, (byte) 0); + val = srcMultiByteBuff.get(121); + assertTrue(val == 0); + + srcMultiByteBuff.put(150, (byte) 0); + val = srcMultiByteBuff.get(150); + assertTrue(val == 0); + + 
srcMultiByteBuff.put(180, (byte) 0); + val = srcMultiByteBuff.get(180); + assertTrue(val == 0); + + try { + srcMultiByteBuff.put(200, (byte) 0); + fail(); + } catch (IndexOutOfBoundsException e) { + assertTrue(e != null); + } + + try { + srcMultiByteBuff.put(260, (byte) 0); + fail(); + } catch (IndexOutOfBoundsException e) { + assertTrue(e != null); + } + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestOrderedBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestOrderedBytes.java index 45b8e3547ce1..c8e0381969b2 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestOrderedBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestOrderedBytes.java @@ -1263,26 +1263,44 @@ public void testEncodedValueCheck() { int cnt = 0; PositionedByteRange buff = new SimplePositionedMutableByteRange(1024); for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) { - int o; - o = OrderedBytes.encodeNull(buff, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, negInf, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, negLarge, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, negMed, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, negSmall, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, zero, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, posSmall, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, posMed, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, posLarge, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, posInf, ord); cnt++; - o = OrderedBytes.encodeNumeric(buff, nan, ord); cnt++; - o = OrderedBytes.encodeInt8(buff, int8, ord); cnt++; - o = OrderedBytes.encodeInt16(buff, int16, ord); cnt++; - o = OrderedBytes.encodeInt32(buff, int32, ord); cnt++; - o = OrderedBytes.encodeInt64(buff, int64, ord); cnt++; - o = OrderedBytes.encodeFloat32(buff, float32, ord); cnt++; - o = OrderedBytes.encodeFloat64(buff, float64, ord); cnt++; - o = OrderedBytes.encodeString(buff, text, ord); cnt++; - o = OrderedBytes.encodeBlobVar(buff, blobVar, ord); cnt++; + OrderedBytes.encodeNull(buff, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, negInf, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, negLarge, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, negMed, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, negSmall, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, zero, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, posSmall, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, posMed, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, posLarge, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, posInf, ord); + cnt++; + OrderedBytes.encodeNumeric(buff, nan, ord); + cnt++; + OrderedBytes.encodeInt8(buff, int8, ord); + cnt++; + OrderedBytes.encodeInt16(buff, int16, ord); + cnt++; + OrderedBytes.encodeInt32(buff, int32, ord); + cnt++; + OrderedBytes.encodeInt64(buff, int64, ord); + cnt++; + OrderedBytes.encodeFloat32(buff, float32, ord); + cnt++; + OrderedBytes.encodeFloat64(buff, float64, ord); + cnt++; + OrderedBytes.encodeString(buff, text, ord); + cnt++; + OrderedBytes.encodeBlobVar(buff, blobVar, ord); + cnt++; } buff.setPosition(0); diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java index 284c8db56a69..43231c35b92e 100644 --- 
a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,7 +31,7 @@ */ @InterfaceAudience.Private public abstract class HadoopCompressor<T extends Compressor> - implements org.apache.hadoop.io.compress.Compressor { + implements CanReinit, org.apache.hadoop.io.compress.Compressor { protected static final Logger LOG = LoggerFactory.getLogger(HadoopCompressor.class); protected T compressor; diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java index 547fe1ddb947..1defda25a593 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.io.compress.aircompressor; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionLz4 extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.LZ4_CODEC_CLASS_KEY, Lz4Codec.class.getCanonicalName()); Compression.Algorithm.LZ4.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.LZ4); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.LZ4); } } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java index db0a79dc29e8..98ee5c04bafe 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java @@ -18,7 +18,9 @@ package 
org.apache.hadoop.hbase.io.compress.aircompressor; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionLzo extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionLzo.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.LZO_CODEC_CLASS_KEY, LzoCodec.class.getCanonicalName()); Compression.Algorithm.LZO.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.LZO); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.LZO); } } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java index 85b17b05d53c..a6d863b61a5e 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.io.compress.aircompressor; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionSnappy extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.SNAPPY_CODEC_CLASS_KEY, SnappyCodec.class.getCanonicalName()); Compression.Algorithm.SNAPPY.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.SNAPPY); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.SNAPPY); } } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java index 
692cc098b999..de0f4575e62a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.io.compress.aircompressor; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionZstd extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.ZSTD_CODEC_CLASS_KEY, ZstdCodec.class.getCanonicalName()); Compression.Algorithm.ZSTD.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.ZSTD); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.ZSTD); } } diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index cb7e5c16c244..b2bf0a247a27 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; @@ -31,7 +32,7 @@ * Hadoop compressor glue for lz4-java. 
*/ @InterfaceAudience.Private -public class Lz4Compressor implements Compressor { +public class Lz4Compressor implements CanReinit, Compressor { protected static final Logger LOG = LoggerFactory.getLogger(Lz4Compressor.class); protected LZ4Compressor compressor; diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java index 78c0f652d836..06c113dea5ee 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.io.compress.lz4; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionLz4 extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionLz4.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.LZ4_CODEC_CLASS_KEY, Lz4Codec.class.getCanonicalName()); Compression.Algorithm.LZ4.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.LZ4); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.LZ4); } } diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java index fc3056c66be5..082a52e1e723 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +31,7 @@ * Hadoop compressor glue for Xerial Snappy. 
*/ @InterfaceAudience.Private -public class SnappyCompressor implements Compressor { +public class SnappyCompressor implements CanReinit, Compressor { protected static final Logger LOG = LoggerFactory.getLogger(SnappyCompressor.class); protected ByteBuffer inBuf, outBuf; diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java index 094b43486972..15ed99ef65f1 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.io.compress.xerial; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +37,11 @@ public class TestHFileCompressionSnappy extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.SNAPPY_CODEC_CLASS_KEY, SnappyCodec.class.getCanonicalName()); Compression.Algorithm.SNAPPY.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +49,9 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.SNAPPY); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.SNAPPY); } } diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java index 04c7b5193a57..481c7287aa38 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java @@ -17,8 +17,12 @@ */ package org.apache.hadoop.hbase.io.compress.xz; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +39,11 @@ public class TestHFileCompressionLzma extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionLzma.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = 
TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName()); Compression.Algorithm.LZMA.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +51,26 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.LZMA); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.LZMA); + } + + @Test + public void testReconfLevels() throws Exception { + Path path_1 = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); + Path path_2 = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); + conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1); + doTest(conf, path_1, Compression.Algorithm.LZMA); + long len_1 = FS.getFileStatus(path_1).getLen(); + conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 9); + doTest(conf, path_2, Compression.Algorithm.LZMA); + long len_2 = FS.getFileStatus(path_2).getLen(); + LOG.info("Level 1 len {}", len_1); + LOG.info("Level 9 len {}", len_2); + assertTrue("Reconfiguraton with LZMA_LEVEL_KEY did not seem to work", len_1 > len_2); } } diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index fc5f445d29a2..07b26d0c4bf0 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -19,9 +19,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.io.compress.DictionaryCache; import org.apache.hadoop.io.compress.BlockCompressorStream; import org.apache.hadoop.io.compress.BlockDecompressorStream; import org.apache.hadoop.io.compress.CompressionCodec; @@ -41,6 +44,7 @@ public class ZstdCodec implements Configurable, CompressionCodec { public static final String ZSTD_LEVEL_KEY = "hbase.io.compress.zstd.level"; public static final String ZSTD_BUFFER_SIZE_KEY = "hbase.io.compress.zstd.buffersize"; + public static final String ZSTD_DICTIONARY_KEY = "hbase.io.compress.zstd.dictionary"; private Configuration conf; @@ -60,12 +64,12 @@ public void setConf(Configuration conf) { @Override public Compressor createCompressor() { - return new ZstdCompressor(getLevel(conf), getBufferSize(conf)); + return new ZstdCompressor(getLevel(conf), getBufferSize(conf), getDictionary(conf)); } @Override public Decompressor createDecompressor() { - return new ZstdDecompressor(getBufferSize(conf)); + return new ZstdDecompressor(getBufferSize(conf), getDictionary(conf)); } @Override @@ -123,4 +127,31 @@ static int getBufferSize(Configuration conf) { return size > 0 ? 
size : 256 * 1024; // Don't change this default } + static byte[] getDictionary(final Configuration conf) { + String path = conf.get(ZSTD_DICTIONARY_KEY); + try { + return DictionaryCache.getDictionary(conf, path); + } catch (IOException e) { + throw new RuntimeException("Unable to load dictionary at " + path, e); + } + } + + // Zstandard dictionaries begin with a 32-bit magic number, 0xEC30A437 in little-endian + // format, followed by a 32-bit identifier also in little-endian format. + // Reference: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md + + static boolean isDictionary(byte[] dictionary) { + return (dictionary[0] == (byte)0x37 && + dictionary[1] == (byte)0xA4 && + dictionary[2] == (byte)0x30 && + dictionary[3] == (byte)0xEC); + } + + static int getDictionaryId(byte[] dictionary) { + if (!isDictionary(dictionary)) { + throw new IllegalArgumentException("Not a ZStandard dictionary"); + } + return ByteBuffer.wrap(dictionary, 4, 4).order(ByteOrder.LITTLE_ENDIAN).getInt(); + } + } diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index ec508779c0e8..3ac514a5fb94 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -19,35 +19,47 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictCompress; /** * Hadoop compressor glue for zstd-jni. */ @InterfaceAudience.Private -public class ZstdCompressor implements Compressor { +public class ZstdCompressor implements CanReinit, Compressor { protected static final Logger LOG = LoggerFactory.getLogger(ZstdCompressor.class); protected int level, bufferSize; protected ByteBuffer inBuf, outBuf; protected boolean finish, finished; protected long bytesRead, bytesWritten; + protected int dictId; + protected ZstdDictCompress dict; - ZstdCompressor(int level, int bufferSize) { + ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) { this.level = level; this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); + if (dictionary != null) { + this.dictId = ZstdCodec.getDictionaryId(dictionary); + this.dict = new ZstdDictCompress(dictionary, level); + } + } + + ZstdCompressor(final int level, final int bufferSize) { + this(level, bufferSize, null); } @Override - public int compress(byte[] b, int off, int len) throws IOException { + public int compress(final byte[] b, final int off, final int len) throws IOException { // If we have previously compressed our input and still have some buffered bytes // remaining, provide them to the caller. 
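    // Note: each call drains previously buffered output before any new compression happens,
    // so a caller may need several calls to compress() to retrieve all of the output for the
    // input it has provided.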
if (outBuf.hasRemaining()) { @@ -71,7 +83,12 @@ public int compress(byte[] b, int off, int len) throws IOException { } else { outBuf.clear(); } - int written = Zstd.compress(outBuf, inBuf, level, true); + int written; + if (dict != null) { + written = Zstd.compress(outBuf, inBuf, dict); + } else { + written = Zstd.compress(outBuf, inBuf, level); + } bytesWritten += written; inBuf.clear(); LOG.trace("compress: compressed {} -> {} (level {})", uncompressed, written, level); @@ -125,17 +142,37 @@ public boolean needsInput() { } @Override - public void reinit(Configuration conf) { + public void reinit(final Configuration conf) { LOG.trace("reinit"); if (conf != null) { // Level might have changed - level = ZstdCodec.getLevel(conf); + boolean levelChanged = false; + int newLevel = ZstdCodec.getLevel(conf); + if (level != newLevel) { + LOG.trace("Level changed, was {} now {}", level, newLevel); + level = newLevel; + levelChanged = true; + } + // Dictionary may have changed + byte[] b = ZstdCodec.getDictionary(conf); + if (b != null) { + // Don't casually create dictionary objects; they consume native memory + int thisDictId = ZstdCodec.getDictionaryId(b); + if (dict == null || dictId != thisDictId || levelChanged) { + dictId = thisDictId; + dict = new ZstdDictCompress(b, level); + LOG.trace("Reloaded dictionary, new id is {}", dictId); + } + } else { + dict = null; + } // Buffer size might have changed int newBufferSize = ZstdCodec.getBufferSize(conf); if (bufferSize != newBufferSize) { bufferSize = newBufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); + LOG.trace("Resized buffers, new size is {}", bufferSize); } } reset(); @@ -154,12 +191,12 @@ public void reset() { } @Override - public void setDictionary(byte[] b, int off, int len) { + public void setDictionary(final byte[] b, final int off, final int len) { throw new UnsupportedOperationException("setDictionary is not supported"); } @Override - public void setInput(byte[] b, int off, int len) { + public void setInput(final byte[] b, final int off, final int len) { LOG.trace("setInput: off={} len={}", off, len); if (inBuf.remaining() < len) { // Get a new buffer that can accomodate the accumulated input plus the additional @@ -179,7 +216,7 @@ public void setInput(byte[] b, int off, int len) { // Package private - int maxCompressedLength(int len) { + static int maxCompressedLength(final int len) { return (int) Zstd.compressBound(len); } diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index 0c476cb39033..dfa37db636ae 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -18,32 +18,47 @@ import java.io.IOException; import java.nio.ByteBuffer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictDecompress; /** * Hadoop decompressor glue for 
zstd-java. */ @InterfaceAudience.Private -public class ZstdDecompressor implements Decompressor { +public class ZstdDecompressor implements CanReinit, Decompressor { protected static final Logger LOG = LoggerFactory.getLogger(ZstdDecompressor.class); protected ByteBuffer inBuf, outBuf; + protected int bufferSize; protected int inLen; protected boolean finished; + protected int dictId; + protected ZstdDictDecompress dict; - ZstdDecompressor(int bufferSize) { + ZstdDecompressor(final int bufferSize, final byte[] dictionary) { + this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); + if (dictionary != null) { + this.dictId = ZstdCodec.getDictionaryId(dictionary); + this.dict = new ZstdDictDecompress(dictionary); + } + } + + ZstdDecompressor(final int bufferSize) { + this(bufferSize, null); } @Override - public int decompress(byte[] b, int off, int len) throws IOException { + public int decompress(final byte[] b, final int off, final int len) throws IOException { if (outBuf.hasRemaining()) { int remaining = outBuf.remaining(), n = Math.min(remaining, len); outBuf.get(b, off, n); @@ -55,7 +70,12 @@ public int decompress(byte[] b, int off, int len) throws IOException { int remaining = inBuf.remaining(); inLen -= remaining; outBuf.clear(); - int written = Zstd.decompress(outBuf, inBuf); + int written; + if (dict != null) { + written = Zstd.decompress(outBuf, inBuf, dict); + } else { + written = Zstd.decompress(outBuf, inBuf); + } inBuf.clear(); LOG.trace("decompress: decompressed {} -> {}", remaining, written); outBuf.flip(); @@ -104,24 +124,24 @@ public void reset() { @Override public boolean needsInput() { - boolean b = (inBuf.position() == 0); + final boolean b = (inBuf.position() == 0); LOG.trace("needsInput: {}", b); return b; } @Override - public void setDictionary(byte[] b, int off, int len) { + public void setDictionary(final byte[] b, final int off, final int len) { throw new UnsupportedOperationException("setDictionary is not supported"); } @Override - public void setInput(byte[] b, int off, int len) { + public void setInput(final byte[] b, final int off, final int len) { LOG.trace("setInput: off={} len={}", off, len); if (inBuf.remaining() < len) { // Get a new buffer that can accomodate the accumulated input plus the additional // input that would cause a buffer overflow without reallocation. // This condition should be fortunately rare, because it is expensive. 
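    // When growth is required, the accumulated input is carried over into the larger
    // replacement buffer (sized by CompressionUtil.roundInt2) before the new bytes are added.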
- int needed = CompressionUtil.roundInt2(inBuf.capacity() + len); + final int needed = CompressionUtil.roundInt2(inBuf.capacity() + len); LOG.trace("setInput: resize inBuf {}", needed); ByteBuffer newBuf = ByteBuffer.allocateDirect(needed); inBuf.flip(); @@ -133,4 +153,33 @@ public void setInput(byte[] b, int off, int len) { finished = false; } + @Override + public void reinit(final Configuration conf) { + LOG.trace("reinit"); + if (conf != null) { + // Dictionary may have changed + byte[] b = ZstdCodec.getDictionary(conf); + if (b != null) { + // Don't casually create dictionary objects; they consume native memory + int thisDictId = ZstdCodec.getDictionaryId(b); + if (dict == null || dictId != thisDictId) { + dictId = thisDictId; + dict = new ZstdDictDecompress(b); + LOG.trace("Reloaded dictionary, new id is {}", dictId); + } + } else { + dict = null; + } + // Buffer size might have changed + int newBufferSize = ZstdCodec.getBufferSize(conf); + if (bufferSize != newBufferSize) { + bufferSize = newBufferSize; + this.inBuf = ByteBuffer.allocateDirect(bufferSize); + this.outBuf = ByteBuffer.allocateDirect(bufferSize); + LOG.trace("Resized buffers, new size is {}", bufferSize); + } + } + reset(); + } + } diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java index 07ce12dc821d..42c56a822d4d 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java @@ -17,8 +17,12 @@ */ package org.apache.hadoop.hbase.io.compress.zstd; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.HFileTestBase; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -35,9 +39,11 @@ public class TestHFileCompressionZstd extends HFileTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileCompressionZstd.class); + private static Configuration conf; + @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); conf.set(Compression.ZSTD_CODEC_CLASS_KEY, ZstdCodec.class.getCanonicalName()); Compression.Algorithm.ZSTD.reload(conf); HFileTestBase.setUpBeforeClass(); @@ -45,7 +51,26 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { - doTest(Compression.Algorithm.ZSTD); + Path path = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + doTest(conf, path, Compression.Algorithm.ZSTD); + } + + @Test + public void testReconfLevels() throws Exception { + Path path_1 = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); + Path path_2 = new Path(TEST_UTIL.getDataTestDir(), + HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); + conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 1); + doTest(conf, path_1, Compression.Algorithm.ZSTD); + long len_1 = 
FS.getFileStatus(path_1).getLen(); + conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 22); + doTest(conf, path_2, Compression.Algorithm.ZSTD); + long len_2 = FS.getFileStatus(path_2).getLen(); + LOG.info("Level 1 len {}", len_1); + LOG.info("Level 22 len {}", len_2); + assertTrue("Reconfiguraton with ZSTD_LEVEL_KEY did not seem to work", len_1 > len_2); } } diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java index 6bcb2aa11511..bf1c78cbc17f 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.compress.CompressionTestBase; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -33,20 +34,20 @@ public class TestZstdCodec extends CompressionTestBase { HBaseClassTestRule.forClass(TestZstdCodec.class); @Test - public void testzstdCodecSmall() throws Exception { + public void testZstdCodecSmall() throws Exception { codecSmallTest(new ZstdCodec()); } @Test - public void testzstdCodecLarge() throws Exception { + public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability codecLargeTest(new ZstdCodec(), 2); codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test - public void testzstdCodecVeryLarge() throws Exception { - Configuration conf = new Configuration(); + public void testZstdCodecVeryLarge() throws Exception { + Configuration conf = HBaseConfiguration.create(); // ZStandard levels range from 1 to 22. // Level 22 might take up to a minute to complete. 3 is the Hadoop default, and will be fast. conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, 3); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java new file mode 100644 index 000000000000..0a17ef997d20 --- /dev/null +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hbase.io.compress.zstd; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Random; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.io.compress.CompressionTestBase; +import org.apache.hadoop.hbase.io.compress.DictionaryCache; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.RandomDistribution; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(SmallTests.class) +public class TestZstdDictionary extends CompressionTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestZstdDictionary.class); + + private static final String DICTIONARY_PATH = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; + // zstd.test.data compressed with zstd.test.dict at level 3 will produce a result of + // 358555 bytes + private static final int EXPECTED_COMPRESSED_SIZE = 358555; + + private static byte[] TEST_DATA; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = new Configuration(); + TEST_DATA = DictionaryCache.loadFromResource(conf, + DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024); + assertNotNull("Failed to load test data", TEST_DATA); + } + + @Test + public void test() throws Exception { + Configuration conf = new Configuration(); + conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, 3); + conf.set(ZstdCodec.ZSTD_DICTIONARY_KEY, DICTIONARY_PATH); + ZstdCodec codec = new ZstdCodec(); + codec.setConf(conf); + codecTest(codec, new byte[][] { TEST_DATA }, EXPECTED_COMPRESSED_SIZE); + // Assert that the dictionary was actually loaded + assertTrue("Dictionary was not loaded by codec", DictionaryCache.contains(DICTIONARY_PATH)); + } + + // + // For generating the test data in src/test/resources/ + // + + public static void main(String[] args) throws IOException { + // Write 1000 1k blocks for training to the specified file + // Train with: + // zstd --train -B1024 -o + if (args.length < 1) { + System.err.println("Usage: TestZstdCodec "); + System.exit(-1); + } + final RandomDistribution.DiscreteRNG rng = + new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2); + final File outFile = new File(args[0]); + final byte[] buffer = new byte[1024]; + System.out.println("Generating " + outFile); + try (FileOutputStream os = new FileOutputStream(outFile)) { + for (int i = 0; i < 1000; i++) { + fill(rng, buffer); + os.write(buffer); + } + } + System.out.println("Done"); + } + +} diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java new file mode 100644 index 000000000000..6d850114bbbd --- /dev/null +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.compress.zstd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.compress.DictionaryCache; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, LargeTests.class }) +public class TestZstdDictionarySplitMerge { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestZstdDictionarySplitMerge.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static Configuration conf; + + @BeforeClass + public static void setUp() throws Exception { + // NOTE: Don't put configuration settings in global site schema. We are testing if per + // CF or per table schema settings are applied correctly. 
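+    // The codec class registered below applies process-wide; the dictionary itself is attached
+    // per column family through ZSTD_DICTIONARY_KEY in the table descriptor built by the test,
+    // which is the setting the split and merge verification exercises.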
+ conf = TEST_UTIL.getConfiguration(); + conf.set(Compression.ZSTD_CODEC_CLASS_KEY, ZstdCodec.class.getCanonicalName()); + Compression.Algorithm.ZSTD.reload(conf); + conf.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, 1000); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void test() throws Exception { + // Create the table + + final TableName tableName = TableName.valueOf("TestZstdDictionarySplitMerge"); + final byte[] cfName = Bytes.toBytes("info"); + final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; + final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) + .setCompressionType(Compression.Algorithm.ZSTD) + .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath) + .build()) + .build(); + final Admin admin = TEST_UTIL.getAdmin(); + admin.createTable(td, new byte[][] { Bytes.toBytes(1) }); + TEST_UTIL.waitTableAvailable(tableName); + + // Load some data + + Table t = ConnectionFactory.createConnection(conf).getTable(tableName); + TEST_UTIL.loadNumericRows(t, cfName, 0, 100_000); + admin.flush(tableName); + assertTrue("Dictionary was not loaded", DictionaryCache.contains(dictionaryPath)); + TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0); + + // Test split procedure + + admin.split(tableName, Bytes.toBytes(50_000)); + TEST_UTIL.waitFor(30000, new ExplainingPredicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3; + } + @Override + public String explainFailure() throws Exception { + return "Split has not finished yet"; + } + }); + TEST_UTIL.waitUntilNoRegionsInTransition(); + TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0); + + // Test merge procedure + + RegionInfo regionA = null; + RegionInfo regionB = null; + for (RegionInfo region: admin.getRegions(tableName)) { + if (region.getStartKey().length == 0) { + regionA = region; + } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) { + regionB = region; + } + } + assertNotNull(regionA); + assertNotNull(regionB); + admin.mergeRegionsAsync(new byte[][] { + regionA.getRegionName(), + regionB.getRegionName() + }, false).get(30, TimeUnit.SECONDS); + assertEquals(2, admin.getRegions(tableName).size()); + ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); + assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName) + .getRegionLocation(Bytes.toBytes(1), true).getServerName()); + try (AsyncConnection asyncConn = + ConnectionFactory.createAsyncConnection(conf).get()) { + assertEquals(expected, asyncConn.getRegionLocator(tableName) + .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); + } + TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0); + } + +} diff --git a/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data new file mode 100644 index 000000000000..a497af551fd3 Binary files /dev/null and b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data differ diff --git a/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict new file mode 100644 index 
000000000000..8d9ec652b0bf Binary files /dev/null and b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict differ diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java index 8bdff58cff8e..0718538ad442 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java @@ -123,7 +123,8 @@ private void run() throws Exception { TProtocol protocol = new TBinaryProtocol(transport, true, true); Hbase.Client client = new Hbase.Client(protocol); - byte[] t = bytes("demo_table"); + ByteBuffer demoTable = ByteBuffer.wrap(bytes("demo_table")); + ByteBuffer disabledTable = ByteBuffer.wrap(bytes("disabled_table")); // Scan all tables, look for the demo table and delete it. System.out.println("scanning tables..."); @@ -131,7 +132,7 @@ private void run() throws Exception { for (ByteBuffer name : client.getTableNames()) { System.out.println(" found: " + ClientUtils.utf8(name.array())); - if (ClientUtils.utf8(name.array()).equals(ClientUtils.utf8(t))) { + if (name.equals(demoTable) || name.equals(disabledTable)) { if (client.isTableEnabled(name)) { System.out.println(" disabling table: " + ClientUtils.utf8(name.array())); client.disableTable(name); @@ -155,22 +156,35 @@ private void run() throws Exception { col.timeToLive = Integer.MAX_VALUE; columns.add(col); - System.out.println("creating table: " + ClientUtils.utf8(t)); + System.out.println("creating table: " + ClientUtils.utf8(demoTable.array())); try { - client.createTable(ByteBuffer.wrap(t), columns); + client.createTable(demoTable, columns); + client.createTable(disabledTable, columns); } catch (AlreadyExists ae) { System.out.println("WARN: " + ae.message); } - System.out.println("column families in " + ClientUtils.utf8(t) + ": "); - Map columnMap = client.getColumnDescriptors(ByteBuffer.wrap(t)); + System.out.println("column families in " + ClientUtils.utf8(demoTable.array()) + ": "); + Map columnMap = client.getColumnDescriptors(demoTable); for (ColumnDescriptor col2 : columnMap.values()) { System.out.println(" column: " + ClientUtils.utf8(col2.name.array()) + ", maxVer: " + col2.maxVersions); } + if (client.isTableEnabled(disabledTable)){ + System.out.println("disabling table: " + ClientUtils.utf8(disabledTable.array())); + client.disableTable(disabledTable); + } + + System.out.println("list tables with enabled statuses : "); + Map statusMap = client.getTableNamesWithIsTableEnabled(); + for (Map.Entry entry : statusMap.entrySet()) { + System.out.println(" Table: " + ClientUtils.utf8(entry.getKey().array()) + + ", is enabled: " + entry.getValue()); + } + Map dummyAttributes = null; boolean writeToWal = false; @@ -187,27 +201,27 @@ private void run() throws Exception { mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")), + client.mutateRow(demoTable, ByteBuffer.wrap(bytes("foo")), mutations, dummyAttributes); // this row name is valid utf8 mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes); + client.mutateRow(demoTable, ByteBuffer.wrap(valid), mutations, dummyAttributes); // 
non-utf8 is now allowed in row names because HBase stores values as binary mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes); + client.mutateRow(demoTable, ByteBuffer.wrap(invalid), mutations, dummyAttributes); // Run a scanner on the rows we just created ArrayList columnNames = new ArrayList<>(); columnNames.add(ByteBuffer.wrap(bytes("entry:"))); System.out.println("Starting scanner..."); - int scanner = client.scannerOpen(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("")), columnNames, + int scanner = client.scannerOpen(demoTable, ByteBuffer.wrap(bytes("")), columnNames, dummyAttributes); while (true) { @@ -231,9 +245,9 @@ private void run() throws Exception { mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - client.deleteAllRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes); + client.mutateRow(demoTable, ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes)); + client.deleteAllRow(demoTable, ByteBuffer.wrap(row), dummyAttributes); // sleep to force later timestamp try { @@ -247,8 +261,8 @@ private void run() throws Exception { ByteBuffer.wrap(bytes("0")), writeToWal)); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + client.mutateRow(demoTable, ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes)); Mutation m; mutations = new ArrayList<>(2); @@ -260,16 +274,16 @@ private void run() throws Exception { m.column = ByteBuffer.wrap(bytes("entry:num")); m.value = ByteBuffer.wrap(bytes("-1")); mutations.add(m); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + client.mutateRow(demoTable, ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes)); mutations = new ArrayList<>(); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes(Integer.toString(i))), writeToWal)); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:sqr")), ByteBuffer.wrap(bytes(Integer.toString(i * i))), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + client.mutateRow(demoTable, ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes)); // sleep to force later timestamp try { @@ -286,11 +300,11 @@ private void run() throws Exception { m = new Mutation(); m.column = ByteBuffer.wrap(bytes("entry:sqr")); m.isDelete = true; - client.mutateRowTs(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, 1, + client.mutateRowTs(demoTable, 
ByteBuffer.wrap(row), mutations, 1, dummyAttributes); // shouldn't override latest - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes)); - List versions = client.getVer(ByteBuffer.wrap(t), ByteBuffer.wrap(row), + List versions = client.getVer(demoTable, ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:num")), 10, dummyAttributes); printVersions(ByteBuffer.wrap(row), versions); @@ -299,7 +313,7 @@ private void run() throws Exception { System.exit(-1); } - List result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row), + List result = client.get(demoTable, ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:foo")), dummyAttributes); if (!result.isEmpty()) { @@ -313,7 +327,7 @@ private void run() throws Exception { // scan all rows/columnNames columnNames.clear(); - for (ColumnDescriptor col2 : client.getColumnDescriptors(ByteBuffer.wrap(t)).values()) { + for (ColumnDescriptor col2 : client.getColumnDescriptors(demoTable).values()) { System.out.println("column with name: " + new String(col2.name.array())); System.out.println(col2.toString()); @@ -321,7 +335,7 @@ private void run() throws Exception { } System.out.println("Starting scanner..."); - scanner = client.scannerOpenWithStop(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("00020")), + scanner = client.scannerOpenWithStop(demoTable, ByteBuffer.wrap(bytes("00020")), ByteBuffer.wrap(bytes("00040")), columnNames, dummyAttributes); while (true) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java index 5dcd84e2462a..d75014512df0 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java @@ -26,12 +26,12 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import java.util.zip.GZIPInputStream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataInputStream; @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -83,7 +84,6 @@ import org.apache.hadoop.util.ToolRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** @@ -156,21 +156,23 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase { protected static byte[] RECORD_ID_QUALIFIER = Bytes.toBytes("r"); protected static byte[] TARGET_URI_QUALIFIER = Bytes.toBytes("u"); + private static final int VERIFICATION_READ_RETRIES = 10; + public static enum Counts { REFERENCED, UNREFERENCED, CORRUPT } - Path warcFileInputDir = null; - Path outputDir = null; - String[] args; + protected Path warcFileInputDir = null; + protected Path outputDir = null; + protected String[] args; - protected 
int runLoader(Path warcFileInputDir, Path outputDir) throws Exception { + protected int runLoader(final Path warcFileInputDir, final Path outputDir) throws Exception { Loader loader = new Loader(); loader.setConf(conf); return loader.run(warcFileInputDir, outputDir); } - protected int runVerify(Path inputDir) throws Exception { + protected int runVerify(final Path inputDir) throws Exception { Verify verify = new Verify(); verify.setConf(conf); return verify.run(inputDir); @@ -205,7 +207,7 @@ public int run(String[] args) { } @Override - protected void processOptions(CommandLine cmd) { + protected void processOptions(final CommandLine cmd) { processBaseOptions(cmd); args = cmd.getArgs(); } @@ -229,7 +231,7 @@ public void cleanUpCluster() throws Exception { } } - static TableName getTablename(Configuration c) { + static TableName getTablename(final Configuration c) { return TableName.valueOf(c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME)); } @@ -418,7 +420,7 @@ public static class Loader extends Configured implements Tool { private static final Logger LOG = LoggerFactory.getLogger(Loader.class); private static final String USAGE = "Loader "; - void createSchema(TableName tableName) throws IOException { + void createSchema(final TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(getConf()); Admin admin = conn.getAdmin()) { @@ -474,24 +476,24 @@ void createSchema(TableName tableName) throws IOException { } } - int run(Path warcFileInput, Path outputDir) + int run(final Path warcFileInput, final Path outputDir) throws IOException, ClassNotFoundException, InterruptedException { createSchema(getTablename(getConf())); - Job job = Job.getInstance(getConf()); + final Job job = Job.getInstance(getConf()); job.setJobName(Loader.class.getName()); job.setNumReduceTasks(0); job.setJarByClass(getClass()); job.setMapperClass(LoaderMapper.class); job.setInputFormatClass(WARCInputFormat.class); - FileSystem fs = FileSystem.get(warcFileInput.toUri(), getConf()); + final FileSystem fs = FileSystem.get(warcFileInput.toUri(), getConf()); if (fs.getFileStatus(warcFileInput).isDirectory()) { LOG.info("Using directory as WARC input path: " + warcFileInput); FileInputFormat.setInputPaths(job, warcFileInput); - } else { + } else if (warcFileInput.toUri().getScheme().equals("file")) { LOG.info("Getting WARC input paths from file: " + warcFileInput); - List paths = new LinkedList(); + final List paths = new ArrayList(); try (FSDataInputStream is = fs.open(warcFileInput)) { InputStreamReader reader; if (warcFileInput.getName().toLowerCase().endsWith(".gz")) { @@ -508,6 +510,8 @@ int run(Path warcFileInput, Path outputDir) } LOG.info("Read " + paths.size() + " WARC input paths from " + warcFileInput); FileInputFormat.setInputPaths(job, paths.toArray(new Path[paths.size()])); + } else { + FileInputFormat.setInputPaths(job, warcFileInput); } job.setOutputFormatClass(SequenceFileOutputFormat.class); SequenceFileOutputFormat.setOutputPath(job, outputDir); @@ -516,8 +520,6 @@ int run(Path warcFileInput, Path outputDir) job.setOutputValueClass(BytesWritable.class); TableMapReduceUtil.addDependencyJars(job); - LOG.info("Submitting job." 
+ - " This will take time proportional to the number of input files, please be patient."); boolean success = job.waitForCompletion(true); if (!success) { LOG.error("Failure during job " + job.getJobID()); @@ -549,20 +551,21 @@ public static void main(String[] args) throws Exception { public static class LoaderMapper extends Mapper { - Configuration conf; - Connection conn; - Table table; + protected Configuration conf; + protected Connection conn; + protected BufferedMutator mutator; @Override - protected void setup(Context context) throws IOException, InterruptedException { - conn = ConnectionFactory.createConnection(context.getConfiguration()); - table = conn.getTable(getTablename(conn.getConfiguration())); + protected void setup(final Context context) throws IOException, InterruptedException { + conf = context.getConfiguration(); + conn = ConnectionFactory.createConnection(conf); + mutator = conn.getBufferedMutator(getTablename(conf)); } @Override - protected void cleanup(Context context) throws IOException, InterruptedException { + protected void cleanup(final Context context) throws IOException, InterruptedException { try { - table.close(); + mutator.close(); } catch (Exception e) { LOG.warn("Exception closing Table", e); } @@ -574,16 +577,15 @@ protected void cleanup(Context context) throws IOException, InterruptedException } @Override - protected void map(LongWritable key, WARCWritable value, Context output) + protected void map(final LongWritable key, final WARCWritable value, final Context output) throws IOException, InterruptedException { - WARCRecord.Header warcHeader = value.getRecord().getHeader(); - String recordID = warcHeader.getRecordID(); - String targetURI = warcHeader.getTargetURI(); + final WARCRecord.Header warcHeader = value.getRecord().getHeader(); + final String recordID = warcHeader.getRecordID(); + final String targetURI = warcHeader.getTargetURI(); if (warcHeader.getRecordType().equals("response") && targetURI != null) { - String contentType = warcHeader.getField("WARC-Identified-Payload-Type"); + final String contentType = warcHeader.getField("WARC-Identified-Payload-Type"); if (contentType != null) { - LOG.debug("Processing record id=" + recordID + ", targetURI=\"" + targetURI + "\""); - long now = EnvironmentEdgeManager.currentTime(); + LOG.info("Processing uri=\"" + targetURI + "\", id=" + recordID); // Make row key @@ -601,89 +603,94 @@ protected void map(LongWritable key, WARCWritable value, Context output) // Get the content and calculate the CRC64 - byte[] content = value.getRecord().getContent(); - CRC64 crc = new CRC64(); + final byte[] content = value.getRecord().getContent(); + final CRC64 crc = new CRC64(); crc.update(content); - long crc64 = crc.getValue(); + final long crc64 = crc.getValue(); // Store to HBase - Put put = new Put(rowKey); - put.addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER, now, content); - put.addColumn(INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER, now, + final long ts = getCurrentTime(); + final Put put = new Put(rowKey); + put.addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER, ts, content); + put.addColumn(INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER, ts, Bytes.toBytes(content.length)); - put.addColumn(INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER, now, + put.addColumn(INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER, ts, Bytes.toBytes(contentType)); - put.addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER, now, Bytes.toBytes(crc64)); - put.addColumn(INFO_FAMILY_NAME, RECORD_ID_QUALIFIER, now, Bytes.toBytes(recordID)); - 
put.addColumn(INFO_FAMILY_NAME, TARGET_URI_QUALIFIER, now, Bytes.toBytes(targetURI)); - put.addColumn(INFO_FAMILY_NAME, DATE_QUALIFIER, now, + put.addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER, ts, Bytes.toBytes(crc64)); + put.addColumn(INFO_FAMILY_NAME, RECORD_ID_QUALIFIER, ts, Bytes.toBytes(recordID)); + put.addColumn(INFO_FAMILY_NAME, TARGET_URI_QUALIFIER, ts, Bytes.toBytes(targetURI)); + put.addColumn(INFO_FAMILY_NAME, DATE_QUALIFIER, ts, Bytes.toBytes(warcHeader.getDateString())); - String ipAddr = warcHeader.getField("WARC-IP-Address"); + final String ipAddr = warcHeader.getField("WARC-IP-Address"); if (ipAddr != null) { - put.addColumn(INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER, now, Bytes.toBytes(ipAddr)); + put.addColumn(INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER, ts, Bytes.toBytes(ipAddr)); } - table.put(put); + mutator.mutate(put); // Write records out for later verification, one per HBase field except for the // content record, which will be verified by CRC64. - output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CRC_QUALIFIER, now), + output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CRC_QUALIFIER, ts), new BytesWritable(Bytes.toBytes(crc64))); output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER, - now), new BytesWritable(Bytes.toBytes(content.length))); + ts), new BytesWritable(Bytes.toBytes(content.length))); output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER, - now), new BytesWritable(Bytes.toBytes(contentType))); + ts), new BytesWritable(Bytes.toBytes(contentType))); output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, RECORD_ID_QUALIFIER, - now), new BytesWritable(Bytes.toBytes(recordID))); + ts), new BytesWritable(Bytes.toBytes(recordID))); output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, TARGET_URI_QUALIFIER, - now), new BytesWritable(Bytes.toBytes(targetURI))); - output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, DATE_QUALIFIER, now), + ts), new BytesWritable(Bytes.toBytes(targetURI))); + output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, DATE_QUALIFIER, ts), new BytesWritable(Bytes.toBytes(warcHeader.getDateString()))); if (ipAddr != null) { output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER, - now), new BytesWritable(Bytes.toBytes(ipAddr))); + ts), new BytesWritable(Bytes.toBytes(ipAddr))); } } } } - private byte[] rowKeyFromTargetURI(String targetUri) + private byte[] rowKeyFromTargetURI(final String targetUri) throws URISyntaxException, IllegalArgumentException { - URI uri = new URI(targetUri); - StringBuffer sb = new StringBuffer(); + final URI uri = new URI(targetUri); // Ignore the scheme // Reverse the components of the hostname + String reversedHost; if (uri.getHost() != null) { - String[] hostComponents = uri.getHost().split("\\."); + final StringBuilder sb = new StringBuilder(); + final String[] hostComponents = uri.getHost().split("\\."); for (int i = hostComponents.length - 1; i >= 0; i--) { sb.append(hostComponents[i]); if (i != 0) { sb.append('.'); } } + reversedHost = sb.toString(); } else { throw new IllegalArgumentException("URI is missing host component"); } - // Port - if (uri.getPort() != -1) { + final StringBuilder sb = new StringBuilder(); + sb.append(reversedHost); + if (uri.getPort() >= 0) { sb.append(':'); sb.append(uri.getPort()); } - if (uri.getRawPath() != null) { - sb.append(uri.getRawPath()); + if (uri.getPath() != null) { + sb.append('/'); + sb.append(uri.getPath()); } - if (uri.getRawQuery() != null) { 
+ if (uri.getQuery() != null) { sb.append('?'); - sb.append(uri.getRawQuery()); + sb.append(uri.getQuery()); } - if (uri.getRawFragment() != null) { + if (uri.getFragment() != null) { sb.append('#'); - sb.append(uri.getRawFragment()); + sb.append(uri.getFragment()); } - // Constrain the key size to the maximum allowed row key length - if (sb.length() > HConstants.MAX_ROW_LENGTH) { - sb.setLength(HConstants.MAX_ROW_LENGTH); + if (sb.length() > HConstants.MAX_ROW_LENGTH) { + throw new IllegalArgumentException("Key would be too large (length=" + sb.length() + + ", limit=" + HConstants.MAX_ROW_LENGTH); } return Bytes.toBytes(sb.toString()); } @@ -693,7 +700,7 @@ private byte[] rowKeyFromTargetURI(String targetUri) public static class OneFilePerMapperSFIF extends SequenceFileInputFormat { @Override - protected boolean isSplitable(JobContext context, Path filename) { + protected boolean isSplitable(final JobContext context, final Path filename) { return false; } } @@ -703,7 +710,8 @@ public static class Verify extends Configured implements Tool { public static final Logger LOG = LoggerFactory.getLogger(Verify.class); public static final String USAGE = "Verify "; - int run(Path inputDir) throws IOException, ClassNotFoundException, InterruptedException { + int run(final Path inputDir) + throws IOException, ClassNotFoundException, InterruptedException { Job job = Job.getInstance(getConf()); job.setJobName(Verify.class.getName()); job.setJarByClass(getClass()); @@ -718,10 +726,18 @@ int run(Path inputDir) throws IOException, ClassNotFoundException, InterruptedEx if (!success) { LOG.error("Failure during job " + job.getJobID()); } - Counters counters = job.getCounters(); + final Counters counters = job.getCounters(); for (Counts c: Counts.values()) { LOG.info(c + ": " + counters.findCounter(c).getValue()); } + if (counters.findCounter(Counts.UNREFERENCED).getValue() > 0) { + LOG.error("Nonzero UNREFERENCED count from job " + job.getJobID()); + success = false; + } + if (counters.findCounter(Counts.CORRUPT).getValue() > 0) { + LOG.error("Nonzero CORRUPT count from job " + job.getJobID()); + success = false; + } return success ? 
0 : 1; } @@ -742,98 +758,132 @@ public static void main(String[] args) throws Exception { public static class VerifyMapper extends Mapper { - Connection conn; - Table table; + private Connection conn; + private Table table; @Override - protected void setup(Context context) throws IOException, InterruptedException { + protected void setup(final Context context) throws IOException, InterruptedException { conn = ConnectionFactory.createConnection(context.getConfiguration()); table = conn.getTable(getTablename(conn.getConfiguration())); } @Override - protected void cleanup(Context context) throws IOException ,InterruptedException { - table.close(); - conn.close(); + protected void cleanup(final Context context) throws IOException, InterruptedException { + try { + table.close(); + } catch (Exception e) { + LOG.warn("Exception closing Table", e); + } + try { + conn.close(); + } catch (Exception e) { + LOG.warn("Exception closing Connection", e); + } } @Override - protected void map(HBaseKeyWritable key, BytesWritable value, Context output) - throws IOException, InterruptedException { - - byte[] row = Bytes.copy(key.getRowArray(), key.getRowOffset(), key.getRowLength()); - byte[] family = Bytes.copy(key.getFamilyArray(), key.getFamilyOffset(), + protected void map(final HBaseKeyWritable key, final BytesWritable value, + final Context output) throws IOException, InterruptedException { + final byte[] row = Bytes.copy(key.getRowArray(), key.getRowOffset(), key.getRowLength()); + final byte[] family = Bytes.copy(key.getFamilyArray(), key.getFamilyOffset(), key.getFamilyLength()); - byte[] qualifier = Bytes.copy(key.getQualifierArray(), key.getQualifierOffset(), + final byte[] qualifier = Bytes.copy(key.getQualifierArray(), key.getQualifierOffset(), key.getQualifierLength()); - long ts = key.getTimestamp(); + final long ts = key.getTimestamp(); - if (Bytes.equals(INFO_FAMILY_NAME, family) && - Bytes.equals(CRC_QUALIFIER, qualifier)) { + int retries = VERIFICATION_READ_RETRIES; + while (true) { - long expectedCRC64 = Bytes.toLong(value.getBytes(), 0, value.getLength()); + if (Bytes.equals(INFO_FAMILY_NAME, family) && + Bytes.equals(CRC_QUALIFIER, qualifier)) { - Result result = - table.get(new Get(row) + final long expectedCRC64 = Bytes.toLong(value.getBytes(), 0, value.getLength()); + final Result result = table.get(new Get(row) .addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER) .addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER) .setTimestamp(ts)); - - byte[] content = result.getValue(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER); - if (content == null) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": missing content"); - output.getCounter(Counts.UNREFERENCED).increment(1); - return; - } else { - CRC64 crc = new CRC64(); - crc.update(content); - if (crc.getValue() != expectedCRC64) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": corrupt content"); + final byte[] content = result.getValue(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER); + if (content == null) { + if (retries-- > 0) { + continue; + } + LOG.error("Row " + Bytes.toStringBinary(row) + ": missing content"); + output.getCounter(Counts.UNREFERENCED).increment(1); + return; + } else { + final CRC64 crc = new CRC64(); + crc.update(content); + if (crc.getValue() != expectedCRC64) { + LOG.error("Row " + Bytes.toStringBinary(row) + ": corrupt content"); + output.getCounter(Counts.CORRUPT).increment(1); + return; + } + } + final byte[] crc = result.getValue(INFO_FAMILY_NAME, CRC_QUALIFIER); + if (crc == null) { + if (retries-- > 0) { + continue; + } + 
LOG.error("Row " + Bytes.toStringBinary(row) + ": missing i:c"); + output.getCounter(Counts.UNREFERENCED).increment(1); + return; + } + if (Bytes.toLong(crc) != expectedCRC64) { + if (retries-- > 0) { + continue; + } + LOG.error("Row " + Bytes.toStringBinary(row) + ": i:c mismatch"); output.getCounter(Counts.CORRUPT).increment(1); return; } - } - byte[] crc = result.getValue(INFO_FAMILY_NAME, CRC_QUALIFIER); - if (crc == null) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": missing i:c"); - output.getCounter(Counts.UNREFERENCED).increment(1); - return; - } - if (Bytes.toLong(crc) != expectedCRC64) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": i:c mismatch"); - output.getCounter(Counts.CORRUPT).increment(1); - return; - } - } else { + } else { - Result result = - table.get(new Get(row) + final Result result = table.get(new Get(row) .addColumn(family, qualifier) .setTimestamp(ts)); + final byte[] bytes = result.getValue(family, qualifier); + if (bytes == null) { + if (retries-- > 0) { + continue; + } + LOG.error("Row " + Bytes.toStringBinary(row) + ": missing " + + Bytes.toStringBinary(family) + ":" + Bytes.toStringBinary(qualifier)); + output.getCounter(Counts.UNREFERENCED).increment(1); + return; + } + if (!Bytes.equals(bytes, 0, bytes.length, value.getBytes(), 0, value.getLength())) { + if (retries-- > 0) { + continue; + } + LOG.error("Row " + Bytes.toStringBinary(row) + ": " + + Bytes.toStringBinary(family) + ":" + Bytes.toStringBinary(qualifier) + + " mismatch"); + output.getCounter(Counts.CORRUPT).increment(1); + return; + } - byte[] bytes = result.getValue(family, qualifier); - if (bytes == null) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": missing " + - Bytes.toStringBinary(family) + ":" + Bytes.toStringBinary(qualifier)); - output.getCounter(Counts.UNREFERENCED).increment(1); - return; - } - if (!Bytes.equals(bytes, 0, bytes.length, value.getBytes(), 0, value.getLength())) { - LOG.info("Row " + Bytes.toStringBinary(row) + ": " + - Bytes.toStringBinary(family) + ":" + Bytes.toStringBinary(qualifier) + - " mismatch"); - output.getCounter(Counts.CORRUPT).increment(1); - return; } - } + // If we fell through to here all verification checks have succeeded, potentially after + // retries, and we must exit the while loop. + output.getCounter(Counts.REFERENCED).increment(1); + break; - output.getCounter(Counts.REFERENCED).increment(1); + } } - } + } + + private static final AtomicLong counter = new AtomicLong(); + private static long getCurrentTime() { + // Typical hybrid logical clock scheme. + // Take the current time, shift by 16 bits and zero those bits, and replace those bits + // with the low 16 bits of the atomic counter. Mask off the high bit too because timestamps + // cannot be negative. 
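+      // For example, with the wall clock at t milliseconds: bits 16..62 of the result hold the
+      // low 47 bits of t, bits 0..15 hold the counter, and bit 63 stays clear, so up to 2^16
+      // distinct, strictly increasing timestamps can be issued within a single millisecond.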
+ return ((EnvironmentEdgeManager.currentTime() << 16) & 0x7fff_ffff_ffff_0000L) | + (counter.getAndIncrement() & 0xffffL); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java index 803b65e8f5b7..c244d8b7bd91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Reducer; @@ -50,7 +50,6 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - /** * A job with a a map and reduce phase to count cells in a table. * The counter lists the following stats for a given table: @@ -59,8 +58,11 @@ * 2. Total number of CFs across all rows * 3. Total qualifiers across all rows * 4. Total occurrence of each CF - * 5. Total occurrence of each qualifier + * 5. Total occurrence of each qualifier * 6. Total number of versions of each qualifier. + * 7. Total size of serialized cells of each CF. + * 8. Total size of serialized cells of each qualifier. + * 9. Total size of serialized cells across all rows. * * * The cellcounter can take optional parameters to use a user @@ -86,13 +88,14 @@ public class CellCounter extends Configured implements Tool { * Mapper that runs the count. */ static class CellCounterMapper - extends TableMapper { + extends TableMapper { /** * Counter enumeration to count the actual rows. 
*/ public static enum Counters { ROWS, - CELLS + CELLS, + SIZE } private Configuration conf; @@ -143,34 +146,41 @@ public void map(ImmutableBytesWritable row, Result values, currentFamily = null; currentQualifier = null; context.getCounter(Counters.ROWS).increment(1); - context.write(new Text("Total ROWS"), new IntWritable(1)); + context.write(new Text("Total ROWS"), new LongWritable(1)); } if (!values.isEmpty()) { int cellCount = 0; for (Cell value : values.listCells()) { cellCount++; + long size = value.getSerializedSize(); if (currentFamily == null || !CellUtil.matchingFamily(value, currentFamily)) { currentFamily = CellUtil.cloneFamily(value); currentFamilyName = Bytes.toStringBinary(currentFamily); currentQualifier = null; context.getCounter("CF", currentFamilyName).increment(1); if (1 == context.getCounter("CF", currentFamilyName).getValue()) { - context.write(new Text("Total Families Across all Rows"), new IntWritable(1)); - context.write(new Text(currentFamily), new IntWritable(1)); + context.write(new Text("Total Families Across all Rows"), new LongWritable(1)); + context.write(new Text(currentFamily), new LongWritable(1)); } + context.getCounter(Counters.SIZE).increment(size); + context.write(new Text("Total SIZE"), new LongWritable(size)); + context.getCounter("CF", currentFamilyName + "_Size").increment(size); + context.write(new Text(currentFamilyName + "_Size"), new LongWritable(size)); } - if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)) { + if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)){ currentQualifier = CellUtil.cloneQualifier(value); currentQualifierName = currentFamilyName + separator + Bytes.toStringBinary(currentQualifier); currentRowQualifierName = currentRowKey + separator + currentQualifierName; context.write(new Text("Total Qualifiers across all Rows"), - new IntWritable(1)); - context.write(new Text(currentQualifierName), new IntWritable(1)); + new LongWritable(1)); + context.write(new Text(currentQualifierName), new LongWritable(1)); + context.getCounter("Q", currentQualifierName + "_Size").increment(size); + context.write(new Text(currentQualifierName + "_Size"), new LongWritable(size)); } // Increment versions - context.write(new Text(currentRowQualifierName + "_Versions"), new IntWritable(1)); + context.write(new Text(currentRowQualifierName + "_Versions"), new LongWritable(1)); } context.getCounter(Counters.CELLS).increment(cellCount); } @@ -181,20 +191,20 @@ public void map(ImmutableBytesWritable row, Result values, } } - static class IntSumReducer extends Reducer { + static class LongSumReducer extends Reducer { + + private LongWritable result = new LongWritable(); - private IntWritable result = new IntWritable(); - public void reduce(Key key, Iterable values, - Context context) - throws IOException, InterruptedException { - int sum = 0; - for (IntWritable val : values) { + public void reduce(Key key, Iterable values, Context context) + throws IOException, InterruptedException { + long sum = 0; + for (LongWritable val : values) { sum += val.get(); } result.set(sum); context.write(key, result); } + } /** @@ -216,14 +226,14 @@ public static Job createSubmittableJob(Configuration conf, String[] args) Scan scan = getConfiguredScanForJob(conf, args); TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); - job.setNumReduceTasks(1); job.setMapOutputKeyClass(Text.class); - 
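
Since the reducer writes each aggregate through TextOutputFormat as a tab-separated key/value line, the new `Total SIZE`, `<family>_Size` and `<qualifier>_Size` totals can be consumed with plain text parsing. A hedged sketch of reading them back (file name and error handling are illustrative):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;

public class CellCounterOutputSketch {
  /** Parse "key<TAB>value" lines, e.g. "Total SIZE\t123456" or "f_Size\t98765". */
  public static Map<String, Long> parse(String partFile) throws IOException {
    Map<String, Long> totals = new HashMap<>();
    for (String line : Files.readAllLines(Paths.get(partFile))) {
      int tab = line.lastIndexOf('\t');
      if (tab > 0) {
        totals.put(line.substring(0, tab), Long.parseLong(line.substring(tab + 1).trim()));
      }
    }
    return totals;
  }

  public static void main(String[] args) throws IOException {
    // e.g. part-r-00000 from the job's output directory
    Map<String, Long> totals = parse(args[0]);
    System.out.println("Total SIZE = " + totals.get("Total SIZE"));
  }
}
```
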
job.setMapOutputValueClass(IntWritable.class); + job.setMapOutputValueClass(LongWritable.class); job.setOutputFormatClass(TextOutputFormat.class); job.setOutputKeyClass(Text.class); - job.setOutputValueClass(IntWritable.class); + job.setOutputValueClass(LongWritable.class); FileOutputFormat.setOutputPath(job, outputDir); - job.setReducerClass(IntSumReducer.class); + job.setReducerClass(LongSumReducer.class); + job.setCombinerClass(LongSumReducer.class); return job; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 1a9b655825c6..03254feec042 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -91,7 +91,7 @@ public void initialize(InputSplit split, TaskAttemptContext context) // The file info must be loaded before the scanner can be used. // This seems like a bug in HBase, but it's easily worked around. - this.scanner = in.getScanner(false, false); + this.scanner = in.getScanner(conf, false, false); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index f45eb4f4fc9a..f976684d1252 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -485,7 +485,7 @@ public void testWritingPEData() throws Exception { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, false, false); + HFileScanner scanner = reader.getScanner(conf, false, false, false); kvCount += reader.getEntries(); scanner.seekTo(); @@ -534,7 +534,7 @@ public void test_WritingTagData() LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, false, false); + HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); Cell cell = scanner.getCell(); List tagsFromCell = PrivateCellUtil.getTags(cell); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 049ec0ad4f14..910f4f6836c8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -485,7 +485,7 @@ private static void validateTable(Configuration conf, TableName tableName, Strin private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException { Configuration conf = util.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, false); + HFileScanner scanner = reader.getScanner(conf, false, false); scanner.seekTo(); int count = 0; do { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java 
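
Switching the value type to LongWritable also makes it possible to register the reducer as a combiner, as the job setup above now does: a combiner must accept and emit the same key/value types as the map output, and long addition is associative and commutative, so partial sums computed map-side are safe. A rough sketch of that wiring in isolation (the class below is a generic stand-in, not the patch's LongSumReducer):

```java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;

public class LongSumJobSketch {
  /** Sums values per key; usable as both combiner and reducer because its in/out types match. */
  public static class LongSum extends Reducer<Text, LongWritable, Text, LongWritable> {
    private final LongWritable result = new LongWritable();

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
        throws IOException, InterruptedException {
      long sum = 0;
      for (LongWritable v : values) {
        sum += v.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  static void wire(Job job) {
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setReducerClass(LongSum.class);
    // Safe as a combiner: addition is associative and commutative, and the combiner's
    // output types equal the reducer's input types.
    job.setCombinerClass(LongSum.class);
  }
}
```
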
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index bf4c868475c2..a3427f2a5ec6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -561,7 +561,7 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException { Configuration conf = util.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, false); + HFileScanner scanner = reader.getScanner(conf, false, false); scanner.seekTo(); int count = 0; do { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 4f1a3cf1635f..01121699880f 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -645,3 +645,13 @@ enum ClaimReplicationQueuesState { CLAIM_REPLICATION_QUEUES_DISPATCH = 1; CLAIM_REPLICATION_QUEUES_FINISH = 2; } + +enum ModifyTableDescriptorState { + MODIFY_TABLE_DESCRIPTOR_PREPARE = 1; + MODIFY_TABLE_DESCRIPTOR_UPDATE = 2; +} + +message ModifyTableDescriptorStateData { + required TableSchema unmodified_table_schema = 1; + optional TableSchema modified_table_schema = 2; +} \ No newline at end of file diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml index 1d9ddd41e8c8..87be14e2d611 100644 --- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml +++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml @@ -21,7 +21,7 @@ under the License. xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/supplemental-model/1.0.0 http://maven.apache.org/xsd/supplemental-model-1.0.0.xsd"> @@ -1452,7 +1452,7 @@ Copyright (c) 2010 Oracle and/or its affiliates. - + javax.servlet servlet-api @@ -2251,4 +2251,45 @@ Copyright (c) 2007-2017 The JRuby project + + + dnsjava + dnsjava + 3.4.0 + dnsjava + + + BSD 2-Clause License + https://opensource.org/licenses/BSD-2-Clause + repo + + Copyright (c) 1998-2011, Brian Wellington. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + + + + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index ed0e84deace6..2e6c19edfca2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -397,7 +397,8 @@ public interface Reader extends Closeable, CachingBlockReader { CellComparator getComparator(); - HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final boolean isCompaction); + HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, + boolean isCompaction); HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException; @@ -425,7 +426,7 @@ public interface Reader extends Closeable, CachingBlockReader { void setMetaBlockIndexReader(HFileBlockIndex.ByteArrayKeyBlockIndexReader reader); HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader(); - HFileScanner getScanner(boolean cacheBlocks, boolean pread); + HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread); /** * Retrieves general Bloom filter metadata as appropriate for each diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 0ad48295e8f6..fd6cea1b0a19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -28,6 +28,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.Cell; @@ -836,12 +838,13 @@ EncodingState getEncodingState() { /** * @param dataBlockEncoder data block encoding algorithm to use */ - public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) { - this(dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP); + public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, + HFileContext fileContext) { + this(conf, dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP); } - public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext, - ByteBuffAllocator allocator) { + public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, + HFileContext fileContext, ByteBuffAllocator allocator) { if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) { throw new RuntimeException("Unsupported value of bytesPerChecksum. 
" + " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + @@ -850,11 +853,11 @@ public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext, this.allocator = allocator; this.dataBlockEncoder = dataBlockEncoder != null? dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE; - this.dataBlockEncodingCtx = this.dataBlockEncoder. - newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); - // TODO: This should be lazily instantiated since we usually do NOT need this default encoder - this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, - HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + this.dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(conf, + HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + // TODO: This should be lazily instantiated + this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(conf, null, + HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum baosInMemory = new ByteArrayOutputStream(); prevOffsetByType = new long[BlockType.values().length]; @@ -1344,7 +1347,7 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up HFileBlockDecodingContext getDefaultBlockDecodingContext(); void setIncludesMemStoreTS(boolean includesMemstoreTS); - void setDataBlockEncoder(HFileDataBlockEncoder encoder); + void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf); /** * To close the stream's socket. Note: This can be concurrently called from multiple threads and @@ -1412,7 +1415,7 @@ static class FSReaderImpl implements FSReader { private final Lock streamLock = new ReentrantLock(); FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, - ByteBuffAllocator allocator) throws IOException { + ByteBuffAllocator allocator, Configuration conf) throws IOException { this.fileSize = readerContext.getFileSize(); this.hfs = readerContext.getFileSystem(); if (readerContext.getFilePath() != null) { @@ -1425,7 +1428,7 @@ static class FSReaderImpl implements FSReader { this.streamWrapper = readerContext.getInputStreamWrapper(); // Older versions of HBase didn't support checksum. this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum()); - defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext); + defaultDecodingCtx = new HFileBlockDefaultDecodingContext(conf, fileContext); encodedBlockDecodingCtx = defaultDecodingCtx; } @@ -1789,8 +1792,8 @@ public void setIncludesMemStoreTS(boolean includesMemstoreTS) { } @Override - public void setDataBlockEncoder(HFileDataBlockEncoder encoder) { - encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext); + public void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf) { + encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(conf, fileContext); } @Override @@ -2029,7 +2032,7 @@ static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) { * @return This HFileBlocks fileContext which will a derivative of the * fileContext for the file from which this block's data was originally read. 
*/ - HFileContext getHFileContext() { + public HFileContext getHFileContext() { return this.fileContext; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 3c118da258bc..6a1611de8dc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -18,13 +18,13 @@ import java.io.DataOutputStream; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Controls what kind of data block encoding is used. If data block encoding is @@ -97,11 +97,12 @@ void saveMetadata(HFile.Writer writer) * encoding context should also perform compression if compressionAlgorithm is * valid. * + * @param conf store configuration * @param headerBytes header bytes * @param fileContext HFile meta data * @return a new {@link HFileBlockEncodingContext} object */ - HFileBlockEncodingContext newDataBlockEncodingContext(byte[] headerBytes, + HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] headerBytes, HFileContext fileContext); /** @@ -109,8 +110,10 @@ HFileBlockEncodingContext newDataBlockEncodingContext(byte[] headerBytes, * decoding context should also do decompression if compressionAlgorithm * is valid. 
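
After this interface change, whoever builds an encoding or decoding context hands in a Configuration as well, which is how store-level settings reach the encoders. A hedged sketch of a caller using the new signatures (the encoding and block size are arbitrary example values, and the builder calls are assumptions based on the public HFileContextBuilder API, not part of this patch):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;

public class EncodingContextSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    HFileContext fileContext = new HFileContextBuilder()
        .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // example encoding
        .withBlockSize(64 * 1024)                           // example block size
        .build();
    HFileDataBlockEncoder encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.FAST_DIFF);
    // Both factory methods now take the Configuration as their first argument.
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(conf, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
    HFileBlockDecodingContext decodingCtx = encoder.newDataBlockDecodingContext(conf, fileContext);
    System.out.println(encodingCtx.getClass().getSimpleName());
    System.out.println(decodingCtx.getClass().getSimpleName());
  }
}
```
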
* + * @param conf store configuration * @param fileContext - HFile meta data * @return a new {@link HFileBlockDecodingContext} object */ - HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext); + HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, + HFileContext fileContext); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index 462064f7b895..d2ce77245c9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -19,6 +19,7 @@ import java.io.DataOutputStream; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -108,22 +109,23 @@ public String toString() { } @Override - public HFileBlockEncodingContext newDataBlockEncodingContext( + public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] dummyHeader, HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { - return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext); + return encoder.newDataBlockEncodingContext(conf, encoding, dummyHeader, fileContext); } - return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext); + return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, fileContext); } @Override - public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext) { + public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, + HFileContext fileContext) { DataBlockEncoder encoder = encoding.getEncoder(); if (encoder != null) { - return encoder.newDataBlockDecodingContext(fileContext); + return encoder.newDataBlockDecodingContext(conf, fileContext); } - return new HFileBlockDefaultDecodingContext(fileContext); + return new HFileBlockDefaultDecodingContext(conf, fileContext); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 02efa8e89863..c24d8be7c035 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -320,7 +320,7 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { if (verbose || printKey || checkRow || checkFamily || printStats || checkMobIntegrity) { // scan over file and read key/value's and check if requested - HFileScanner scanner = reader.getScanner(false, false, false); + HFileScanner scanner = reader.getScanner(getConf(), false, false, false); fileStats = new KeyValueStatsCollector(); boolean shouldScanKeysValues; if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 0b8d63080a71..c7a71584327c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -141,9 +141,9 @@ public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig ca this.trailer = fileInfo.getTrailer(); this.hfileContext = fileInfo.getHFileContext(); this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext, - cacheConf.getByteBuffAllocator()); + cacheConf.getByteBuffAllocator(), conf); this.dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo); - fsBlockReader.setDataBlockEncoder(dataBlockEncoder); + fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf); dataBlockIndexReader = fileInfo.getDataBlockIndexReader(); metaBlockIndexReader = fileInfo.getMetaBlockIndexReader(); } @@ -256,7 +256,7 @@ public String getName() { @Override public void setDataBlockEncoder(HFileDataBlockEncoder dataBlockEncoder) { this.dataBlockEncoder = dataBlockEncoder; - this.fsBlockReader.setDataBlockEncoder(dataBlockEncoder); + this.fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf); } @Override @@ -1445,11 +1445,11 @@ protected static class EncodedScanner extends HFileScannerImpl { private final DataBlockEncoder dataBlockEncoder; public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, - boolean pread, boolean isCompaction, HFileContext meta) { + boolean pread, boolean isCompaction, HFileContext meta, Configuration conf) { super(reader, cacheBlocks, pread, isCompaction); DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); - decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta); + decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(conf, meta); seeker = dataBlockEncoder.createSeeker(decodingCtx); } @@ -1637,16 +1637,17 @@ public boolean prefetchComplete() { * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is * nothing to clean up in a Scanner. Letting go of your references to the * scanner is sufficient. NOTE: Do not use this overload of getScanner for - * compactions. See {@link #getScanner(boolean, boolean, boolean)} + * compactions. See {@link #getScanner(Configuration, boolean, boolean, boolean)} * + * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. * @param pread Use positional read rather than seek+read if true (pread is * better for random reads, seek+read is better scanning). * @return Scanner on this file. */ @Override - public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { - return getScanner(cacheBlocks, pread, false); + public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread) { + return getScanner(conf, cacheBlocks, pread, false); } /** @@ -1654,6 +1655,8 @@ public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is * nothing to clean up in a Scanner. Letting go of your references to the * scanner is sufficient. + * @param conf + * Store configuration. * @param cacheBlocks * True if we should cache blocks read in by this scanner. * @param pread @@ -1664,10 +1667,10 @@ public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { * @return Scanner on this file. 
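
The updated call sites in this patch all follow the same shape: open a reader, then pass the Configuration into getScanner along with the cacheBlocks/pread/isCompaction flags. A hedged usage sketch modelled on those call sites (the path argument and the counting logic are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileScanSketch {
  public static long countCells(Path hfilePath) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = hfilePath.getFileSystem(conf);
    long count = 0;
    try (HFile.Reader reader =
        HFile.createReader(fs, hfilePath, new CacheConfig(conf), true, conf)) {
      // Configuration is now the first argument; then cacheBlocks, pread, isCompaction.
      HFileScanner scanner = reader.getScanner(conf, false, false, false);
      if (scanner.seekTo()) {
        do {
          count++;
        } while (scanner.next());
      }
    }
    return count;
  }
}
```
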
*/ @Override - public HFileScanner getScanner(boolean cacheBlocks, final boolean pread, + public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread, final boolean isCompaction) { if (dataBlockEncoder.useEncodedScanner()) { - return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext); + return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf); } return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 39cd8ebb534a..4275c368aa97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -302,7 +302,7 @@ protected void finishInit(final Configuration conf) { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } - blockWriter = new HFileBlock.Writer(blockEncoder, hFileContext, + blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator()); // Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index 467480f68d04..c519d9fd8095 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -19,6 +19,7 @@ import java.io.DataOutputStream; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -81,14 +82,15 @@ public String toString() { } @Override - public HFileBlockEncodingContext newDataBlockEncodingContext( + public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] dummyHeader, HFileContext meta) { - return new HFileBlockDefaultEncodingContext(null, dummyHeader, meta); + return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, meta); } @Override - public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) { - return new HFileBlockDefaultDecodingContext(meta); + public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, + HFileContext meta) { + return new HFileBlockDefaultDecodingContext(conf, meta); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 00c813836965..25859d4a7169 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -67,9 +67,11 @@ import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.CachedBlock; import org.apache.hadoop.hbase.io.hfile.HFileBlock; +import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.IdReadWriteLock; import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef; @@ -249,6 +251,10 @@ public class BucketCache implements BlockCache, HeapSize { * */ private String algorithm; + /* Tracing failed Bucket Cache allocations. */ + private long allocFailLogPrevTs; // time of previous log event for allocation failure. + private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute. + public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, int writerThreadNum, int writerQLen, String persistencePath) throws IOException { this(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen, @@ -291,6 +297,8 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck this.blockSize = blockSize; this.ioErrorsTolerationDuration = ioErrorsTolerationDuration; + this.allocFailLogPrevTs = 0; + bucketAllocator = new BucketAllocator(capacity, bucketSizes); for (int i = 0; i < writerThreads.length; ++i) { writerQueues.add(new ArrayBlockingQueue<>(writerQLen)); @@ -727,7 +735,8 @@ public void logStats() { (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) + "evictions=" + cacheStats.getEvictionCount() + ", " + "evicted=" + cacheStats.getEvictedCount() + ", " + - "evictedPerRun=" + cacheStats.evictedPerEviction()); + "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " + + "allocationFailCount=" + cacheStats.getAllocationFailCount()); cacheStats.reset(); } @@ -995,6 +1004,41 @@ protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) { } } + /** + * Prepare and return a warning message for Bucket Allocator Exception + * @param fle The exception + * @param re The RAMQueueEntry for which the exception was thrown. + * @return A warning message created from the input RAMQueueEntry object. + */ + private static String getAllocationFailWarningMessage(final BucketAllocatorException fle, + final RAMQueueEntry re) { + final StringBuilder sb = new StringBuilder(); + sb.append("Most recent failed allocation after "); + sb.append(ALLOCATION_FAIL_LOG_TIME_PERIOD); + sb.append(" ms;"); + if (re != null) { + if (re.getData() instanceof HFileBlock) { + final HFileContext fileContext = ((HFileBlock) re.getData()).getHFileContext(); + final String columnFamily = Bytes.toString(fileContext.getColumnFamily()); + final String tableName = Bytes.toString(fileContext.getTableName()); + if (tableName != null && columnFamily != null) { + sb.append(" Table: "); + sb.append(tableName); + sb.append(" CF: "); + sb.append(columnFamily); + sb.append(" HFile: "); + sb.append(fileContext.getHFileName()); + } + } else { + sb.append(" HFile: "); + sb.append(re.getKey()); + } + } + sb.append(" Message: "); + sb.append(fle.getMessage()); + return sb.toString(); + } + /** * Flush the entries in ramCache to IOEngine and add bucket entry to backingMap. Process all that * are passed in even if failure being sure to remove from ramCache else we'll never undo the @@ -1040,7 +1084,12 @@ void doDrain(final List entries) throws InterruptedException { } index++; } catch (BucketAllocatorException fle) { - LOG.warn("Failed allocation for " + (re == null ? "" : re.getKey()) + "; " + fle); + long currTs = EnvironmentEdgeManager.currentTime(); + cacheStats.allocationFailed(); // Record the warning. 
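
The catch block here counts every BucketAllocatorException but, as the following lines show, only emits a WARN when the previous one is older than ALLOCATION_FAIL_LOG_TIME_PERIOD. A self-contained sketch of that rate-limited-logging pattern (names are illustrative; this is not the HBase implementation):

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

public class RateLimitedWarningSketch {
  private static final long LOG_PERIOD_MS = 60_000; // at most one WARN per minute
  private final LongAdder failureCount = new LongAdder();
  private final AtomicLong lastLogTs = new AtomicLong();

  /** Count every failure, but only log when the previous message is old enough. */
  public void onFailure(String message) {
    failureCount.increment();
    long now = System.currentTimeMillis();
    long prev = lastLogTs.get();
    if ((prev == 0 || now - prev > LOG_PERIOD_MS) && lastLogTs.compareAndSet(prev, now)) {
      System.err.println("Most recent failure after " + LOG_PERIOD_MS + " ms; " + message);
    }
  }

  public long failures() {
    return failureCount.sum();
  }
}
```
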
+ if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { + LOG.warn(getAllocationFailWarningMessage(fle, re)); + allocFailLogPrevTs = currTs; + } // Presume can't add. Too big? Move index on. Entry will be cleared from ramCache below. bucketEntries[index] = null; index++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 49b9bbaedb78..4a2b0a13590d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -35,14 +35,20 @@ public class BucketCacheStats extends CacheStats { private static final long NANO_TIME = TimeUnit.MILLISECONDS.toNanos(1); private long lastLogTime = EnvironmentEdgeManager.currentTime(); + /* Tracing failed Bucket Cache allocations. */ + private LongAdder allocationFailCount = new LongAdder(); + BucketCacheStats() { super("BucketCache"); + + allocationFailCount.reset(); } @Override public String toString() { return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + - ", ioTimePerHit=" + getIOTimePerHit(); + ", ioTimePerHit=" + getIOTimePerHit() + ", allocationFailCount=" + + getAllocationFailCount(); } public void ioHit(long time) { @@ -66,5 +72,14 @@ public double getIOTimePerHit() { public void reset() { ioHitCount.reset(); ioHitTime.reset(); + allocationFailCount.reset(); + } + + public long getAllocationFailCount() { + return allocationFailCount.sum(); + } + + public void allocationFailed () { + allocationFailCount.increment(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index b6293e28a573..4ebc9fa5325a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -372,6 +372,7 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, replyToken = saslServer.evaluateResponse(saslToken.hasArray()? 
saslToken.array() : saslToken.toBytes()); } catch (IOException e) { + RpcServer.LOG.debug("Failed to execute SASL handshake", e); IOException sendToClient = e; Throwable cause = e; while (cause != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index b822921cbaa1..ad6969ba9abf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -367,7 +367,10 @@ public class HMaster extends HBaseServerBase implements Maste private HbckChore hbckChore; CatalogJanitor catalogJanitorChore; - private DirScanPool cleanerPool; + // Threadpool for scanning the archive directory, used by the HFileCleaner + private DirScanPool hfileCleanerPool; + // Threadpool for scanning the Old logs directory, used by the LogCleaner + private DirScanPool logCleanerPool; private LogCleaner logCleaner; private HFileCleaner hfileCleaner; private ReplicationBarrierCleaner replicationBarrierCleaner; @@ -1131,7 +1134,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime(); configurationManager.registerObserver(this.balancer); - configurationManager.registerObserver(this.cleanerPool); + configurationManager.registerObserver(this.hfileCleanerPool); + configurationManager.registerObserver(this.logCleanerPool); configurationManager.registerObserver(this.hfileCleaner); configurationManager.registerObserver(this.logCleaner); configurationManager.registerObserver(this.regionsRecoveryConfigManager); @@ -1493,21 +1497,24 @@ private void startServiceThreads() throws IOException { ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); startProcedureExecutor(); - // Create cleaner thread pool - cleanerPool = new DirScanPool(conf); + // Create log cleaner thread pool + logCleanerPool = DirScanPool.getLogCleanerScanPool(conf); Map params = new HashMap<>(); params.put(MASTER, this); // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool, params); + getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), + logCleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread Path archiveDir = HFileArchiveUtil.getArchivePath(conf); + // Create archive cleaner thread pool + hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, - getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params); + getMasterFileSystem().getFileSystem(), archiveDir, hfileCleanerPool, params); getChoreService().scheduleChore(hfileCleaner); // Regions Reopen based on very high storeFileRefCount is considered enabled @@ -1557,9 +1564,13 @@ protected void stopServiceThreads() { } stopChoreService(); stopExecutorService(); - if (cleanerPool != null) { - cleanerPool.shutdownNow(); - cleanerPool = null; + if (hfileCleanerPool != null) { + hfileCleanerPool.shutdownNow(); + hfileCleanerPool = null; + } + if (logCleanerPool != null) { + logCleanerPool.shutdownNow(); + logCleanerPool = null; } if 
(maintenanceRegionServer != null) { maintenanceRegionServer.getRegionServer().stop(HBASE_MASTER_CLEANER_INTERVAL); @@ -1973,7 +1984,7 @@ public List executeRegionPlansWithThrottling(List plans) } } } - LOG.info("Balancer is going into sleep until next period in {}ms", getConfiguration() + LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration() .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return successRegionPlans; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java index 917a8d3ba783..43f451efed8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java @@ -223,7 +223,8 @@ private void loadRegionsFromInMemoryState() { .isTableState(regionInfo.getTable(), TableState.State.DISABLED)) { disabledTableRegions.add(regionInfo.getRegionNameAsString()); } - if (regionState.isSplit()) { + // Check both state and regioninfo for split status, see HBASE-26383 + if (regionState.isSplit() || regionInfo.isSplit()) { splitParentRegions.add(regionInfo.getRegionNameAsString()); } HbckRegionInfo.MetaEntry metaEntry = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index fb57cb92dc2f..da3d73ea852d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -24,6 +24,8 @@ import java.util.Collections; import java.util.List; import java.util.stream.Stream; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.MetaMutationAnnotation; @@ -53,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.WALSplitUtil; @@ -609,10 +612,16 @@ private void mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem regionFs, String family = hcd.getNameAsString(); final Collection storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { + final Configuration storeConfiguration = + StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd); for (StoreFileInfo storeFileInfo : storeFiles) { // Create reference file(s) to parent region file here in mergedDir. // As this procedure is running on master, use CacheConfig.DISABLED means // don't cache any block. + // We also need to pass through a suitable CompoundConfiguration as if this + // is running in a regionserver's Store context, or we might not be able + // to read the hfiles. 
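
The comment above is the key point: on the master there is no Store object, so the procedure builds a store-scoped Configuration itself before handing the StoreFileInfo to HStoreFile. A rough sketch of the layering idea, using plain Hadoop Configuration rather than the real StoreUtils/CompoundConfiguration code (assumption: more specific values win by being applied last):

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class StoreConfigSketch {
  /**
   * Layer table-level and then column-family-level overrides on top of the base configuration,
   * so the most specific setting wins. This only mirrors the intent of
   * StoreUtils.createStoreConfiguration; it is not the real implementation.
   */
  public static Configuration layered(Configuration base, Map<String, String> tableOverrides,
      Map<String, String> familyOverrides) {
    Configuration conf = new Configuration(base);
    tableOverrides.forEach(conf::set);
    familyOverrides.forEach(conf::set);
    return conf;
  }
}
```
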
+ storeFileInfo.setConf(storeConfiguration); mergeRegionFs.mergeStoreFile(regionFs.getRegionInfo(), family, new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 0a15e36a16af..26d0a4b4eb38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -711,12 +712,17 @@ private Pair splitStoreFiles(final MasterProcedureEnv env, final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName); final Collection storeFiles = e.getValue(); if (storeFiles != null && storeFiles.size() > 0) { + final Configuration storeConfiguration = + StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd); for (StoreFileInfo storeFileInfo : storeFiles) { // As this procedure is running on master, use CacheConfig.DISABLED means // don't cache any block. - StoreFileSplitter sfs = - new StoreFileSplitter(regionFs, familyName, new HStoreFile( - storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); + // We also need to pass through a suitable CompoundConfiguration as if this + // is running in a regionserver's Store context, or we might not be able + // to read the hfiles. + storeFileInfo.setConf(storeConfiguration); + StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, + new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); futures.add(threadPool.submit(sfs)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java index 4ae428d51c62..8454eae3ea57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java @@ -57,6 +57,7 @@ public abstract class CleanerChore extends Schedu private static final int AVAIL_PROCESSORS = Runtime.getRuntime().availableProcessors(); /** + * Configures the threadpool used for scanning the archive directory for the HFileCleaner * If it is an integer and >= 1, it would be the size; * if 0.0 < size <= 1.0, size would be available processors * size. * Pay attention that 1.0 is different from 1, former indicates it will use 100% of cores, @@ -64,6 +65,12 @@ public abstract class CleanerChore extends Schedu */ public static final String CHORE_POOL_SIZE = "hbase.cleaner.scan.dir.concurrent.size"; static final String DEFAULT_CHORE_POOL_SIZE = "0.25"; + /** + * Configures the threadpool used for scanning the Old logs directory for the LogCleaner + * Follows the same configuration mechanism as CHORE_POOL_SIZE, but has a default of 1 thread. 
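
Both cleaner pool-size settings accept either an absolute thread count or a fraction of the available cores, as described above. A small standalone sketch of that parsing rule (an illustration only; the authoritative logic is CleanerChore.calculatePoolSize):

```java
public class PoolSizeSketch {
  /** "2" -> 2 threads; "0.25" -> 25% of the available cores (at least 1). */
  public static int calculate(String configured) {
    int cores = Runtime.getRuntime().availableProcessors();
    try {
      if (configured.contains(".")) {
        double fraction = Double.parseDouble(configured);
        return (fraction > 0.0 && fraction <= 1.0) ? Math.max(1, (int) (cores * fraction)) : 1;
      }
      return Math.max(1, Integer.parseInt(configured));
    } catch (NumberFormatException e) {
      return 1;
    }
  }

  public static void main(String[] args) {
    System.out.println(calculate("0.25")); // a quarter of the cores
    System.out.println(calculate("1.0"));  // all cores
    System.out.println(calculate("1"));    // exactly one thread
  }
}
```
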
+ */ + public static final String LOG_CLEANER_CHORE_SIZE = "hbase.log.cleaner.scan.dir.concurrent.size"; + static final String DEFAULT_LOG_CLEANER_CHORE_POOL_SIZE = "1"; private final DirScanPool pool; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java index 164752b97314..6e1426985cc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java @@ -39,21 +39,41 @@ public class DirScanPool implements ConfigurationObserver { private final ThreadPoolExecutor pool; private int cleanerLatch; private boolean reconfigNotification; + private Type dirScanPoolType; + private final String name; - public DirScanPool(Configuration conf) { - String poolSize = conf.get(CleanerChore.CHORE_POOL_SIZE, CleanerChore.DEFAULT_CHORE_POOL_SIZE); + private enum Type { + LOG_CLEANER(CleanerChore.LOG_CLEANER_CHORE_SIZE, + CleanerChore.DEFAULT_LOG_CLEANER_CHORE_POOL_SIZE), + HFILE_CLEANER(CleanerChore.CHORE_POOL_SIZE, CleanerChore.DEFAULT_CHORE_POOL_SIZE); + + private final String cleanerPoolSizeConfigName; + private final String cleanerPoolSizeConfigDefault; + + private Type(String cleanerPoolSizeConfigName, String cleanerPoolSizeConfigDefault) { + this.cleanerPoolSizeConfigName = cleanerPoolSizeConfigName; + this.cleanerPoolSizeConfigDefault = cleanerPoolSizeConfigDefault; + } + } + + private DirScanPool(Configuration conf, Type dirScanPoolType) { + this.dirScanPoolType = dirScanPoolType; + this.name = dirScanPoolType.name().toLowerCase(); + String poolSize = conf.get(dirScanPoolType.cleanerPoolSizeConfigName, + dirScanPoolType.cleanerPoolSizeConfigDefault); size = CleanerChore.calculatePoolSize(poolSize); // poolSize may be 0 or 0.0 from a careless configuration, // double check to make sure. - size = size == 0 ? CleanerChore.calculatePoolSize(CleanerChore.DEFAULT_CHORE_POOL_SIZE) : size; - pool = initializePool(size); - LOG.info("Cleaner pool size is {}", size); + size = size == 0 ? 
+ CleanerChore.calculatePoolSize(dirScanPoolType.cleanerPoolSizeConfigDefault) : size; + pool = initializePool(size, name); + LOG.info("{} Cleaner pool size is {}", name, size); cleanerLatch = 0; } - private static ThreadPoolExecutor initializePool(int size) { + private static ThreadPoolExecutor initializePool(int size, String name) { return Threads.getBoundedCachedThreadPool(size, 1, TimeUnit.MINUTES, - new ThreadFactoryBuilder().setNameFormat("dir-scan-pool-%d").setDaemon(true) + new ThreadFactoryBuilder().setNameFormat(name + "-dir-scan-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); } @@ -64,9 +84,11 @@ private static ThreadPoolExecutor initializePool(int size) { @Override public synchronized void onConfigurationChange(Configuration conf) { int newSize = CleanerChore.calculatePoolSize( - conf.get(CleanerChore.CHORE_POOL_SIZE, CleanerChore.DEFAULT_CHORE_POOL_SIZE)); + conf.get(dirScanPoolType.cleanerPoolSizeConfigName, + dirScanPoolType.cleanerPoolSizeConfigDefault)); if (newSize == size) { - LOG.trace("Size from configuration is same as previous={}, no need to update.", newSize); + LOG.trace("{} Cleaner Size from configuration is same as previous={}, no need to update.", + name, newSize); return; } size = newSize; @@ -109,11 +131,19 @@ synchronized void tryUpdatePoolSize(long timeout) { break; } } - LOG.info("Update chore's pool size from {} to {}", pool.getPoolSize(), size); + LOG.info("Update {} chore's pool size from {} to {}", name, pool.getPoolSize(), size); pool.setCorePoolSize(size); } public int getSize() { return size; } + + public static DirScanPool getHFileCleanerScanPool(Configuration conf) { + return new DirScanPool(conf, Type.HFILE_CLEANER); + } + + public static DirScanPool getLogCleanerScanPool(Configuration conf) { + return new DirScanPool(conf, Type.LOG_CLEANER); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableDescriptorProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableDescriptorProcedure.java new file mode 100644 index 000000000000..e11b4aba248c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableDescriptorProcedure.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableDescriptorState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableDescriptorStateData; + +/** + * The procedure will only update the table descriptor without reopening all the regions. + *

+ * It is usually used for migrating when upgrading, where we need to add something into the table + * descriptor, such as the rs group information. + */ +@InterfaceAudience.Private +public abstract class ModifyTableDescriptorProcedure + extends AbstractStateMachineTableProcedure { + + private static final Logger LOG = LoggerFactory.getLogger(ModifyTableDescriptorProcedure.class); + + private TableDescriptor unmodifiedTableDescriptor; + private TableDescriptor modifiedTableDescriptor; + + protected ModifyTableDescriptorProcedure() { + } + + protected ModifyTableDescriptorProcedure(MasterProcedureEnv env, TableDescriptor unmodified) { + super(env); + this.unmodifiedTableDescriptor = unmodified; + } + + @Override + public TableName getTableName() { + return unmodifiedTableDescriptor.getTableName(); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Sub class should implement this method to modify the table descriptor, such as storing the rs + * group information. + *
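
In practice a subclass only implements modify(); the prepare/update state machine below takes care of persisting the new descriptor and of skipping the update when modify() returns empty. A hypothetical subclass sketch that stamps a marker attribute into the descriptor during an upgrade (the class name and attribute key are invented for illustration):

```java
import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;

/** Hypothetical migration procedure: tag descriptors that predate some upgrade. */
public class AddUpgradeMarkerProcedure extends ModifyTableDescriptorProcedure {
  private static final String MARKER = "hbase.example.upgrade.marker"; // invented attribute key

  public AddUpgradeMarkerProcedure() {
  }

  public AddUpgradeMarkerProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env, unmodified);
  }

  @Override
  protected Optional<TableDescriptor> modify(MasterProcedureEnv env, TableDescriptor current)
      throws IOException {
    if (current.getValue(MARKER) != null) {
      // Already migrated (possibly by a concurrent user change): nothing to update.
      return Optional.empty();
    }
    return Optional.of(
      TableDescriptorBuilder.newBuilder(current).setValue(MARKER, "true").build());
  }
}
```
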

+ * Since the migrating is asynchronouns, it is possible that users have already changed the rs + * group for a table, in this case we do not need to modify the table descriptor any more, then + * you could just return {@link Optional#empty()}. + */ + protected abstract Optional modify(MasterProcedureEnv env, + TableDescriptor current) throws IOException; + + @Override + protected Flow executeFromState(MasterProcedureEnv env, ModifyTableDescriptorState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + try { + switch (state) { + case MODIFY_TABLE_DESCRIPTOR_PREPARE: + Optional modified = modify(env, unmodifiedTableDescriptor); + if (modified.isPresent()) { + modifiedTableDescriptor = modified.get(); + setNextState(ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_UPDATE); + return Flow.HAS_MORE_STATE; + } else { + // do not need to modify + return Flow.NO_MORE_STATE; + } + case MODIFY_TABLE_DESCRIPTOR_UPDATE: + env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + if (isRollbackSupported(state)) { + setFailure("master-modify-table-descriptor", e); + } else { + LOG.warn("Retriable error trying to modify table descriptor={} (in state={})", + getTableName(), state, e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(MasterProcedureEnv env, ModifyTableDescriptorState state) + throws IOException, InterruptedException { + if (state == ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE) { + return; + } + throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected boolean isRollbackSupported(ModifyTableDescriptorState state) { + return state == ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE; + } + + @Override + protected ModifyTableDescriptorState getState(int stateId) { + return ModifyTableDescriptorState.forNumber(stateId); + } + + @Override + protected int getStateId(ModifyTableDescriptorState state) { + return state.getNumber(); + } + + @Override + protected ModifyTableDescriptorState getInitialState() { + return ModifyTableDescriptorState.MODIFY_TABLE_DESCRIPTOR_PREPARE; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); + ModifyTableDescriptorStateData.Builder builder = ModifyTableDescriptorStateData.newBuilder() + .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor)); + if (modifiedTableDescriptor != null) { + builder.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)); + } + serializer.serialize(builder.build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); + ModifyTableDescriptorStateData data = + serializer.deserialize(ModifyTableDescriptorStateData.class); + unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(data.getUnmodifiedTableSchema()); + if (data.hasModifiedTableSchema()) { + modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(data.getModifiedTableSchema()); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 
add51210a38f..d9829a5f1e1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint; +import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.SyncReplicationState; -import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -358,13 +358,13 @@ private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws DoNotRetry e); } } - // Default is HBaseInterClusterReplicationEndpoint and only it need to check cluster key - if (endpoint == null || endpoint instanceof HBaseInterClusterReplicationEndpoint) { + // Endpoints implementing HBaseReplicationEndpoint need to check cluster key + if (endpoint == null || endpoint instanceof HBaseReplicationEndpoint) { checkClusterKey(peerConfig.getClusterKey()); - } - // Default is HBaseInterClusterReplicationEndpoint which cannot replicate to same cluster - if (endpoint == null || !endpoint.canReplicateToSameCluster()) { - checkClusterId(peerConfig.getClusterKey()); + // Check if endpoint can replicate to the same cluster + if (endpoint == null || !endpoint.canReplicateToSameCluster()) { + checkSameClusterKey(peerConfig.getClusterKey()); + } } if (peerConfig.replicateAllUserTables()) { @@ -510,7 +510,7 @@ private void checkClusterKey(String clusterKey) throws DoNotRetryIOException { } } - private void checkClusterId(String clusterKey) throws DoNotRetryIOException { + private void checkSameClusterKey(String clusterKey) throws DoNotRetryIOException { String peerClusterId = ""; try { // Create the peer cluster config for get peer cluster id diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java index f52c16637c94..ede2949be00e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java @@ -136,7 +136,7 @@ private void processFile(Path file) throws IOException { out.println("Scanning -> " + file); FileSystem fs = file.getFileSystem(conf); try (HFile.Reader reader = HFile.createReader(fs, file, CacheConfig.DISABLED, true, conf); - HFileScanner scanner = reader.getScanner(false, false, false)) { + HFileScanner scanner = reader.getScanner(conf, false, false, false)) { if (procId != null) { if (scanner .seekTo(PrivateCellUtil.createFirstOnRow(Bytes.toBytes(procId.longValue()))) != -1) { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 5da0de9a3045..a9683ac762b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -567,12 +567,12 @@ private void pushPipelineToSnapshot() { boolean done = false; while (!done) { iterationsCnt++; - VersionedSegmentsList segments = pipeline.getVersionedList(); + VersionedSegmentsList segments = getImmutableSegments(); pushToSnapshot(segments.getStoreSegments()); // swap can return false in case the pipeline was updated by ongoing compaction // and the version increase, the chance of it happenning is very low // In Swap: don't close segments (they are in snapshot now) and don't update the region size - done = pipeline.swap(segments, null, false, false); + done = swapPipelineWithNull(segments); if (iterationsCnt>2) { // practically it is impossible that this loop iterates more than two times // (because the compaction is stopped and none restarts it while in snapshot request), @@ -585,6 +585,10 @@ private void pushPipelineToSnapshot() { } } + protected boolean swapPipelineWithNull(VersionedSegmentsList segments) { + return pipeline.swap(segments, null, false, false); + } + private void pushToSnapshot(List segments) { if(segments.isEmpty()) return; if(segments.size() == 1 && !segments.get(0).isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java index 965529739ed9..5f52b9562c54 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java @@ -22,6 +22,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.ListIterator; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -64,7 +65,16 @@ public class CompactionPipeline { private final LinkedList pipeline = new LinkedList<>(); // The list is volatile to avoid reading a new allocated reference before the c'tor is executed private volatile LinkedList readOnlyCopy = new LinkedList<>(); - // Version is volatile to ensure it is atomically read when not using a lock + /** + *

+   * Version is volatile to ensure it is atomically read when not using a lock.
+   * It indicates whether the suffix of the pipeline has changed (usage sketch below):
+   * 1. For {@link CompactionPipeline#pushHead(MutableSegment)}, a new {@link ImmutableSegment} is
+   *    only added at the head, so {@link #version} does not change.
+   * 2. For {@link CompactionPipeline#swap}, {@link #version} increases.
+   * 3. For {@link CompactionPipeline#replaceAtIndex}, {@link #version} increases.
+   * 
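+   * A minimal caller-side sketch of the check {@link #version} enables; the argument values and
+   * the {@code newSegment} variable are illustrative only, not part of this change:
+   * <pre>
+   *   VersionedSegmentsList vsl = getVersionedList(); // captures the suffix together with version
+   *   // ... build a new ImmutableSegment from vsl.getStoreSegments() ...
+   *   boolean done = swap(vsl, newSegment, true, true); // false if version changed in between
+   * </pre>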
+ */ private volatile long version = 0; public CompactionPipeline(RegionServicesForStores region) { @@ -290,10 +300,15 @@ public MemStoreSize getPipelineSize() { return memStoreSizing.getMemStoreSize(); } + /** + * Must be called under the {@link CompactionPipeline#pipeline} Lock. + */ private void swapSuffix(List suffix, ImmutableSegment segment, boolean closeSegmentsInSuffix) { - pipeline.removeAll(suffix); - if(segment != null) pipeline.addLast(segment); + matchAndRemoveSuffixFromPipeline(suffix); + if (segment != null) { + pipeline.addLast(segment); + } // During index merge we won't be closing the segments undergoing the merge. Segment#close() // will release the MSLAB chunks to pool. But in case of index merge there wont be any data copy // from old MSLABs. So the new cells in new segment also refers to same chunks. In case of data @@ -307,6 +322,41 @@ private void swapSuffix(List suffix, ImmutableSegment segment } } + /** + * Checking that the {@link Segment}s in suffix input parameter is same as the {@link Segment}s in + * {@link CompactionPipeline#pipeline} one by one from the last element to the first element of + * suffix. If matched, remove suffix from {@link CompactionPipeline#pipeline}.
+ * Must be called under the {@link CompactionPipeline#pipeline} Lock. + */ + private void matchAndRemoveSuffixFromPipeline(List suffix) { + if (suffix.isEmpty()) { + return; + } + if (pipeline.size() < suffix.size()) { + throw new IllegalStateException( + "CODE-BUG:pipleine size:[" + pipeline.size() + "],suffix size:[" + suffix.size() + + "],pipeline size must greater than or equals suffix size"); + } + + ListIterator suffixIterator = suffix.listIterator(suffix.size()); + ListIterator pipelineIterator = pipeline.listIterator(pipeline.size()); + int count = 0; + while (suffixIterator.hasPrevious()) { + Segment suffixSegment = suffixIterator.previous(); + Segment pipelineSegment = pipelineIterator.previous(); + if (suffixSegment != pipelineSegment) { + throw new IllegalStateException("CODE-BUG:suffix last:[" + count + "]" + suffixSegment + + " is not pipleline segment:[" + pipelineSegment + "]"); + } + count++; + } + + for (int index = 1; index <= count; index++) { + pipeline.pollLast(); + } + + } + // replacing one segment in the pipeline with a new one exactly at the same index // need to be called only within synchronized block @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="VO_VOLATILE_INCREMENT", diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java index 2ff7d58b8272..9b8b3ef909e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java @@ -135,6 +135,8 @@ public TunerResult tune(TunerContext context) { // Ignoring the first few tuner periods ignoreInitialPeriods++; rollingStatsForTunerSteps.insertDataValue(0); + LOG.info("Ignoring initial tuning periods: {} so far, {} to ignore", ignoreInitialPeriods, + numPeriodsToIgnore); return NO_OP_TUNER_RESULT; } StepDirection newTuneDirection = getTuneDirection(context); @@ -252,12 +254,15 @@ private StepDirection getTuneDirection(TunerContext context) { if (earlyMemstoreSufficientCheck && earlyBlockCacheSufficientCheck) { // Both memstore and block cache memory seems to be sufficient. No operation required. newTuneDirection = StepDirection.NEUTRAL; + tunerLog.append("Going to do nothing because no changes are needed."); } else if (earlyMemstoreSufficientCheck) { // Increase the block cache size and corresponding decrease in memstore size. newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE; + tunerLog.append("Going to increase the block cache size."); } else if (earlyBlockCacheSufficientCheck) { // Increase the memstore size and corresponding decrease in block cache size. newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE; + tunerLog.append("Going to increase the memstore size."); } else { // Early checks for sufficient memory failed. Tuning memory based on past statistics. // Boolean indicator to show if we need to revert previous step or not. @@ -347,8 +352,17 @@ private StepDirection getTuneDirection(TunerContext context) { } } } - if (LOG.isDebugEnabled()) { - LOG.debug(tunerLog.toString()); + // Log NEUTRAL decisions at DEBUG, because they are the most frequent and not that interesting. + // Log other decisions at INFO because they are making meaningful operational changes. 
+ switch (newTuneDirection) { + case NEUTRAL: + if (LOG.isDebugEnabled()) { + LOG.debug(tunerLog.toString()); + } + break; + default: + LOG.info(tunerLog.toString()); + break; } return newTuneDirection; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 5b26f9aa8ab7..69d9c8ff37d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -667,9 +667,9 @@ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte // If it is outside the range, return directly. f.initReader(); try { + Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow); if (top) { //check if larger than last key. - Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow); Optional lastKey = f.getLastKey(); // If lastKey is null means storefile is empty. if (!lastKey.isPresent()) { @@ -680,7 +680,6 @@ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte } } else { //check if smaller than first key - Cell splitKey = PrivateCellUtil.createLastOnRow(splitRow); Optional firstKey = f.getFirstKey(); // If firstKey is null means storefile is empty. if (!firstKey.isPresent()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 7af717bff49e..4bf2d9c25f1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -380,8 +380,8 @@ public class HRegionServer extends HBaseServerBase private Map coprocessorServiceHandlers = Maps.newHashMap(); /** - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. * @see HBASE-24667 */ @Deprecated @@ -521,9 +521,10 @@ protected String getUseThisHostnameInstead(Configuration conf) throws IOExceptio String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) { if (!StringUtils.isBlank(hostname)) { - String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + UNSAFE_RS_HOSTNAME_KEY + - " are mutually exclusive. Do not set " + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + - " to true while " + UNSAFE_RS_HOSTNAME_KEY + " is used"; + String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + + UNSAFE_RS_HOSTNAME_KEY + " are mutually exclusive. 
Do not set " + + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " to true while " + + UNSAFE_RS_HOSTNAME_KEY + " is used"; throw new IOException(msg); } else { return rpcServices.getSocketAddress().getHostName(); @@ -617,7 +618,9 @@ public boolean registerService(Service instance) { private static void checkCodecs(final Configuration c) throws IOException { // check to see if the codec list is available: String [] codecs = c.getStrings(REGIONSERVER_CODEC, (String[])null); - if (codecs == null) return; + if (codecs == null) { + return; + } for (String codec : codecs) { if (!CompressionTest.testCompression(codec)) { throw new IOException("Compression codec " + codec + @@ -852,9 +855,15 @@ public void run() { // Send interrupts to wake up threads if sleeping so they notice shutdown. // TODO: Should we check they are alive? If OOME could have exited already - if (this.hMemManager != null) this.hMemManager.stop(); - if (this.cacheFlusher != null) this.cacheFlusher.interruptIfNecessary(); - if (this.compactSplitThread != null) this.compactSplitThread.interruptIfNecessary(); + if (this.hMemManager != null) { + this.hMemManager.stop(); + } + if (this.cacheFlusher != null) { + this.cacheFlusher.interruptIfNecessary(); + } + if (this.compactSplitThread != null) { + this.compactSplitThread.interruptIfNecessary(); + } // Stop the snapshot and other procedure handlers, forcefully killing all running tasks if (rspmHost != null) { @@ -949,7 +958,9 @@ private boolean containsMetaTableRegions() { } private boolean areAllUserRegionsOffline() { - if (getNumberOfOnlineRegions() > 2) return false; + if (getNumberOfOnlineRegions() > 2) { + return false; + } boolean allUserRegionsOffline = true; for (Map.Entry e: this.onlineRegions.entrySet()) { if (!e.getValue().getRegionInfo().isMetaRegion()) { @@ -1185,7 +1196,9 @@ private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, lon private String getOnlineRegionsAsPrintableString() { StringBuilder sb = new StringBuilder(); for (Region r: this.onlineRegions.values()) { - if (sb.length() > 0) sb.append(", "); + if (sb.length() > 0) { + sb.append(", "); + } sb.append(r.getRegionInfo().getEncodedName()); } return sb.toString(); @@ -1284,7 +1297,7 @@ private void shutdownWAL(final boolean close) { * @param c Extra configuration. */ protected void handleReportForDutyResponse(final RegionServerStartupResponse c) - throws IOException { + throws IOException { try { boolean updateRootDir = false; for (NameStringPair e : c.getMapEntriesList()) { @@ -1560,7 +1573,7 @@ protected void chore() { this.instance.compactSplitThread.requestCompaction(hr, s, getName() + " requests major compaction; use default priority", Store.NO_PRIORITY, - CompactionLifeCycleTracker.DUMMY, null); + CompactionLifeCycleTracker.DUMMY, null); } else { this.instance.compactSplitThread.requestCompaction(hr, s, getName() + " requests major compaction; use configured priority", @@ -1595,7 +1608,9 @@ private static class PeriodicMemStoreFlusher extends ScheduledChore { protected void chore() { final StringBuilder whyFlush = new StringBuilder(); for (HRegion r : this.server.onlineRegions.values()) { - if (r == null) continue; + if (r == null) { + continue; + } if (r.shouldFlush(whyFlush)) { FlushRequester requester = server.getFlushRequester(); if (requester != null) { @@ -1698,7 +1713,7 @@ private void startServices() throws IOException { // Health checker thread. 
if (isHealthCheckerConfigured()) { int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, - HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); + HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration()); } // Executor status collect thread. @@ -2134,7 +2149,9 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co " after " + pauseTime + "ms delay (Master is coming online...).": " immediately."), ioe); - if (pause) Threads.sleep(pauseTime); + if (pause) { + Threads.sleep(pauseTime); + } tries++; if (rssStub == rss) { rssStub = null; @@ -2168,7 +2185,7 @@ private void triggerFlushInPrimaryRegion(final HRegion region) { } else { LOG.info("Executor is null; not running flush of primary region replica for {}", region.getRegionInfo()); - } + } } @InterfaceAudience.Private @@ -2299,8 +2316,7 @@ protected void stopServiceThreads() { } /** - * @return Return the object that implements the replication - * source executorService. + * @return Return the object that implements the replication source executorService. */ @Override public ReplicationSourceService getReplicationSourceService() { @@ -2397,8 +2413,8 @@ protected synchronized ServerName createRegionServerStatusStub(boolean refresh) } /** - * @return True if we should break loop because cluster is going down or - * this server has been stopped or hdfs has gone bad. + * @return True if we should break loop because cluster is going down or this server has been + * stopped or hdfs has gone bad. */ private boolean keepLooping() { return !this.stopped && isClusterUp(); @@ -2412,10 +2428,14 @@ private boolean keepLooping() { * @throws IOException */ private RegionServerStartupResponse reportForDuty() throws IOException { - if (this.masterless) return RegionServerStartupResponse.getDefaultInstance(); + if (this.masterless) { + return RegionServerStartupResponse.getDefaultInstance(); + } ServerName masterServerName = createRegionServerStatusStub(true); RegionServerStatusService.BlockingInterface rss = rssStub; - if (masterServerName == null || rss == null) return null; + if (masterServerName == null || rss == null) { + return null; + } RegionServerStartupResponse result = null; try { rpcServices.requestCount.reset(); @@ -2493,12 +2513,16 @@ private void closeMetaTableRegions(final boolean abort) { if (hri.isMetaRegion()) { meta = e.getValue(); } - if (meta != null) break; + if (meta != null) { + break; + } } } finally { this.onlineRegionsLock.writeLock().unlock(); } - if (meta != null) closeRegionIgnoreErrors(meta.getRegionInfo(), abort); + if (meta != null) { + closeRegionIgnoreErrors(meta.getRegionInfo(), abort); + } } /** @@ -2722,17 +2746,17 @@ public static void main(String[] args) { */ @Override public List getRegions(TableName tableName) { - List tableRegions = new ArrayList<>(); - synchronized (this.onlineRegions) { - for (HRegion region: this.onlineRegions.values()) { - RegionInfo regionInfo = region.getRegionInfo(); - if(regionInfo.getTable().equals(tableName)) { - tableRegions.add(region); - } - } - } - return tableRegions; - } + List tableRegions = new ArrayList<>(); + synchronized (this.onlineRegions) { + for (HRegion region: this.onlineRegions.values()) { + RegionInfo regionInfo = region.getRegionInfo(); + if(regionInfo.getTable().equals(tableName)) { + tableRegions.add(region); + } + } + } + return tableRegions; + } @Override public List getRegions() { @@ -2906,13 +2930,16 @@ public boolean removeRegion(final HRegion r, ServerName 
destination) { if (closeSeqNum == HConstants.NO_SEQNUM) { // No edits in WAL for this region; get the sequence number when the region was opened. closeSeqNum = r.getOpenSeqNum(); - if (closeSeqNum == HConstants.NO_SEQNUM) closeSeqNum = 0; + if (closeSeqNum == HConstants.NO_SEQNUM) { + closeSeqNum = 0; + } } boolean selfMove = ServerName.isSameAddress(destination, this.getServerName()); addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum, selfMove); if (selfMove) { - this.regionServerAccounting.getRetainedRegionRWRequestsCnt().put(r.getRegionInfo().getEncodedName() - , new Pair<>(r.getReadRequestsCount(), r.getWriteRequestsCount())); + this.regionServerAccounting.getRetainedRegionRWRequestsCnt().put( + r.getRegionInfo().getEncodedName(), + new Pair<>(r.getReadRequestsCount(), r.getWriteRequestsCount())); } } this.regionFavoredNodesMap.remove(r.getRegionInfo().getEncodedName()); @@ -3028,7 +3055,7 @@ public void updateRegionFavoredNodesMapping(String encodedRegionName, * Return the favored nodes for a region given its encoded name. Look at the * comment around {@link #regionFavoredNodesMap} on why we convert to InetSocketAddress[] * here. - * @param encodedRegionName + * @param encodedRegionName the encoded region name. * @return array of favored locations */ @Override @@ -3048,7 +3075,7 @@ private static class MovedRegionInfo { MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - } + } public ServerName getServerName() { return serverName; @@ -3065,7 +3092,8 @@ public long getSeqNum() { */ private static final int TIMEOUT_REGION_MOVED = (2 * 60 * 1000); - private void addToMovedRegions(String encodedName, ServerName destination, long closeSeqNum, boolean selfMove) { + private void addToMovedRegions(String encodedName, ServerName destination, + long closeSeqNum, boolean selfMove) { if (selfMove) { LOG.warn("Not adding moved region record: " + encodedName + " to self."); return; @@ -3086,7 +3114,7 @@ public MovedRegionInfo getMovedRegion(String encodedRegionName) { @InterfaceAudience.Private public int movedRegionCacheExpiredTime() { - return TIMEOUT_REGION_MOVED; + return TIMEOUT_REGION_MOVED; } private String getMyEphemeralNodePath() { @@ -3114,8 +3142,8 @@ CoprocessorServiceResponse execRegionServerService( String serviceName = call.getServiceName(); Service service = coprocessorServiceHandlers.get(serviceName); if (service == null) { - throw new UnknownProtocolException(null, "No registered coprocessor executorService found for " + - serviceName); + throw new UnknownProtocolException(null, + "No registered coprocessor executorService found for " + serviceName); } ServiceDescriptor serviceDesc = service.getDescriptorForType(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index e78e74ce0890..32693ab68511 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.TableName; @@ -67,7 +66,6 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.coprocessor.ReadOnlyConfiguration; @@ -531,6 +529,9 @@ private List openStoreFiles(Collection files, boolean int totalValidStoreFile = 0; for (StoreFileInfo storeFileInfo : files) { + // The StoreFileInfo will carry store configuration down to HFile, we need to set it to + // our store's CompoundConfiguration here. + storeFileInfo.setConf(conf); // open each store file in parallel completionService.submit(() -> this.createStoreFileAndReader(storeFileInfo)); totalValidStoreFile++; @@ -806,7 +807,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { long verificationStartTime = EnvironmentEdgeManager.currentTime(); LOG.info("Full verification started for bulk load hfile: {}", srcPath); Cell prevCell = null; - HFileScanner scanner = reader.getScanner(false, false, false); + HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); do { Cell cell = scanner.getCell(); @@ -2519,10 +2520,11 @@ protected OffPeakHours getOffPeakHours() { @Override public void onConfigurationChange(Configuration conf) { - this.conf = StoreUtils.createStoreConfiguration(conf, region.getTableDescriptor(), + Configuration storeConf = StoreUtils.createStoreConfiguration(conf, region.getTableDescriptor(), getColumnFamilyDescriptor()); - this.storeEngine.compactionPolicy.setConf(conf); - this.offPeakHours = OffPeakHours.getInstance(conf); + this.conf = storeConf; + this.storeEngine.compactionPolicy.setConf(storeConf); + this.offPeakHours = OffPeakHours.getInstance(storeConf); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index d0a28ed771ce..9a6a81c1669c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -673,9 +673,9 @@ private void notifyFlushRequest(Region region, boolean emergencyFlush) { FlushType type = null; if (emergencyFlush) { type = isAboveHighWaterMark(); - if (type == null) { - type = isAboveLowWaterMark(); - } + } + if (type == null) { + type = isAboveLowWaterMark(); } for (FlushRequestListener listener : flushRequestListeners) { listener.flushRequested(type, region); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index b13ff64d5b2d..724da1a2116e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2089,6 +2089,7 @@ public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException { long before = EnvironmentEdgeManager.currentTime(); CellScanner cells = ((HBaseRpcController) controller).cellScanner(); + ((HBaseRpcController) controller).setCellScanner(null); try { checkOpen(); List entries = request.getEntryList(); @@ -2209,6 +2210,7 @@ public ReplicateWALEntryResponse 
replicateWALEntry(final RpcController controlle List entries = request.getEntryList(); checkShouldRejectReplicationRequest(entries); CellScanner cellScanner = ((HBaseRpcController) controller).cellScanner(); + ((HBaseRpcController) controller).setCellScanner(null); server.getRegionServerCoprocessorHost().preReplicateLogEntries(); server.getReplicationSinkService().replicateLogEntries(entries, cellScanner, request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), @@ -3757,7 +3759,8 @@ private void executeOpenRegionProcedures(OpenRegionRequest request, long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1; for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) { RegionInfo regionInfo = ProtobufUtil.toRegionInfo(regionOpenInfo.getRegion()); - TableDescriptor tableDesc = tdCache.get(regionInfo.getTable()); + TableName tableName = regionInfo.getTable(); + TableDescriptor tableDesc = tdCache.get(tableName); if (tableDesc == null) { try { tableDesc = server.getTableDescriptors().get(regionInfo.getTable()); @@ -3769,6 +3772,9 @@ private void executeOpenRegionProcedures(OpenRegionRequest request, LOG.warn("Failed to get TableDescriptor of {}, will try again in the handler", regionInfo.getTable(), e); } + if(tableDesc != null) { + tdCache.put(tableName, tableDesc); + } } if (regionOpenInfo.getFavoredNodesCount() > 0) { server.updateRegionFavoredNodesMapping(regionInfo.getEncodedName(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 5eaab23fc6cf..7445f4787ed4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -25,6 +25,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -49,7 +50,7 @@ * Describe a StoreFile (hfile, reference, link) */ @InterfaceAudience.Private -public class StoreFileInfo { +public class StoreFileInfo implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(StoreFileInfo.class); /** @@ -77,7 +78,7 @@ public class StoreFileInfo { public static final boolean DEFAULT_STORE_FILE_READER_NO_READAHEAD = false; // Configuration - private final Configuration conf; + private Configuration conf; // FileSystem handle private final FileSystem fs; @@ -224,6 +225,16 @@ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileSt DEFAULT_STORE_FILE_READER_NO_READAHEAD); } + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + /** * Size of the Hfile * @return size @@ -659,10 +670,6 @@ FileSystem getFileSystem() { return this.fs; } - Configuration getConf() { - return this.conf; - } - boolean isNoReadahead() { return this.noReadahead; } @@ -698,4 +705,5 @@ StoreFileReader postStoreFileReaderOpen(ReaderContext context, CacheConfig cache public void initHFileInfo(ReaderContext context) throws IOException { this.hfileInfo = new HFileInfo(context, conf); } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java 
index 7550511a356e..32ee47e21f1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -75,6 +75,7 @@ public class StoreFileReader { private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null; private boolean skipResetSeqId = true; private int prefixLength = -1; + protected Configuration conf; // Counter that is incremented every time a scanner is created on the // store file. It is decremented when the scan on the store file is @@ -82,16 +83,18 @@ public class StoreFileReader { private final AtomicInteger refCount; private final ReaderContext context; - private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context) { + private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context, + Configuration conf) { this.reader = reader; bloomFilterType = BloomType.NONE; this.refCount = refCount; this.context = context; + this.conf = conf; } public StoreFileReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, AtomicInteger refCount, Configuration conf) throws IOException { - this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context); + this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context, conf); } void copyFields(StoreFileReader storeFileReader) throws IOException { @@ -205,7 +208,7 @@ public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { @Deprecated public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { - return reader.getScanner(cacheBlocks, pread, isCompaction); + return reader.getScanner(conf, cacheBlocks, pread, isCompaction); } public void close(boolean evictOnClose) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java new file mode 100644 index 000000000000..bca77c80aa0e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Procedure for migrating rs group information to table descriptor. + */ +@InterfaceAudience.Private +public class MigrateRSGroupProcedure extends ModifyTableDescriptorProcedure { + + private static final Logger LOG = LoggerFactory.getLogger(MigrateRSGroupProcedure.class); + + public MigrateRSGroupProcedure() { + } + + public MigrateRSGroupProcedure(MasterProcedureEnv env, TableDescriptor unmodified) { + super(env, unmodified); + } + + @Override + protected Optional modify(MasterProcedureEnv env, TableDescriptor current) + throws IOException { + if (current.getRegionServerGroup().isPresent()) { + // usually this means user has set the rs group using the new code which will set the group + // directly on table descriptor, skip. + LOG.debug("Skip migrating {} since it is already in group {}", current.getTableName(), + current.getRegionServerGroup().get()); + return Optional.empty(); + } + RSGroupInfo group = + env.getMasterServices().getRSGroupInfoManager().getRSGroupForTable(current.getTableName()); + if (group == null) { + LOG.debug("RSGroup for table {} is empty when migrating, usually this should not happen" + + " unless we have removed the RSGroup, ignore...", current.getTableName()); + return Optional.empty(); + } + return Optional + .of(TableDescriptorBuilder.newBuilder(current).setRegionServerGroup(group.getName()).build()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 63e1866a657d..b13afef006b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -72,10 +72,12 @@ import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; import org.apache.hadoop.hbase.util.Bytes; @@ -278,7 +280,6 @@ public void serverRemoved(ServerName serverName) { updateDefaultServers(); } }); - migrate(); } static RSGroupInfoManager getInstance(MasterServices masterServices) throws IOException { @@ -492,11 +493,14 @@ private List retrieveGroupListFromZookeeper() throws IOException { private void migrate(Collection groupList) { TableDescriptors tds = masterServices.getTableDescriptors(); + ProcedureExecutor procExec = + masterServices.getMasterProcedureExecutor(); for (RSGroupInfo groupInfo : groupList) { if 
(groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { continue; } SortedSet failedTables = new TreeSet<>(); + List procs = new ArrayList<>(); for (TableName tableName : groupInfo.getTables()) { LOG.debug("Migrating {} in group {}", tableName, groupInfo.getName()); TableDescriptor oldTd; @@ -517,20 +521,24 @@ private void migrate(Collection groupList) { oldTd.getRegionServerGroup().get()); continue; } - TableDescriptor newTd = TableDescriptorBuilder.newBuilder(oldTd) - .setRegionServerGroup(groupInfo.getName()).build(); // This is a bit tricky. Since we know that the region server group config in // TableDescriptor will only be used at master side, it is fine to just update the table // descriptor on file system and also the cache, without reopening all the regions. This // will be much faster than the normal modifyTable. And when upgrading, we will update // master first and then region server, so after all the region servers has been reopened, // the new TableDescriptor will be loaded. + MigrateRSGroupProcedure proc = + new MigrateRSGroupProcedure(procExec.getEnvironment(), oldTd); + procExec.submitProcedure(proc); + procs.add(proc); + } + for (MigrateRSGroupProcedure proc : procs) { try { - tds.update(newTd); + ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60000); } catch (IOException e) { - LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e); - failedTables.add(tableName); - continue; + LOG.warn("Failed to migrate rs group {} for table {}", groupInfo.getName(), + proc.getTableName()); + failedTables.add(proc.getTableName()); } } LOG.debug("Done migrating {}, failed tables {}", groupInfo.getName(), failedTables); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index df48083cf01e..4f6638e9c439 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; +import static org.apache.hadoop.hbase.util.Addressing.inetSocketAddress2String; import java.io.Closeable; import java.io.IOException; import java.net.BindException; @@ -1707,7 +1708,7 @@ protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boole new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); hosts = Lists.newArrayList(); for (InetSocketAddress server : parser.getServerAddresses()) { - hosts.add(server.toString()); + hosts.add(inetSocketAddress2String(server)); } if (allowedFailures > (hosts.size() - 1) / 2) { LOG.warn( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index a172db2fa13f..f0549c3d633c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -145,7 +145,7 @@ public static void doSmokeTest(FileSystem fs, Path path, String codec) Cell cc = null; HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf); try { - HFileScanner scanner = reader.getScanner(false, true); + HFileScanner scanner = reader.getScanner(conf, false, true); scanner.seekTo(); // position to the start of file // Scanner does 
not do Cells yet. Do below for now till fixed. cc = scanner.getCell(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 778d66da63d8..286caf8ed3b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -38,7 +38,9 @@ import java.util.Iterator; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -284,21 +286,51 @@ public RegionMover build() throws IOException { */ public boolean load() throws ExecutionException, InterruptedException, TimeoutException { ExecutorService loadPool = Executors.newFixedThreadPool(1); - Future loadTask = loadPool.submit(() -> { + Future loadTask = loadPool.submit(getMetaRegionMovePlan()); + boolean isMetaMoved = waitTaskToFinish(loadPool, loadTask, "loading"); + if (!isMetaMoved) { + return false; + } + loadPool = Executors.newFixedThreadPool(1); + loadTask = loadPool.submit(getNonMetaRegionsMovePlan()); + return waitTaskToFinish(loadPool, loadTask, "loading"); + } + + private Callable getMetaRegionMovePlan() { + return getRegionsMovePlan(true); + } + + private Callable getNonMetaRegionsMovePlan() { + return getRegionsMovePlan(false); + } + + private Callable getRegionsMovePlan(boolean moveMetaRegion) { + return () -> { try { List regionsToMove = readRegionsFromFile(filename); if (regionsToMove.isEmpty()) { LOG.info("No regions to load.Exiting"); return true; } - loadRegions(regionsToMove); + Optional metaRegion = getMetaRegionInfoIfToBeMoved(regionsToMove); + if (moveMetaRegion) { + if (metaRegion.isPresent()) { + loadRegions(Collections.singletonList(metaRegion.get())); + } + } else { + metaRegion.ifPresent(regionsToMove::remove); + loadRegions(regionsToMove); + } } catch (Exception e) { LOG.error("Error while loading regions to " + hostname, e); return false; } return true; - }); - return waitTaskToFinish(loadPool, loadTask, "loading"); + }; + } + + private Optional getMetaRegionInfoIfToBeMoved(List regionsToMove) { + return regionsToMove.stream().filter(RegionInfo::isMetaRegion).findFirst(); } private void loadRegions(List regionsToMove) @@ -472,30 +504,43 @@ private void unloadRegions(ServerName server, List regionServers, } LOG.info("Moving {} regions from {} to {} servers using {} threads .Ack Mode: {}", regionsToMove.size(), this.hostname, regionServers.size(), this.maxthreads, ack); - final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); - List> taskList = new ArrayList<>(); - int serverIndex = 0; - for (RegionInfo regionToMove : regionsToMove) { - if (ack) { - Future task = moveRegionsPool.submit( - new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); - taskList.add(task); - } else { - Future task = moveRegionsPool.submit( - new MoveWithoutAck(admin, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); - taskList.add(task); - } - serverIndex = (serverIndex + 1) % regionServers.size(); + + Optional metaRegion = getMetaRegionInfoIfToBeMoved(regionsToMove); + if (metaRegion.isPresent()) { + RegionInfo meta = metaRegion.get(); + submitRegionMovesWhileUnloading(server, regionServers, movedRegions, + 
Collections.singletonList(meta)); + regionsToMove.remove(meta); } - moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); - waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); + submitRegionMovesWhileUnloading(server, regionServers, movedRegions, regionsToMove); } } + private void submitRegionMovesWhileUnloading(ServerName server, List regionServers, + List movedRegions, List regionsToMove) throws Exception { + final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); + List> taskList = new ArrayList<>(); + int serverIndex = 0; + for (RegionInfo regionToMove : regionsToMove) { + if (ack) { + Future task = moveRegionsPool.submit( + new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), + movedRegions)); + taskList.add(task); + } else { + Future task = moveRegionsPool.submit( + new MoveWithoutAck(admin, regionToMove, server, regionServers.get(serverIndex), + movedRegions)); + taskList.add(task); + } + serverIndex = (serverIndex + 1) % regionServers.size(); + } + moveRegionsPool.shutdown(); + long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() + .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); + } + private boolean waitTaskToFinish(ExecutorService pool, Future task, String operation) throws TimeoutException, InterruptedException, ExecutionException { pool.shutdown(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index 69fb21cc9d06..3855e4a6d495 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -438,7 +438,7 @@ public SequentialReadBenchmark(Configuration conf, FileSystem fs, @Override void setUp() throws Exception { super.setUp(); - this.scanner = this.reader.getScanner(false, false); + this.scanner = this.reader.getScanner(conf, false, false); this.scanner.seekTo(); } @@ -470,7 +470,7 @@ public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, @Override void doRow(int i) throws Exception { - HFileScanner scanner = this.reader.getScanner(false, true); + HFileScanner scanner = this.reader.getScanner(conf, false, true); byte [] b = getRandomRow(); if (scanner.seekTo(createCell(b)) < 0) { LOG.info("Not able to seekTo " + new String(b)); @@ -497,7 +497,7 @@ public UniformRandomSmallScan(Configuration conf, FileSystem fs, @Override void doRow(int i) throws Exception { - HFileScanner scanner = this.reader.getScanner(false, false); + HFileScanner scanner = this.reader.getScanner(conf, false, false); byte [] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); Cell c = createCell(b); @@ -536,7 +536,7 @@ public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, @Override void doRow(int i) throws Exception { - HFileScanner scanner = this.reader.getScanner(false, true); + HFileScanner scanner = this.reader.getScanner(conf, false, true); byte[] gaussianRandomRowBytes = getGaussianRandomRowBytes(); scanner.seekTo(createCell(gaussianRandomRowBytes)); for (int ii = 0; ii < 30; ii++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index 70d39eb3154c..997fac56fe94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -111,7 +111,7 @@ public static void setupCluster() throws Exception { // We don't want the cleaner to remove files. The tests do that. UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().cancel(true); - POOL = new DirScanPool(UTIL.getConfiguration()); + POOL = DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); } private static void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 6355abd0f367..92b6b78e14cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -119,7 +119,7 @@ public static void setupCluster() throws Exception { String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher); ZKUtil.createWithParents(watcher, archivingZNode); rss = mock(RegionServerServices.class); - POOL = new DirScanPool(UTIL.getConfiguration()); + POOL= DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); } private static void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java index 4860059d5b74..9c97493cb38a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java @@ -36,38 +36,32 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.util.RedundantKVGenerator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class HFileTestBase { - static { - Log4jUtils.setLogLevel("org.apache.hadoop.hbase.io.compress", "TRACE"); - } - protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected static final Logger LOG = LoggerFactory.getLogger(HFileTestBase.class); protected static final SecureRandom RNG = new SecureRandom(); - protected static FileSystem fs; + protected static FileSystem FS; public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Disable block cache in this test. 
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); - conf.setInt("hfile.format.version", 3); - fs = FileSystem.get(conf); + FS = FileSystem.get(conf); } @SuppressWarnings("deprecation") - public void doTest(Compression.Algorithm compression) throws Exception { + public void doTest(Configuration conf, Path path, Compression.Algorithm compression) + throws Exception { // Create 10000 random test KVs RedundantKVGenerator generator = new RedundantKVGenerator(); List testKvs = generator.generateTestKeyValues(10000); // Iterate through data block encoding and compression combinations - Configuration conf = TEST_UTIL.getConfiguration(); CacheConfig cacheConf = new CacheConfig(conf); HFileContext fileContext = new HFileContextBuilder() .withBlockSize(4096) // small block @@ -75,9 +69,7 @@ public void doTest(Compression.Algorithm compression) throws Exception { .build(); // write a new test HFile LOG.info("Writing with " + fileContext); - Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); - FSDataOutputStream out = fs.create(path); + FSDataOutputStream out = FS.create(path); HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(out) .withFileContext(fileContext) @@ -95,9 +87,9 @@ public void doTest(Compression.Algorithm compression) throws Exception { LOG.info("Reading with " + fileContext); int i = 0; HFileScanner scanner = null; - HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf); + HFile.Reader reader = HFile.createReader(FS, path, cacheConf, true, conf); try { - scanner = reader.getScanner(false, false); + scanner = reader.getScanner(conf, false, false); assertTrue("Initial seekTo failed", scanner.seekTo()); do { Cell kv = scanner.getCell(); @@ -114,9 +106,9 @@ public void doTest(Compression.Algorithm compression) throws Exception { // Test random seeks with pread LOG.info("Random seeking with " + fileContext); - reader = HFile.createReader(fs, path, cacheConf, true, conf); + reader = HFile.createReader(FS, path, cacheConf, true, conf); try { - scanner = reader.getScanner(false, true); + scanner = reader.getScanner(conf, false, true); assertTrue("Initial seekTo failed", scanner.seekTo()); for (i = 0; i < 100; i++) { KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index 4cb48696ed3e..da5bd73a9e9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -30,11 +30,14 @@ import java.util.Collection; import java.util.List; import java.util.Random; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -82,9 +85,9 @@ public class TestDataBlockEncoders { + DataBlockEncoding.ID_SIZE; static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; - private RedundantKVGenerator generator = new RedundantKVGenerator(); - 
private Random randomizer = new Random(42L); - + private final Configuration conf = HBaseConfiguration.create(); + private final RedundantKVGenerator generator = new RedundantKVGenerator(); + private final Random randomizer = new Random(42L); private final boolean includesMemstoreTS; private final boolean includesTags; private final boolean useOffheapData; @@ -101,8 +104,8 @@ public TestDataBlockEncoders(boolean includesMemstoreTS, boolean includesTag, this.useOffheapData = useOffheapData; } - private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo, - DataBlockEncoding encoding) { + private HFileBlockEncodingContext getEncodingContext(Configuration conf, + Compression.Algorithm algo, DataBlockEncoding encoding) { DataBlockEncoder encoder = encoding.getEncoder(); HFileContext meta = new HFileContextBuilder() .withHBaseCheckSum(false) @@ -110,9 +113,9 @@ private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo, .withIncludesTags(includesTags) .withCompression(algo).build(); if (encoder != null) { - return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta); + return encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta); } else { - return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta); + return new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta); } } @@ -199,7 +202,7 @@ public void testSeekingOnSample() throws IOException { } LOG.info("Encoder: " + encoder); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); HFileContext meta = new HFileContextBuilder() .withHBaseCheckSum(false) .withIncludesMvcc(includesMemstoreTS) @@ -207,7 +210,7 @@ public void testSeekingOnSample() throws IOException { .withCompression(Compression.Algorithm.NONE) .build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); encodedSeekers.add(seeker); } @@ -272,7 +275,7 @@ public void testNextOnSample() throws IOException { } DataBlockEncoder encoder = encoding.getEncoder(); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); HFileContext meta = new HFileContextBuilder() .withHBaseCheckSum(false) .withIncludesMvcc(includesMemstoreTS) @@ -280,7 +283,7 @@ public void testNextOnSample() throws IOException { .withCompression(Compression.Algorithm.NONE) .build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); int i = 0; do { @@ -315,7 +318,7 @@ public void testFirstKeyInBlockOnSample() throws IOException { } DataBlockEncoder encoder = encoding.getEncoder(); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); Cell key = encoder.getFirstKeyCellInBlock(new 
SingleByteBuff(encodedBuffer)); KeyValue firstKv = sampleKv.get(0); if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) { @@ -336,13 +339,13 @@ public void testRowIndexWithTagsButNoTagsInCell() throws IOException { kvList.add(expectedKV); DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1; DataBlockEncoder encoder = encoding.getEncoder(); - ByteBuffer encodedBuffer = - encodeKeyValues(encoding, kvList, getEncodingContext(Algorithm.NONE, encoding), false); + ByteBuffer encodedBuffer = encodeKeyValues(encoding, kvList, + getEncodingContext(conf, Algorithm.NONE, encoding), false); HFileContext meta = - new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); Cell cell = seeker.getCell(); Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength()); @@ -393,9 +396,9 @@ private void testEncodersOnDataset(List kvList, boolean includesMemsto if (encoder == null) { continue; } - HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding, - HFILEBLOCK_DUMMY_HEADER, fileContext); - + HFileBlockEncodingContext encodingContext = + new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, + fileContext); ByteArrayOutputStream baos = new ByteArrayOutputStream(); baos.write(HFILEBLOCK_DUMMY_HEADER); DataOutputStream dos = new DataOutputStream(baos); @@ -441,7 +444,7 @@ private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf, HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags) .withCompression(Compression.Algorithm.NONE).build(); - actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta)); + actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(conf, meta)); actualDataset.rewind(); // this is because in case of prefix tree the decoded stream will not have diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index 44dfc35e235f..2c40fa394066 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -24,9 +24,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -54,6 +57,7 @@ public class TestSeekToBlockWithEncoders { static final byte[] HFILEBLOCK_DUMMY_HEADER = new 
byte[HConstants.HFILEBLOCK_HEADER_SIZE]; private final boolean useOffheapData; + private final Configuration conf = HBaseConfiguration.create(); @Parameters public static Collection parameters() { @@ -283,14 +287,14 @@ private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) } DataBlockEncoder encoder = encoding.getEncoder(); HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) - .withIncludesMvcc(false).withIncludesTags(false) - .withCompression(Compression.Algorithm.NONE).build(); - HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding, - HFILEBLOCK_DUMMY_HEADER, meta); + .withIncludesMvcc(false).withIncludesTags(false) + .withCompression(Compression.Algorithm.NONE).build(); + HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(conf, + encoding, HFILEBLOCK_DUMMY_HEADER, meta); ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs, - encodingContext, this.useOffheapData); + encodingContext, this.useOffheapData); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); encodedSeekers.add(seeker); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 9d376d5e3a4a..bac0c42b1f2e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -283,7 +283,7 @@ private void readStoreFile(boolean useTags) throws IOException { .withIncludesTags(useTags).build(); final boolean cacheBlocks = false; final boolean pread = false; - HFileScanner scanner = reader.getScanner(cacheBlocks, pread); + HFileScanner scanner = reader.getScanner(conf, cacheBlocks, pread); assertTrue(testDescription, scanner.seekTo()); long offset = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java index 9df4149a80f2..22d045dd267a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java @@ -30,6 +30,7 @@ import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -85,7 +86,7 @@ public void testNewBlocksHaveDefaultChecksum() throws IOException { Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum"); FSDataOutputStream os = fs.create(path); HFileContext meta = new HFileContextBuilder().build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); DataOutputStream dos = hbw.startWriting(BlockType.DATA); for (int i = 0; i < 1000; ++i) dos.writeInt(i); @@ -105,7 +106,7 @@ public void testNewBlocksHaveDefaultChecksum() throws IOException { .withFilePath(path) .build(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, - meta, ByteBuffAllocator.HEAP); + meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); HFileBlock b = 
hbr.readBlockData(0, -1, false, false, true); assertTrue(!b.isSharedMem()); assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode()); @@ -137,7 +138,7 @@ public void testVerifyCheckSum() throws IOException { HFileContext meta = new HFileContextBuilder() .withChecksumType(ckt) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); DataOutputStream dos = hbw.startWriting(BlockType.DATA); for (int i = 0; i < intCount; ++i) { dos.writeInt(i); @@ -158,7 +159,7 @@ public void testVerifyCheckSum() throws IOException { .withFilePath(path) .build(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, - meta, ByteBuffAllocator.HEAP); + meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, false, false, true); assertTrue(!b.isSharedMem()); @@ -206,7 +207,7 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti .withIncludesTags(useTags) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); long totalSize = 0; for (int blockId = 0; blockId < 2; ++blockId) { DataOutputStream dos = hbw.startWriting(BlockType.DATA); @@ -234,7 +235,8 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti .withFileSystem(fs) .withFilePath(path) .build(); - HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta); + HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); b.sanityCheck(); assertEquals(4936, b.getUncompressedSizeWithoutHeader()); @@ -276,7 +278,8 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti // Now, use a completely new reader. Switch off hbase checksums in // the configuration. In this case, we should not detect // any retries within hbase. - HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false); + Configuration conf = TEST_UTIL.getConfiguration(); + HFileSystem newfs = new HFileSystem(conf, false); assertEquals(false, newfs.useHBaseChecksum()); is = new FSDataInputStreamWrapper(newfs, path); context = new ReaderContextBuilder() @@ -285,7 +288,7 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti .withFileSystem(newfs) .withFilePath(path) .build(); - hbr = new CorruptedFSReaderImpl(context, meta); + hbr = new CorruptedFSReaderImpl(context, meta, conf); b = hbr.readBlockData(0, -1, pread, false, true); is.close(); b.sanityCheck(); @@ -329,7 +332,7 @@ protected void testChecksumInternals(boolean useTags) throws IOException { .withHBaseCheckSum(true) .withBytesPerCheckSum(bytesPerChecksum) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); // write one block. 
The block has data @@ -373,7 +376,8 @@ protected void testChecksumInternals(boolean useTags) throws IOException { .withFilePath(path) .build(); HFileBlock.FSReader hbr = - new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP); + new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff); is.close(); @@ -413,8 +417,9 @@ static private class CorruptedFSReaderImpl extends HFileBlock.FSReaderImpl { */ boolean corruptDataStream = false; - public CorruptedFSReaderImpl(ReaderContext context, HFileContext meta) throws IOException { - super(context, meta, ByteBuffAllocator.HEAP); + public CorruptedFSReaderImpl(ReaderContext context, HFileContext meta, Configuration conf) + throws IOException { + super(context, meta, ByteBuffAllocator.HEAP, conf); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 9728fb4d5c24..301894a78688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -528,7 +528,7 @@ void basicWithSomeCodec(String codec, boolean useTags) throws IOException { System.out.println(cacheConf.toString()); // Load up the index. // Get a scanner that caches and that does not use pread. - HFileScanner scanner = reader.getScanner(true, false); + HFileScanner scanner = reader.getScanner(conf, true, false); // Align scanner at start of the file. scanner.seekTo(); readAllRecords(scanner); @@ -617,7 +617,7 @@ private void metablocks(final String compress) throws Exception { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, mFile).build(); Reader reader = createReaderFromStream(context, cacheConf, conf); // No data -- this should return false. 
- assertFalse(reader.getScanner(false, false).seekTo()); + assertFalse(reader.getScanner(conf, false, false).seekTo()); someReadingWithMetaBlock(reader); fs.delete(mFile, true); reader.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 3bbe17d51fd5..4fb0e7b19c5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -269,7 +269,7 @@ static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo, .withIncludesTags(includesTag) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); DataOutputStream dos = hbw.startWriting(blockType); writeTestBlockContents(dos); dos.flush(); @@ -351,8 +351,9 @@ private void assertRelease(HFileBlock blk) { } protected void testReaderV2Internals() throws IOException { - if(includesTag) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); + final Configuration conf = TEST_UTIL.getConfiguration(); + if (includesTag) { + conf.setInt("hfile.format.version", 3); } for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : new boolean[] { false, true }) { @@ -367,7 +368,7 @@ protected void testReaderV2Internals() throws IOException { .withIncludesTags(includesTag) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, + HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta); long totalSize = 0; for (int blockId = 0; blockId < 2; ++blockId) { @@ -391,7 +392,8 @@ protected void testReaderV2Internals() throws IOException { .withFilePath(path) .withFileSystem(fs) .build(); - HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); is.close(); assertEquals(0, HFile.getAndResetChecksumFailuresCount()); @@ -410,7 +412,8 @@ protected void testReaderV2Internals() throws IOException { .withFilePath(path) .withFileSystem(fs) .build(); - hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc); + hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc, + TEST_UTIL.getConfiguration()); b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true); assertEquals(expected, b); @@ -444,8 +447,9 @@ public void testDataBlockEncoding() throws IOException { private void testInternals() throws IOException { final int numBlocks = 5; + final Configuration conf = TEST_UTIL.getConfiguration(); if(includesTag) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); + conf.setInt("hfile.format.version", 3); } for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : new boolean[] { false, true }) { @@ -463,7 +467,7 @@ private void testInternals() throws IOException { .withIncludesTags(includesTag) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(conf, dataBlockEncoder, meta); long totalSize = 0; final List encodedSizes = new ArrayList<>(); 
final List encodedBlocks = new ArrayList<>(); @@ -500,8 +504,8 @@ private void testInternals() throws IOException { .withFileSystem(fs) .build(); HFileBlock.FSReaderImpl hbr = - new HFileBlock.FSReaderImpl(context, meta, alloc); - hbr.setDataBlockEncoder(dataBlockEncoder); + new HFileBlock.FSReaderImpl(context, meta, alloc, conf); + hbr.setDataBlockEncoder(dataBlockEncoder, conf); hbr.setIncludesMemStoreTS(includesMemstoreTS); HFileBlock blockFromHFile, blockUnpacked; int pos = 0; @@ -609,6 +613,7 @@ public void testPreviousOffset() throws IOException { protected void testPreviousOffsetInternals() throws IOException { // TODO: parameterize these nested loops. + Configuration conf = TEST_UTIL.getConfiguration(); for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : BOOLEAN_VALUES) { for (boolean cacheOnWrite : BOOLEAN_VALUES) { @@ -620,8 +625,8 @@ protected void testPreviousOffsetInternals() throws IOException { List expectedPrevOffsets = new ArrayList<>(); List expectedTypes = new ArrayList<>(); List expectedContents = cacheOnWrite ? new ArrayList<>() : null; - long totalSize = writeBlocks(rand, algo, path, expectedOffsets, - expectedPrevOffsets, expectedTypes, expectedContents); + long totalSize = writeBlocks(TEST_UTIL.getConfiguration(), rand, algo, path, + expectedOffsets, expectedPrevOffsets, expectedTypes, expectedContents); FSDataInputStream is = fs.open(path); HFileContext meta = new HFileContextBuilder() @@ -635,8 +640,7 @@ protected void testPreviousOffsetInternals() throws IOException { .withFilePath(path) .withFileSystem(fs) .build(); - HFileBlock.FSReader hbr = - new HFileBlock.FSReaderImpl(context, meta, alloc); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf); long curOffset = 0; for (int i = 0; i < NUM_TEST_BLOCKS; ++i) { if (!pread) { @@ -819,12 +823,14 @@ public void testConcurrentReading() throws Exception { protected void testConcurrentReadingInternals() throws IOException, InterruptedException, ExecutionException { + Configuration conf = TEST_UTIL.getConfiguration(); for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) { Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading"); Random rand = defaultRandom(); List offsets = new ArrayList<>(); List types = new ArrayList<>(); - writeBlocks(rand, compressAlgo, path, offsets, null, types, null); + writeBlocks(TEST_UTIL.getConfiguration(), rand, compressAlgo, path, offsets, null, + types, null); FSDataInputStream is = fs.open(path); long fileSize = fs.getFileStatus(path).getLen(); HFileContext meta = new HFileContextBuilder() @@ -839,7 +845,7 @@ protected void testConcurrentReadingInternals() throws IOException, .withFilePath(path) .withFileSystem(fs) .build(); - HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf); Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS); ExecutorCompletionService ecs = new ExecutorCompletionService<>(exec); @@ -862,7 +868,7 @@ protected void testConcurrentReadingInternals() throws IOException, } } - private long writeBlocks(Random rand, Compression.Algorithm compressAlgo, + private long writeBlocks(Configuration conf, Random rand, Compression.Algorithm compressAlgo, Path path, List expectedOffsets, List expectedPrevOffsets, List expectedTypes, List expectedContents ) throws IOException { @@ -875,7 +881,7 @@ private long writeBlocks(Random rand, Compression.Algorithm compressAlgo, 
.withCompression(compressAlgo) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); + HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta); Map prevOffsetByType = new HashMap<>(); long totalSize = 0; for (int i = 0; i < NUM_TEST_BLOCKS; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index a66b1a3b49aa..08165bbdf442 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -213,7 +213,7 @@ private void readIndex(boolean useTags) throws IOException { .build(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, path).build(); HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(context, meta, - ByteBuffAllocator.HEAP); + ByteBuffAllocator.HEAP, conf); BlockReaderWrapper brw = new BlockReaderWrapper(blockReader); HFileBlockIndex.BlockIndexReader indexReader = @@ -270,7 +270,7 @@ private void writeWholeIndex(boolean useTags) throws IOException { .withCompression(compr) .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(null, + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); FSDataOutputStream outputStream = fs.create(path); HFileBlockIndex.BlockIndexWriter biw = @@ -650,7 +650,7 @@ public void testHFileWriterAndReader() throws IOException { LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1])); for (boolean pread : new boolean[] { false, true }) { - HFileScanner scanner = reader.getScanner(true, pread); + HFileScanner scanner = reader.getScanner(conf, true, pread); for (int i = 0; i < NUM_KV; ++i) { checkSeekTo(keys, scanner, i); checkKeyValue("i=" + i, keys[i], values[i], @@ -779,7 +779,7 @@ public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf); // Scanner doesn't do Cells yet. Fix. 
- HFileScanner scanner = reader.getScanner(true, true); + HFileScanner scanner = reader.getScanner(conf, true, true); for (int i = 0; i < keys.size(); ++i) { scanner.seekTo(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index ec2af490c345..d428acf6f01f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -27,8 +27,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -58,8 +61,9 @@ public class TestHFileDataBlockEncoder { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFileDataBlockEncoder.class); + private final Configuration conf = HBaseConfiguration.create(); + private final RedundantKVGenerator generator = new RedundantKVGenerator(); private HFileDataBlockEncoder blockEncoder; - private RedundantKVGenerator generator = new RedundantKVGenerator(); private boolean includesMemstoreTS; /** @@ -87,7 +91,7 @@ public void testEncodingWithCache() throws IOException { private void testEncodingWithCacheInternals(boolean useTag) throws IOException { List kvs = generator.generateTestKeyValues(60, useTag); HFileBlock block = getSampleHFileBlock(kvs, useTag); - HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag); + HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTag); LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024); @@ -135,7 +139,7 @@ private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) thro .build(); HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, 0, 0, -1, hfileContext, ByteBuffAllocator.HEAP); - HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags); + HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTags); assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length); } @@ -162,7 +166,7 @@ public void testEncodingWithOffheapKeyValue() throws IOException { HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS) .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE) .withBlockSize(0).withChecksumType(ChecksumType.NULL).build(); - writeBlock(kvs, meta, true); + writeBlock(conf, kvs, meta, true); } catch (IllegalArgumentException e) { fail("No exception should have been thrown"); } @@ -172,7 +176,7 @@ private void testEncodingInternals(boolean useTag) throws IOException { // usually we have just block without headers, but don't complicate that List kvs = generator.generateTestKeyValues(60, useTag); HFileBlock block = getSampleHFileBlock(kvs, useTag); - HFileBlock blockOnDisk = createBlockOnDisk(kvs, block, useTag); + HFileBlock blockOnDisk = createBlockOnDisk(conf, kvs, block, useTag); if (blockEncoder.getDataBlockEncoding() != DataBlockEncoding.NONE) { @@ -204,10 +208,10 @@ 
private HFileBlock getSampleHFileBlock(List kvs, boolean useTag) { return b; } - private HFileBlock createBlockOnDisk(List kvs, HFileBlock block, boolean useTags) - throws IOException { + private HFileBlock createBlockOnDisk(Configuration conf, List kvs, HFileBlock block, + boolean useTags) throws IOException { int size; - HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext( + HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf, blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext()); @@ -226,9 +230,9 @@ private HFileBlock createBlockOnDisk(List kvs, HFileBlock block, boole block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext(), ByteBuffAllocator.HEAP); } - private void writeBlock(List kvs, HFileContext fileContext, boolean useTags) - throws IOException { - HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext( + private void writeBlock(Configuration conf, List kvs, HFileContext fileContext, + boolean useTags) throws IOException { + HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf, blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index a6dffbcdd7cc..6b71f495e0c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -94,9 +94,9 @@ public static void setUp() throws Exception { cryptoContext.setKey(key); } - private int writeBlock(FSDataOutputStream os, HFileContext fileContext, int size) - throws IOException { - HFileBlock.Writer hbw = new HFileBlock.Writer(null, fileContext); + private int writeBlock(Configuration conf, FSDataOutputStream os, HFileContext fileContext, + int size) throws IOException { + HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, fileContext); DataOutputStream dos = hbw.startWriting(BlockType.DATA); for (int j = 0; j < size; j++) { dos.writeInt(j); @@ -149,7 +149,7 @@ public void testDataBlockEncryption() throws IOException { FSDataOutputStream os = fs.create(path); try { for (int i = 0; i < blocks; i++) { - totalSize += writeBlock(os, fileContext, blockSizes[i]); + totalSize += writeBlock(TEST_UTIL.getConfiguration(), os, fileContext, blockSizes[i]); } } finally { os.close(); @@ -162,7 +162,7 @@ public void testDataBlockEncryption() throws IOException { .withFileSize(totalSize).build(); try { HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, fileContext, - ByteBuffAllocator.HEAP); + ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); long pos = 0; for (int i = 0; i < blocks; i++) { pos += readAndVerifyBlock(pos, fileContext, hbr, blockSizes[i]); @@ -254,7 +254,7 @@ public void testHFileEncryption() throws Exception { try { FixedFileTrailer trailer = reader.getTrailer(); assertNotNull(trailer.getEncryptionKey()); - scanner = reader.getScanner(false, false); + scanner = reader.getScanner(conf, false, false); assertTrue("Initial seekTo failed", scanner.seekTo()); do { Cell kv = scanner.getCell(); @@ -273,7 +273,7 @@ public void testHFileEncryption() throws Exception { LOG.info("Random seeking with " + fileContext); reader = HFile.createReader(fs, path, cacheConf, true, conf); try { - scanner = reader.getScanner(false, true); + scanner = 
reader.getScanner(conf, false, true); assertTrue("Initial seekTo failed", scanner.seekTo()); for (i = 0; i < 100; i++) { KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index 4f9416fd84d6..3adc57a625d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -95,7 +95,7 @@ public void testWriteHFile() throws Exception { HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf); // Scanner doesn't do Cells yet. Fix. - HFileScanner scanner = reader.getScanner(true, true); + HFileScanner scanner = reader.getScanner(conf, true, true); for (int i = 0; i < keys.size(); ++i) { scanner.seekTo(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(keys.get(i)) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java index f245fc8c57a6..0809ca8be543 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java @@ -92,7 +92,7 @@ public void testSeekBefore() throws Exception { HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf, bucketcache), true, conf); // warm cache - HFileScanner scanner = reader.getScanner(true, true); + HFileScanner scanner = reader.getScanner(conf, true, true); scanner.seekTo(toKV("i")); assertEquals("i", toRowStr(scanner.getCell())); scanner.close(); @@ -102,7 +102,7 @@ public void testSeekBefore() throws Exception { } // reopen again. - scanner = reader.getScanner(true, true); + scanner = reader.getScanner(conf, true, true); scanner.seekTo(toKV("i")); assertEquals("i", toRowStr(scanner.getCell())); scanner.seekBefore(toKV("i")); @@ -117,7 +117,7 @@ public void testSeekBefore() throws Exception { } // case 2 - scanner = reader.getScanner(true, true); + scanner = reader.getScanner(conf, true, true); scanner.seekTo(toKV("i")); assertEquals("i", toRowStr(scanner.getCell())); scanner.seekBefore(toKV("c")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java index 4fd52fd3316b..1243d466ca38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java @@ -201,7 +201,7 @@ private void testReleaseBlock(Algorithm compression, DataBlockEncoding encoding) // We've build a HFile tree with index = 16. 
Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); - HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false); + HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); HFileBlock block1 = reader.getDataBlockIndexReader() .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); @@ -285,7 +285,7 @@ public void testSeekBefore() throws Exception { // We've build a HFile tree with index = 16. Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); - HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false); + HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); HFileBlock block1 = reader.getDataBlockIndexReader() .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); @@ -415,7 +415,7 @@ public void testWithLruBlockCache() throws Exception { // We've build a HFile tree with index = 16. Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); - HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false); + HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); HFileBlock block1 = reader.getDataBlockIndexReader() .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index ffc515ec90fe..bbaf8cc79d60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -190,7 +190,7 @@ public void seekTFile() throws IOException { Reader reader = TestHFile.createReaderFromStream(context, new CacheConfig(conf), conf); KeySampler kSampler = new KeySampler(rng, ((KeyValue) reader.getFirstKey().get()).getKey(), ((KeyValue) reader.getLastKey().get()).getKey(), keyLenGen); - HFileScanner scanner = reader.getScanner(false, USE_PREAD); + HFileScanner scanner = reader.getScanner(conf, false, USE_PREAD); BytesWritable key = new BytesWritable(); timer.reset(); timer.start(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index 53d4fe6281f0..d14ba00a532c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -190,7 +190,7 @@ private void writeDataAndReadFromHFile(Path hfilePath, .withFileSystem(fs) .withFileSize(fileSize).build(); HFileBlock.FSReader blockReader = - new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP); + new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); // Comparator class name is stored in the trailer in version 3. 
CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java index 710e92df2fb9..cbf615e76f97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java @@ -176,7 +176,7 @@ private void writeDataAndReadFromHFile(Path hfilePath, .withFileSystem(fs) .withFileSize(fileSize).build(); HFileBlock.FSReader blockReader = - new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP); + new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); // Comparator class name is stored in the trailer in version 3. CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = @@ -277,7 +277,7 @@ private long scanBlocks(int entryCount, HFileContext context, List key origBlock.limit(pos + block.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); ByteBuff buf = origBlock.slice(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(buf); Cell res = seeker.getCell(); KeyValue kv = keyValues.get(entriesRead); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 46db1b27d3aa..4365bab14a69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -112,7 +112,7 @@ private void testReseekToInternals(TagUsage tagUsage) throws IOException { HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), ncTFile, cacheConf, true, TEST_UTIL.getConfiguration()); - HFileScanner scanner = reader.getScanner(false, true); + HFileScanner scanner = reader.getScanner(TEST_UTIL.getConfiguration(), false, true); scanner.seekTo(); for (int i = 0; i < keyList.size(); i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index ef807d2b117d..5c78470e6934 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -150,7 +150,7 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { // Check that we can seekBefore in either direction and with both pread // enabled and disabled for (boolean pread : new boolean[] { false, true }) { - HFileScanner scanner = reader.getScanner(true, pread); + HFileScanner scanner = reader.getScanner(conf, true, pread); checkNoSeekBefore(cells, scanner, 0); for (int i = 1; i < NUM_KV; i++) { checkSeekBefore(cells, scanner, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index 08479c3cc5b4..ffe28d78bf60 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -148,7 +148,7 @@ protected void testSeekBeforeInternals(TagUsage tagUsage) throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Configuration conf = TEST_UTIL.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, true); + HFileScanner scanner = reader.getScanner(conf, false, true); assertFalse(scanner.seekBefore(toKV("a", tagUsage))); assertFalse(scanner.seekBefore(toKV("c", tagUsage))); @@ -206,7 +206,7 @@ protected void testSeekBeforeWithReSeekToInternals(TagUsage tagUsage) throws IOE FileSystem fs = TEST_UTIL.getTestFileSystem(); Configuration conf = TEST_UTIL.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, true); + HFileScanner scanner = reader.getScanner(conf, false, true); assertFalse(scanner.seekBefore(toKV("a", tagUsage))); assertFalse(scanner.seekBefore(toKV("b", tagUsage))); assertFalse(scanner.seekBefore(toKV("c", tagUsage))); @@ -300,7 +300,7 @@ protected void testSeekToInternals(TagUsage tagUsage) throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); assertEquals(2, reader.getDataBlockIndexReader().getRootBlockCount()); - HFileScanner scanner = reader.getScanner(false, true); + HFileScanner scanner = reader.getScanner(conf, false, true); // lies before the start of the file. assertEquals(-1, scanner.seekTo(toKV("a", tagUsage))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java index cd13905855c5..4d7a4830c41e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java @@ -159,6 +159,11 @@ public static boolean waitForAssignment(AssignmentManager am, RegionInfo regionI public static void insertData(final HBaseTestingUtil UTIL, final TableName tableName, int rowCount, int startRowNum, String... cfs) throws IOException { + insertData(UTIL, tableName, rowCount, startRowNum, false, cfs); + } + + public static void insertData(final HBaseTestingUtil UTIL, final TableName tableName, + int rowCount, int startRowNum, boolean flushOnce, String... 
cfs) throws IOException { Table t = UTIL.getConnection().getTable(tableName); Put p; for (int i = 0; i < rowCount / 2; i++) { @@ -172,9 +177,12 @@ public static void insertData(final HBaseTestingUtil UTIL, final TableName table p.addColumn(Bytes.toBytes(cf), Bytes.toBytes("q"), Bytes.toBytes(i)); } t.put(p); - if (i % 5 == 0) { + if (i % 5 == 0 && !flushOnce) { UTIL.getAdmin().flush(tableName); } } + if (flushOnce) { + UTIL.getAdmin().flush(tableName); + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java index 62f966a43341..105f72f006e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java @@ -19,7 +19,9 @@ import static org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil.insertData; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -39,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; + import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -157,6 +160,54 @@ public void testSplitTableRegion() throws Exception { regionInfoMap.get(tableRegions.get(1).getRegionInfo())); } + @Test + public void testSplitStoreFiles() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, + null, columnFamilyName); + // flush the memstore + insertData(UTIL, tableName, rowCount, startRowNum, true, columnFamilyName); + + // assert the hfile count of the table + int storeFilesCountSum = 0; + for(HRegion region : UTIL.getHBaseCluster().getRegions(tableName)){ + storeFilesCountSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size(); + } + assertEquals(1, storeFilesCountSum); + + // split at the start row + byte[] splitKey = Bytes.toBytes("" + startRowNum); + + assertNotNull("Not able to find a splittable region", regions); + assertEquals("Not able to find a splittable region", 1, regions.length); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + assertEquals("Not able to split table", + 2, UTIL.getHBaseCluster().getRegions(tableName).size()); + + // assert sum of the hfiles of all regions + int childStoreFilesSum = 0; + for(HRegion region : UTIL.getHBaseCluster().getRegions(tableName)){ + childStoreFilesSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size(); + } + assertEquals(1, childStoreFilesSum); + + List tableRegions = UTIL.getHBaseCluster().getRegions(tableName); + assertEquals("Table region not correct.", 2, tableRegions.size()); + Map regionInfoMap = UTIL.getHBaseCluster().getMaster() + .getAssignmentManager().getRegionStates().getRegionAssignments(); + 
assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()), + regionInfoMap.get(tableRegions.get(1).getRegionInfo())); + } + private ProcedureExecutor getMasterProcedureExecutor() { return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java index aec763cb8ef4..727fcb5e2a8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java @@ -94,7 +94,6 @@ public static void setupBeforeClass() throws Exception { conf = UTIL.getConfiguration(); conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f); conf.setFloat("hbase.regions.slop", 0.0f); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, JMXListener.class.getName()); Random rand = new Random(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java index 0f75030cf836..c3e7741aa2ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java @@ -61,7 +61,7 @@ public class TestCleanerChore { @BeforeClass public static void setup() { - POOL = new DirScanPool(UTIL.getConfiguration()); + POOL = DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); } @AfterClass @@ -469,6 +469,57 @@ public void testOnConfigurationChange() throws Exception { t.join(); } + @Test + public void testOnConfigurationChangeLogCleaner() throws Exception { + int availableProcessorNum = Runtime.getRuntime().availableProcessors(); + if (availableProcessorNum == 1) { // no need to run this test + return; + } + + DirScanPool pool = DirScanPool.getLogCleanerScanPool(UTIL.getConfiguration()); + + // have at least 2 available processors/cores + int initPoolSize = availableProcessorNum / 2; + int changedPoolSize = availableProcessorNum; + + Stoppable stop = new StoppableImplementation(); + Configuration conf = UTIL.getConfiguration(); + Path testDir = UTIL.getDataTestDir(); + FileSystem fs = UTIL.getTestFileSystem(); + String confKey = "hbase.test.cleaner.delegates"; + conf.set(confKey, AlwaysDelete.class.getName()); + conf.set(CleanerChore.LOG_CLEANER_CHORE_SIZE, String.valueOf(initPoolSize)); + final AllValidPaths chore = + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, pool); + chore.setEnabled(true); + // Create subdirs under testDir + int dirNums = 6; + Path[] subdirs = new Path[dirNums]; + for (int i = 0; i < dirNums; i++) { + subdirs[i] = new Path(testDir, "subdir-" + i); + fs.mkdirs(subdirs[i]); + } + // Under each subdirs create 6 files + for (Path subdir : subdirs) { + createFiles(fs, subdir, 6); + } + // Start chore + Thread t = new Thread(new Runnable() { + @Override + public void run() { + chore.chore(); + } + }); + t.setDaemon(true); + t.start(); + // Change size of chore's pool + conf.set(CleanerChore.LOG_CLEANER_CHORE_SIZE, String.valueOf(changedPoolSize)); + pool.onConfigurationChange(conf); + assertEquals(changedPoolSize, 
chore.getChorePoolSize()); + // Stop chore + t.join(); + } + @Test public void testMinimumNumberOfThreads() throws Exception { Configuration conf = UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index 6d08a5045bd4..0408ad1b1685 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -71,7 +71,7 @@ public class TestHFileCleaner { public static void setupCluster() throws Exception { // have to use a minidfs cluster because the localfs doesn't modify file times correctly UTIL.startMiniDFSCluster(1); - POOL = new DirScanPool(UTIL.getConfiguration()); + POOL = DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 5f92e34462ce..32ffaeca2372 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -67,7 +67,7 @@ public class TestHFileLinkCleaner { @BeforeClass public static void setUp() { - POOL = new DirScanPool(TEST_UTIL.getConfiguration()); + POOL = DirScanPool.getHFileCleanerScanPool(TEST_UTIL.getConfiguration()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index d00aec849d8d..e924e6449d3f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -94,7 +94,7 @@ public class TestLogsCleaner { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniZKCluster(); TEST_UTIL.startMiniDFSCluster(1); - POOL = new DirScanPool(TEST_UTIL.getConfiguration()); + POOL = DirScanPool.getLogCleanerScanPool(TEST_UTIL.getConfiguration()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java index ea6d9d093b19..721e4d1b189c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java @@ -48,7 +48,9 @@ public class MasterRegionTestBase { protected ChoreService choreService; - protected DirScanPool cleanerPool; + protected DirScanPool hfileCleanerPool; + + protected DirScanPool logCleanerPool; protected static byte[] CF1 = Bytes.toBytes("f1"); @@ -80,7 +82,8 @@ public void setUp() throws IOException { htu.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false); configure(htu.getConfiguration()); choreService = new ChoreService(getClass().getSimpleName()); - cleanerPool = new DirScanPool(htu.getConfiguration()); + hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(htu.getConfiguration()); + logCleanerPool = DirScanPool.getLogCleanerScanPool(htu.getConfiguration()); Server server = mock(Server.class); 
when(server.getConfiguration()).thenReturn(htu.getConfiguration()); when(server.getServerName()) @@ -103,7 +106,8 @@ public void setUp() throws IOException { @After public void tearDown() throws IOException { region.close(true); - cleanerPool.shutdownNow(); + hfileCleanerPool.shutdownNow(); + logCleanerPool.shutdownNow(); choreService.shutdown(); htu.cleanupTestDir(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java index 713fc3096f77..6759903608e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java @@ -77,7 +77,7 @@ public void stop(String why) { public boolean isStopped() { return stopped; } - }, conf, fs, globalArchivePath, cleanerPool); + }, conf, fs, globalArchivePath, hfileCleanerPool); choreService.scheduleChore(hfileCleaner); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java index 39497b07e52f..f936e9e4f592 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java @@ -72,7 +72,7 @@ public void stop(String why) { public boolean isStopped() { return stopped; } - }, conf, fs, globalWALArchiveDir, cleanerPool, null); + }, conf, fs, globalWALArchiveDir, logCleanerPool, null); choreService.scheduleChore(logCleaner); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index a8efa16047da..01f40be93f51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -119,6 +119,7 @@ public class DataBlockEncodingTool { private static int benchmarkNTimes = DEFAULT_BENCHMARK_N_TIMES; private static int benchmarkNOmit = DEFAULT_BENCHMARK_N_OMIT; + private final Configuration conf; private List codecs = new ArrayList<>(); private long totalPrefixLength = 0; private long totalKeyLength = 0; @@ -157,7 +158,8 @@ public String toString() { * @param compressionAlgorithmName What kind of algorithm should be used * as baseline for comparison (e.g. lzo, gz). 
*/ - public DataBlockEncodingTool(String compressionAlgorithmName) { + public DataBlockEncodingTool(Configuration conf, String compressionAlgorithmName) { + this.conf = conf; this.compressionAlgorithmName = compressionAlgorithmName; this.compressionAlgorithm = Compression.getCompressionAlgorithmByName( compressionAlgorithmName); @@ -242,7 +244,7 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) .withCompression(Compression.Algorithm.NONE) .withIncludesMvcc(includesMemstoreTS) .withIncludesTags(USE_TAG).build(); - codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta )); + codecs.add(new EncodedDataBlock(conf, d, encoding, rawKVs, meta)); } } @@ -619,7 +621,7 @@ public static void testCodecs(Configuration conf, int kvLimit, false, hsf.getMaxMemStoreTS(), 0, false); USE_TAG = reader.getHFileReader().getFileContext().isIncludesTags(); // run the utilities - DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName); + DataBlockEncodingTool comp = new DataBlockEncodingTool(conf, compressionName); int majorVersion = reader.getHFileVersion(); comp.useHBaseChecksum = majorVersion > 2 || (majorVersion == 2 && diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index cd83dc8c2494..54b0d182d54c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -231,7 +231,7 @@ private void readStoreFile(Path path) throws IOException { HFile.Reader reader = sf.getReader().getHFileReader(); try { // Open a scanner with (on read) caching disabled - HFileScanner scanner = reader.getScanner(false, false); + HFileScanner scanner = reader.getScanner(conf, false, false); assertTrue(testDescription, scanner.seekTo()); // Cribbed from io.hfile.TestCacheOnWrite long offset = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index b64f52192740..d08a1d09f98b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.quotas.RegionSizeStoreImpl; +import org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action; import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; @@ -238,6 +239,7 @@ private HStore init(String methodName, Configuration conf, TableDescriptorBuilde } else { store = new MyStore(region, hcd, conf, hook, switchToPread); } + region.stores.put(store.getColumnFamilyDescriptor().getName(), store); return store; } @@ -1790,14 +1792,16 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize() // InmemoryFlushSize @Test(timeout = 60000) public void testCompactingMemStoreCellExceedInmemoryFlushSize() - throws IOException, InterruptedException { + throws Exception { Configuration conf = HBaseConfiguration.create(); - conf.set(HStore.MEMSTORE_CLASS_NAME, 
CompactingMemStore.class.getName()); + conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore6.class.getName()); init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family) .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build()); - int size = (int) ((CompactingMemStore) store.memstore).getInmemoryFlushSize(); + MyCompactingMemStore6 myCompactingMemStore = ((MyCompactingMemStore6) store.memstore); + + int size = (int) (myCompactingMemStore.getInmemoryFlushSize()); byte[] value = new byte[size + 1]; MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); @@ -1808,6 +1812,8 @@ public void testCompactingMemStoreCellExceedInmemoryFlushSize() store.add(cell, memStoreSizing); assertTrue(memStoreSizing.getCellsCount() == 1); assertTrue(memStoreSizing.getDataSize() == cellByteSize); + // Waiting the in memory compaction completed, see HBASE-26438 + myCompactingMemStore.inMemoryCompactionEndCyclicBarrier.await(); } // This test is for HBASE-26210 also, test write large cell and small cell concurrently when @@ -1921,6 +1927,248 @@ private void doWriteTestLargeCellAndSmallCellConcurrently( } } + /** + *
+   * This test is for HBASE-26384,
+   * verifying that {@link CompactingMemStore#flattenOneSegment} and
+   * {@link CompactingMemStore#snapshot()} can execute concurrently.
+   * The thread sequence before HBASE-26384 is as follows (the bug only exists on branch-2, but
+   * UTs are added for both branch-2 and master):
+   * 1. The {@link CompactingMemStore} size exceeds
+   *    {@link CompactingMemStore#getInmemoryFlushSize()}; the write thread adds a new
+   *    {@link ImmutableSegment} to the head of {@link CompactingMemStore#pipeline} and starts an
+   *    in-memory compaction thread to execute {@link CompactingMemStore#inMemoryCompaction}.
+   * 2. The in-memory compaction thread starts and then stops just before
+   *    {@link CompactingMemStore#flattenOneSegment}.
+   * 3. The snapshot thread starts {@link CompactingMemStore#snapshot} concurrently. After the
+   *    snapshot thread executes {@link CompactingMemStore#getImmutableSegments}, the in-memory
+   *    compaction thread continues.
+   *    Assume the {@link VersionedSegmentsList#version} returned from
+   *    {@link CompactingMemStore#getImmutableSegments} is v.
+   * 4. The snapshot thread stops just before {@link CompactingMemStore#swapPipelineWithNull}.
+   * 5. The in-memory compaction thread completes {@link CompactingMemStore#flattenOneSegment};
+   *    {@link CompactionPipeline#version} is still v.
+   * 6. The snapshot thread continues with {@link CompactingMemStore#swapPipelineWithNull}. Because
+   *    {@link CompactionPipeline#version} is still v, {@link CompactingMemStore#swapPipelineWithNull}
+   *    believes it succeeded and continues flushing, but the {@link ImmutableSegment} in the
+   *    {@link CompactionPipeline} has been changed by {@link CompactingMemStore#flattenOneSegment},
+   *    so the {@link ImmutableSegment} is in fact not removed and remains in the
+   *    {@link CompactionPipeline}.
+   *
+   * After HBASE-26384, steps 5-6 change to the following, which is the expected behavior:
+   * 5. The in-memory compaction thread completes {@link CompactingMemStore#flattenOneSegment},
+   *    which changes {@link CompactionPipeline#version} to v+1.
+   * 6. The snapshot thread continues with {@link CompactingMemStore#swapPipelineWithNull}. Because
+   *    {@link CompactionPipeline#version} is now v+1, {@link CompactingMemStore#swapPipelineWithNull}
+   *    fails and retries the while loop in {@link CompactingMemStore#pushPipelineToSnapshot}; since
+   *    there is no concurrent {@link CompactingMemStore#inMemoryCompaction} now,
+   *    {@link CompactingMemStore#swapPipelineWithNull} succeeds.
+   * 
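+   *
+   * A simplified interleaving sketch of the steps above (the layout and thread labels are
+   * illustrative only, not the exact call sites in the implementation):
+   * <pre>
+   *   snapshot thread                          in-memory compact thread
+   *   ---------------                          ------------------------
+   *   getImmutableSegments()  (observes v)
+   *                                            flattenOneSegment()
+   *                                              before HBASE-26384: version stays v
+   *                                              after  HBASE-26384: version becomes v+1
+   *   swapPipelineWithNull(v)
+   *     before the fix: succeeds against the already-flattened pipeline (segment leaked)
+   *     after the fix : fails, pushPipelineToSnapshot retries and then succeeds
+   * </pre>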
+ */ + @Test + public void testFlattenAndSnapshotCompactingMemStoreConcurrently() throws Exception { + Configuration conf = HBaseConfiguration.create(); + + byte[] smallValue = new byte[3]; + byte[] largeValue = new byte[9]; + final long timestamp = EnvironmentEdgeManager.currentTime(); + final long seqId = 100; + final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + int smallCellByteSize = MutableSegment.getCellLength(smallCell); + int largeCellByteSize = MutableSegment.getCellLength(largeCell); + int totalCellByteSize = (smallCellByteSize + largeCellByteSize); + int flushByteSize = totalCellByteSize - 2; + + // set CompactingMemStore.inmemoryFlushSize to flushByteSize. + conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore4.class.getName()); + conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.005); + conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(flushByteSize * 200)); + + init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family) + .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build()); + + MyCompactingMemStore4 myCompactingMemStore = ((MyCompactingMemStore4) store.memstore); + assertTrue((int) (myCompactingMemStore.getInmemoryFlushSize()) == flushByteSize); + + store.add(smallCell, new NonThreadSafeMemStoreSizing()); + store.add(largeCell, new NonThreadSafeMemStoreSizing()); + + String oldThreadName = Thread.currentThread().getName(); + try { + Thread.currentThread().setName(MyCompactingMemStore4.TAKE_SNAPSHOT_THREAD_NAME); + /** + * {@link CompactingMemStore#snapshot} must wait the in memory compact thread enters + * {@link CompactingMemStore#flattenOneSegment},because {@link CompactingMemStore#snapshot} + * would invoke {@link CompactingMemStore#stopCompaction}. + */ + myCompactingMemStore.snapShotStartCyclicCyclicBarrier.await(); + + MemStoreSnapshot memStoreSnapshot = myCompactingMemStore.snapshot(); + myCompactingMemStore.inMemoryCompactionEndCyclicBarrier.await(); + + assertTrue(memStoreSnapshot.getCellsCount() == 2); + assertTrue(((int) (memStoreSnapshot.getDataSize())) == totalCellByteSize); + VersionedSegmentsList segments = myCompactingMemStore.getImmutableSegments(); + assertTrue(segments.getNumOfSegments() == 0); + assertTrue(segments.getNumOfCells() == 0); + assertTrue(myCompactingMemStore.setInMemoryCompactionFlagCounter.get() == 1); + assertTrue(myCompactingMemStore.swapPipelineWithNullCounter.get() == 2); + } finally { + Thread.currentThread().setName(oldThreadName); + } + } + + /** + *
+   * This test is for HBASE-26384. It tests that {@link CompactingMemStore#flattenOneSegment},
+   * {@link CompactingMemStore#snapshot()} and memstore writes execute concurrently.
+   * The thread sequence before HBASE-26384 is as follows (the bug only exists on branch-2, but UTs
+   * are added for both branch-2 and master):
+   * 1. The {@link CompactingMemStore} size exceeds
+   *    {@link CompactingMemStore#getInmemoryFlushSize()}; the write thread adds a new
+   *    {@link ImmutableSegment} to the head of {@link CompactingMemStore#pipeline} and starts an
+   *    in-memory compact thread to execute {@link CompactingMemStore#inMemoryCompaction}.
+   * 2. The in-memory compact thread starts and then stops just before
+   *    {@link CompactingMemStore#flattenOneSegment}.
+   * 3. The snapshot thread starts {@link CompactingMemStore#snapshot} concurrently; after the
+   *    snapshot thread executes {@link CompactingMemStore#getImmutableSegments}, the in-memory
+   *    compact thread continues.
+   *    Assume the {@link VersionedSegmentsList#version} returned from
+   *    {@link CompactingMemStore#getImmutableSegments} is v.
+   * 4. The snapshot thread stops just before {@link CompactingMemStore#swapPipelineWithNull}.
+   * 5. The in-memory compact thread completes {@link CompactingMemStore#flattenOneSegment};
+   *    {@link CompactionPipeline#version} is still v.
+   * 6. The snapshot thread continues with {@link CompactingMemStore#swapPipelineWithNull}, and
+   *    because {@link CompactionPipeline#version} is still v,
+   *    {@link CompactingMemStore#swapPipelineWithNull} believes it succeeded and continues
+   *    flushing. However, the {@link ImmutableSegment} in {@link CompactionPipeline} has changed
+   *    because of {@link CompactingMemStore#flattenOneSegment}, so the {@link ImmutableSegment} is
+   *    in fact not removed and remains in {@link CompactionPipeline}.
+   *
+   * After HBASE-26384, steps 5-6 change to the following, which is the expected behavior; steps
+   * 7-8 are added to test that a new segment is added before the retry.
+   * 5. The in-memory compact thread completes {@link CompactingMemStore#flattenOneSegment}, which
+   *    changes {@link CompactionPipeline#version} to v+1.
+   * 6. The snapshot thread continues with {@link CompactingMemStore#swapPipelineWithNull};
+   *    because {@link CompactionPipeline#version} is now v+1,
+   *    {@link CompactingMemStore#swapPipelineWithNull} fails and retries; the
+   *    {@link VersionedSegmentsList#version} returned from
+   *    {@link CompactingMemStore#getImmutableSegments} is now v+1.
+   * 7. The write thread continues writing to the {@link CompactingMemStore} and its size again
+   *    exceeds {@link CompactingMemStore#getInmemoryFlushSize()};
+   *    {@link CompactingMemStore#flushInMemory(MutableSegment)} is called and a new
+   *    {@link ImmutableSegment} is added to the head of {@link CompactingMemStore#pipeline}, while
+   *    {@link CompactionPipeline#version} is still v+1.
+   * 8. The snapshot thread retries {@link CompactingMemStore#swapPipelineWithNull}, and because
+   *    {@link CompactionPipeline#version} is still v+1,
+   *    {@link CompactingMemStore#swapPipelineWithNull} succeeds. The new {@link ImmutableSegment}
+   *    remains at the head of {@link CompactingMemStore#pipeline}; the old one is removed by
+   *    {@link CompactingMemStore#swapPipelineWithNull}.
+   * 
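+   *
+   * A simplified interleaving sketch for this variant (illustrative only), adding the write
+   * thread that races with the retry:
+   * <pre>
+   *   snapshot thread                  write thread                  in-memory compact thread
+   *   getImmutableSegments() (v)
+   *                                                                  flattenOneSegment() -> v+1
+   *   swapPipelineWithNull(v) fails
+   *                                    add cells, flushInMemory():
+   *                                    new ImmutableSegment added,
+   *                                    version stays v+1
+   *   swapPipelineWithNull(v+1) succeeds; the new segment stays, the old ones are removed
+   * </pre>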
+ */ + @Test + public void testFlattenSnapshotWriteCompactingMemeStoreConcurrently() throws Exception { + Configuration conf = HBaseConfiguration.create(); + + byte[] smallValue = new byte[3]; + byte[] largeValue = new byte[9]; + final long timestamp = EnvironmentEdgeManager.currentTime(); + final long seqId = 100; + final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue); + final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue); + int smallCellByteSize = MutableSegment.getCellLength(smallCell); + int largeCellByteSize = MutableSegment.getCellLength(largeCell); + int firstWriteCellByteSize = (smallCellByteSize + largeCellByteSize); + int flushByteSize = firstWriteCellByteSize - 2; + + // set CompactingMemStore.inmemoryFlushSize to flushByteSize. + conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore5.class.getName()); + conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.005); + conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(flushByteSize * 200)); + + init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family) + .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build()); + + final MyCompactingMemStore5 myCompactingMemStore = ((MyCompactingMemStore5) store.memstore); + assertTrue((int) (myCompactingMemStore.getInmemoryFlushSize()) == flushByteSize); + + store.add(smallCell, new NonThreadSafeMemStoreSizing()); + store.add(largeCell, new NonThreadSafeMemStoreSizing()); + + final AtomicReference exceptionRef = new AtomicReference(); + final Cell writeAgainCell1 = createCell(qf3, timestamp, seqId + 1, largeValue); + final Cell writeAgainCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); + final int writeAgainCellByteSize = MutableSegment.getCellLength(writeAgainCell1) + + MutableSegment.getCellLength(writeAgainCell2); + final Thread writeAgainThread = new Thread(() -> { + try { + myCompactingMemStore.writeMemStoreAgainStartCyclicBarrier.await(); + + store.add(writeAgainCell1, new NonThreadSafeMemStoreSizing()); + store.add(writeAgainCell2, new NonThreadSafeMemStoreSizing()); + + myCompactingMemStore.writeMemStoreAgainEndCyclicBarrier.await(); + } catch (Throwable exception) { + exceptionRef.set(exception); + } + }); + writeAgainThread.setName(MyCompactingMemStore5.WRITE_AGAIN_THREAD_NAME); + writeAgainThread.start(); + + String oldThreadName = Thread.currentThread().getName(); + try { + Thread.currentThread().setName(MyCompactingMemStore5.TAKE_SNAPSHOT_THREAD_NAME); + /** + * {@link CompactingMemStore#snapshot} must wait the in memory compact thread enters + * {@link CompactingMemStore#flattenOneSegment},because {@link CompactingMemStore#snapshot} + * would invoke {@link CompactingMemStore#stopCompaction}. 
+ */ + myCompactingMemStore.snapShotStartCyclicCyclicBarrier.await(); + MemStoreSnapshot memStoreSnapshot = myCompactingMemStore.snapshot(); + myCompactingMemStore.inMemoryCompactionEndCyclicBarrier.await(); + writeAgainThread.join(); + + assertTrue(memStoreSnapshot.getCellsCount() == 2); + assertTrue(((int) (memStoreSnapshot.getDataSize())) == firstWriteCellByteSize); + VersionedSegmentsList segments = myCompactingMemStore.getImmutableSegments(); + assertTrue(segments.getNumOfSegments() == 1); + assertTrue( + ((int) (segments.getStoreSegments().get(0).getDataSize())) == writeAgainCellByteSize); + assertTrue(segments.getNumOfCells() == 2); + assertTrue(myCompactingMemStore.setInMemoryCompactionFlagCounter.get() == 2); + assertTrue(exceptionRef.get() == null); + assertTrue(myCompactingMemStore.swapPipelineWithNullCounter.get() == 2); + } finally { + Thread.currentThread().setName(oldThreadName); + } + } + + @Test + public void testOnConfigurationChange() throws IOException { + final int COMMON_MAX_FILES_TO_COMPACT = 10; + final int NEW_COMMON_MAX_FILES_TO_COMPACT = 8; + final int STORE_MAX_FILES_TO_COMPACT = 6; + + //Build a table that its maxFileToCompact different from common configuration. + Configuration conf = HBaseConfiguration.create(); + conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, + COMMON_MAX_FILES_TO_COMPACT); + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family) + .setConfiguration(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, + String.valueOf(STORE_MAX_FILES_TO_COMPACT)).build(); + init(this.name.getMethodName(), conf, hcd); + + //After updating common configuration, the conf in HStore itself must not be changed. + conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, + NEW_COMMON_MAX_FILES_TO_COMPACT); + this.store.onConfigurationChange(conf); + assertEquals(STORE_MAX_FILES_TO_COMPACT, + store.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact()); + } + private HStoreFile mockStoreFileWithLength(long length) { HStoreFile sf = mock(HStoreFile.class); StoreFileReader sfr = mock(StoreFileReader.class); @@ -2286,4 +2534,317 @@ void enableCompaction() { } } + + public static class MyCompactingMemStore4 extends CompactingMemStore { + private static final String TAKE_SNAPSHOT_THREAD_NAME = "takeSnapShotThread"; + /** + * {@link CompactingMemStore#flattenOneSegment} must execute after + * {@link CompactingMemStore#getImmutableSegments} + */ + private final CyclicBarrier flattenOneSegmentPreCyclicBarrier = new CyclicBarrier(2); + /** + * Only after {@link CompactingMemStore#flattenOneSegment} completed, + * {@link CompactingMemStore#swapPipelineWithNull} could execute. + */ + private final CyclicBarrier flattenOneSegmentPostCyclicBarrier = new CyclicBarrier(2); + /** + * Only the in memory compact thread enters {@link CompactingMemStore#flattenOneSegment},the + * snapshot thread starts {@link CompactingMemStore#snapshot},because + * {@link CompactingMemStore#snapshot} would invoke {@link CompactingMemStore#stopCompaction}. + */ + private final CyclicBarrier snapShotStartCyclicCyclicBarrier = new CyclicBarrier(2); + /** + * To wait for {@link CompactingMemStore.InMemoryCompactionRunnable} stopping. 
+ */ + private final CyclicBarrier inMemoryCompactionEndCyclicBarrier = new CyclicBarrier(2); + private final AtomicInteger getImmutableSegmentsListCounter = new AtomicInteger(0); + private final AtomicInteger swapPipelineWithNullCounter = new AtomicInteger(0); + private final AtomicInteger flattenOneSegmentCounter = new AtomicInteger(0); + private final AtomicInteger setInMemoryCompactionFlagCounter = new AtomicInteger(0); + + public MyCompactingMemStore4(Configuration conf, CellComparatorImpl cellComparator, + HStore store, RegionServicesForStores regionServices, + MemoryCompactionPolicy compactionPolicy) throws IOException { + super(conf, cellComparator, store, regionServices, compactionPolicy); + } + + @Override + public VersionedSegmentsList getImmutableSegments() { + VersionedSegmentsList result = super.getImmutableSegments(); + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = getImmutableSegmentsListCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + flattenOneSegmentPreCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + return result; + } + + @Override + protected boolean swapPipelineWithNull(VersionedSegmentsList segments) { + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = swapPipelineWithNullCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + flattenOneSegmentPostCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + boolean result = super.swapPipelineWithNull(segments); + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = swapPipelineWithNullCounter.get(); + if (currentCount <= 1) { + assertTrue(!result); + } + if (currentCount == 2) { + assertTrue(result); + } + } + return result; + + } + + @Override + public void flattenOneSegment(long requesterVersion, Action action) { + int currentCount = flattenOneSegmentCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + /** + * {@link CompactingMemStore#snapshot} could start. + */ + snapShotStartCyclicCyclicBarrier.await(); + flattenOneSegmentPreCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + super.flattenOneSegment(requesterVersion, action); + if (currentCount <= 1) { + try { + flattenOneSegmentPostCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + + @Override + protected boolean setInMemoryCompactionFlag() { + boolean result = super.setInMemoryCompactionFlag(); + assertTrue(result); + setInMemoryCompactionFlagCounter.incrementAndGet(); + return result; + } + + @Override + void inMemoryCompaction() { + try { + super.inMemoryCompaction(); + } finally { + try { + inMemoryCompactionEndCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + + } + } + + } + + public static class MyCompactingMemStore5 extends CompactingMemStore { + private static final String TAKE_SNAPSHOT_THREAD_NAME = "takeSnapShotThread"; + private static final String WRITE_AGAIN_THREAD_NAME = "writeAgainThread"; + /** + * {@link CompactingMemStore#flattenOneSegment} must execute after + * {@link CompactingMemStore#getImmutableSegments} + */ + private final CyclicBarrier flattenOneSegmentPreCyclicBarrier = new CyclicBarrier(2); + /** + * Only after {@link CompactingMemStore#flattenOneSegment} completed, + * {@link CompactingMemStore#swapPipelineWithNull} could execute. 
+ */ + private final CyclicBarrier flattenOneSegmentPostCyclicBarrier = new CyclicBarrier(2); + /** + * Only the in memory compact thread enters {@link CompactingMemStore#flattenOneSegment},the + * snapshot thread starts {@link CompactingMemStore#snapshot},because + * {@link CompactingMemStore#snapshot} would invoke {@link CompactingMemStore#stopCompaction}. + */ + private final CyclicBarrier snapShotStartCyclicCyclicBarrier = new CyclicBarrier(2); + /** + * To wait for {@link CompactingMemStore.InMemoryCompactionRunnable} stopping. + */ + private final CyclicBarrier inMemoryCompactionEndCyclicBarrier = new CyclicBarrier(2); + private final AtomicInteger getImmutableSegmentsListCounter = new AtomicInteger(0); + private final AtomicInteger swapPipelineWithNullCounter = new AtomicInteger(0); + private final AtomicInteger flattenOneSegmentCounter = new AtomicInteger(0); + private final AtomicInteger setInMemoryCompactionFlagCounter = new AtomicInteger(0); + /** + * Only the snapshot thread retry {@link CompactingMemStore#swapPipelineWithNull}, writeAgain + * thread could start. + */ + private final CyclicBarrier writeMemStoreAgainStartCyclicBarrier = new CyclicBarrier(2); + /** + * This is used for snapshot thread,writeAgain thread and in memory compact thread. Only the + * writeAgain thread completes, {@link CompactingMemStore#swapPipelineWithNull} would + * execute,and in memory compact thread would exit,because we expect that in memory compact + * executing only once. + */ + private final CyclicBarrier writeMemStoreAgainEndCyclicBarrier = new CyclicBarrier(3); + + public MyCompactingMemStore5(Configuration conf, CellComparatorImpl cellComparator, + HStore store, RegionServicesForStores regionServices, + MemoryCompactionPolicy compactionPolicy) throws IOException { + super(conf, cellComparator, store, regionServices, compactionPolicy); + } + + @Override + public VersionedSegmentsList getImmutableSegments() { + VersionedSegmentsList result = super.getImmutableSegments(); + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = getImmutableSegmentsListCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + flattenOneSegmentPreCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + + } + + return result; + } + + @Override + protected boolean swapPipelineWithNull(VersionedSegmentsList segments) { + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = swapPipelineWithNullCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + flattenOneSegmentPostCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + + if (currentCount == 2) { + try { + /** + * Only the snapshot thread retry {@link CompactingMemStore#swapPipelineWithNull}, + * writeAgain thread could start. + */ + writeMemStoreAgainStartCyclicBarrier.await(); + /** + * Only the writeAgain thread completes, retry + * {@link CompactingMemStore#swapPipelineWithNull} would execute. 
+ */ + writeMemStoreAgainEndCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + + } + boolean result = super.swapPipelineWithNull(segments); + if (Thread.currentThread().getName().equals(TAKE_SNAPSHOT_THREAD_NAME)) { + int currentCount = swapPipelineWithNullCounter.get(); + if (currentCount <= 1) { + assertTrue(!result); + } + if (currentCount == 2) { + assertTrue(result); + } + } + return result; + + } + + @Override + public void flattenOneSegment(long requesterVersion, Action action) { + int currentCount = flattenOneSegmentCounter.incrementAndGet(); + if (currentCount <= 1) { + try { + /** + * {@link CompactingMemStore#snapshot} could start. + */ + snapShotStartCyclicCyclicBarrier.await(); + flattenOneSegmentPreCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + super.flattenOneSegment(requesterVersion, action); + if (currentCount <= 1) { + try { + flattenOneSegmentPostCyclicBarrier.await(); + /** + * Only the writeAgain thread completes, in memory compact thread would exit,because we + * expect that in memory compact executing only once. + */ + writeMemStoreAgainEndCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + + } + } + + @Override + protected boolean setInMemoryCompactionFlag() { + boolean result = super.setInMemoryCompactionFlag(); + int count = setInMemoryCompactionFlagCounter.incrementAndGet(); + if (count <= 1) { + assertTrue(result); + } + if (count == 2) { + assertTrue(!result); + } + return result; + } + + @Override + void inMemoryCompaction() { + try { + super.inMemoryCompaction(); + } finally { + try { + inMemoryCompactionEndCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + + } + } + } + + public static class MyCompactingMemStore6 extends CompactingMemStore { + private final CyclicBarrier inMemoryCompactionEndCyclicBarrier = new CyclicBarrier(2); + + public MyCompactingMemStore6(Configuration conf, CellComparatorImpl cellComparator, + HStore store, RegionServicesForStores regionServices, + MemoryCompactionPolicy compactionPolicy) throws IOException { + super(conf, cellComparator, store, regionServices, compactionPolicy); + } + + @Override + void inMemoryCompaction() { + try { + super.inMemoryCompaction(); + } finally { + try { + inMemoryCompactionEndCyclicBarrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + + } + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java new file mode 100644 index 000000000000..7b395ad157c7 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MediumTests.class, ReplicationTests.class }) +public class TestNonHBaseReplicationEndpoint { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestNonHBaseReplicationEndpoint.class); + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static Admin ADMIN; + + private static final TableName tableName = TableName.valueOf("test"); + private static final byte[] famName = Bytes.toBytes("f"); + + private static final AtomicBoolean REPLICATED = new AtomicBoolean(); + + @BeforeClass + public static void setupBeforeClass() throws Exception { + UTIL.startMiniCluster(); + ADMIN = UTIL.getAdmin(); + } + + @AfterClass + public static void teardownAfterClass() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() { + REPLICATED.set(false); + } + + @Test + public void test() throws IOException { + TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); + Table table = UTIL.createTable(td, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); + + ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() + .setReplicationEndpointImpl(NonHBaseReplicationEndpoint.class.getName()) + .setReplicateAllUserTables(false) + .setTableCFsMap(new HashMap>() {{ + put(tableName, new ArrayList<>()); + } + }).build(); + + ADMIN.addReplicationPeer("1", peerConfig); + loadData(table); + + UTIL.waitFor(10000L, () -> REPLICATED.get()); + } + + protected static void loadData(Table table) throws IOException { + for (int i = 0; i < 100; i++) { + Put put = new Put(Bytes.toBytes(Integer.toString(i))); + put.addColumn(famName, famName, Bytes.toBytes(i)); + table.put(put); + } + } + + public static class NonHBaseReplicationEndpoint implements 
ReplicationEndpoint { + + private boolean running = false; + + @Override + public void init(Context context) throws IOException { + } + + @Override + public boolean canReplicateToSameCluster() { + return false; + } + + @Override + public UUID getPeerUUID() { + return UUID.randomUUID(); + } + + @Override + public WALEntryFilter getWALEntryfilter() { + return null; + } + + @Override + public boolean replicate(ReplicateContext replicateContext) { + REPLICATED.set(true); + return true; + } + + @Override + public boolean isRunning() { + return running; + } + + @Override + public boolean isStarting() { + return false; + } + + @Override + public void start() { + running = true; + } + + @Override + public void awaitRunning() { + long interval = 100L; + while (!running) { + Threads.sleep(interval); + } + } + + @Override + public void awaitRunning(long timeout, TimeUnit unit) { + long start = System.currentTimeMillis(); + long end = start + unit.toMillis(timeout); + long interval = 100L; + while (!running && System.currentTimeMillis() < end) { + Threads.sleep(interval); + } + } + + @Override + public void stop() { + running = false; + } + + @Override + public void awaitTerminated() { + long interval = 100L; + while (running) { + Threads.sleep(interval); + } + } + + @Override + public void awaitTerminated(long timeout, TimeUnit unit) { + long start = System.currentTimeMillis(); + long end = start + unit.toMillis(timeout); + long interval = 100L; + while (running && System.currentTimeMillis() < end) { + Threads.sleep(interval); + } + } + + @Override + public Throwable failureCause() { + return null; + } + + @Override + public void peerConfigUpdated(ReplicationPeerConfig rpc) { + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java index b421dc5f5664..ffc09bccd6be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java @@ -611,7 +611,7 @@ private int verifyHFile(Path p) throws IOException { Configuration conf = util.getConfiguration(); HFile.Reader reader = HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf); - HFileScanner scanner = reader.getScanner(false, false); + HFileScanner scanner = reader.getScanner(conf, false, false); scanner.seekTo(); int count = 0; do { diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index 9b216e0559d5..d55e516ba94b 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -556,7 +556,7 @@ def _hash_to_scan(args) end scan.setScanMetricsEnabled(enablemetrics) if enablemetrics - scan.setTimeStamp(timestamp) if timestamp + scan.setTimestamp(timestamp) if timestamp scan.setCacheBlocks(cache_blocks) scan.setReversed(reversed) scan.setCaching(cache) if cache > 0 diff --git a/hbase-shell/src/main/ruby/shell/commands/trace.rb b/hbase-shell/src/main/ruby/shell/commands/trace.rb index 598bca4e3774..7e0672d6f06f 100644 --- a/hbase-shell/src/main/ruby/shell/commands/trace.rb +++ b/hbase-shell/src/main/ruby/shell/commands/trace.rb @@ -17,21 +17,16 @@ # limitations under the License. 
# -# Disable tracing for now as HTrace does not work any more -#java_import org.apache.hadoop.hbase.trace.SpanReceiverHost - module Shell module Commands class Trace < Command -# @@conf = org.apache.htrace.core.HTraceConfiguration.fromKeyValuePairs( -# 'sampler.classes', 'org.apache.htrace.core.AlwaysSampler' -# ) -# @@tracer = org.apache.htrace.core.Tracer::Builder.new('HBaseShell').conf(@@conf).build() -# @@tracescope = nil + @@tracer = org.apache.hadoop.hbase.trace.TraceUtil.getGlobalTracer() + @@tracespan = nil + @@tracescope = nil def help <<-EOF -Start or Stop tracing using HTrace. +Start or Stop tracing using OpenTelemetry. Always returns true if tracing is running, otherwise false. If the first argument is 'start', new span is started. If the first argument is 'stop', current running span is stopped. @@ -58,23 +53,24 @@ def command(startstop = 'status', spanname = 'HBaseShell') end def trace(startstop, spanname) -# @@receiver ||= SpanReceiverHost.getInstance(@shell.hbase.configuration) -# if startstop == 'start' -# unless tracing? -# @@tracescope = @@tracer.newScope(spanname) -# end -# elsif startstop == 'stop' -# if tracing? -# @@tracescope.close -# @@tracescope = nil -# end -# end -# tracing? + if startstop == 'start' + unless tracing? + @@tracespan = @@tracer.spanBuilder(spanname).startSpan() + @@tracescope = @@tracespan.makeCurrent() + end + elsif startstop == 'stop' + if tracing? + @@tracescope.close() + @@tracespan.end() + @@tracescope = nil + end + end + tracing? end -# def tracing? -# @@tracescope != nil -# end + def tracing? + @@tracescope != nil + end end end end diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java index 26369349e1e5..4e67a74fad0c 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java @@ -35,6 +35,6 @@ public class TestTableShell extends AbstractTestShell { @Override protected String getIncludeList() { - return "test_table.rb"; + return "table_test.rb"; } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java index 9147ea5dba67..37cf8d692665 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java @@ -28,6 +28,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -203,6 +204,20 @@ public boolean isTableEnabled(ByteBuffer tableName) throws IOError { } } + @Override + public Map getTableNamesWithIsTableEnabled() throws IOError { + try { + HashMap tables = new HashMap<>(); + for (ByteBuffer tableName: this.getTableNames()) { + tables.put(tableName, this.isTableEnabled(tableName)); + } + return tables; + } catch (IOError e) { + LOG.warn(e.getMessage(), e); + throw getIOError(e); + } + } + // ThriftServerRunner.compact should be deprecated and replaced with methods specific to // table and region. 
@Override diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index 52b63806bff5..43bc7fb60118 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -7,7 +7,7 @@ package org.apache.hadoop.hbase.thrift.generated; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19") +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-10-05") public class Hbase { public interface Iface { @@ -45,6 +45,13 @@ public interface Iface { */ public java.util.List getTableNames() throws IOError, org.apache.thrift.TException; + /** + * List all the userspace tables and their enabled or disabled flags. + * + * @return list of tables with is enabled flags + */ + public java.util.Map getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException; + /** * List all the column families assoicated with a table. * @@ -657,6 +664,8 @@ public interface AsyncIface { public void getTableNames(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; + public void getTableNamesWithIsTableEnabled(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; + public void getColumnDescriptors(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; public void getTableRegions(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; @@ -906,6 +915,31 @@ public java.util.List recv_getTableNames() throws IOError, throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNames failed: unknown result"); } + public java.util.Map getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException + { + send_getTableNamesWithIsTableEnabled(); + return recv_getTableNamesWithIsTableEnabled(); + } + + public void send_getTableNamesWithIsTableEnabled() throws org.apache.thrift.TException + { + getTableNamesWithIsTableEnabled_args args = new getTableNamesWithIsTableEnabled_args(); + sendBase("getTableNamesWithIsTableEnabled", args); + } + + public java.util.Map recv_getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException + { + getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result(); + receiveBase(result, "getTableNamesWithIsTableEnabled"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.io != null) { + throw result.io; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNamesWithIsTableEnabled failed: unknown result"); + } + public java.util.Map getColumnDescriptors(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException { send_getColumnDescriptors(tableName); @@ -2282,6 +2316,35 @@ public java.util.List getResult() throws IOError, org.apach } } + public void getTableNamesWithIsTableEnabled(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + checkReady(); + getTableNamesWithIsTableEnabled_call method_call = new 
getTableNamesWithIsTableEnabled_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getTableNamesWithIsTableEnabled_call extends org.apache.thrift.async.TAsyncMethodCall> { + public getTableNamesWithIsTableEnabled_call(org.apache.thrift.async.AsyncMethodCallback> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesWithIsTableEnabled", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getTableNamesWithIsTableEnabled_args args = new getTableNamesWithIsTableEnabled_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public java.util.Map getResult() throws IOError, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new java.lang.IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getTableNamesWithIsTableEnabled(); + } + } + public void getColumnDescriptors(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { checkReady(); getColumnDescriptors_call method_call = new getColumnDescriptors_call(tableName, resultHandler, this, ___protocolFactory, ___transport); @@ -3912,6 +3975,7 @@ protected Processor(I iface, java.util.Map extends org.apache.thrift.ProcessFunction { + public getTableNamesWithIsTableEnabled() { + super("getTableNamesWithIsTableEnabled"); + } + + public getTableNamesWithIsTableEnabled_args getEmptyArgsInstance() { + return new getTableNamesWithIsTableEnabled_args(); + } + + protected boolean isOneway() { + return false; + } + + @Override + protected boolean rethrowUnhandledExceptions() { + return false; + } + + public getTableNamesWithIsTableEnabled_result getResult(I iface, getTableNamesWithIsTableEnabled_args args) throws org.apache.thrift.TException { + getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result(); + try { + result.success = iface.getTableNamesWithIsTableEnabled(); + } catch (IOError io) { + result.io = io; + } + return result; + } + } + public static class getColumnDescriptors extends org.apache.thrift.ProcessFunction { public getColumnDescriptors() { super("getColumnDescriptors"); @@ -5393,6 +5486,7 @@ protected AsyncProcessor(I iface, java.util.Map extends org.apache.thrift.AsyncProcessFunction> { - public getColumnDescriptors() { - super("getColumnDescriptors"); + public static class getTableNamesWithIsTableEnabled extends org.apache.thrift.AsyncProcessFunction> { + public getTableNamesWithIsTableEnabled() { + super("getTableNamesWithIsTableEnabled"); } - public getColumnDescriptors_args getEmptyArgsInstance() { - return new getColumnDescriptors_args(); + public getTableNamesWithIsTableEnabled_args getEmptyArgsInstance() { + return new 
getTableNamesWithIsTableEnabled_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.Map o) { - getColumnDescriptors_result result = new getColumnDescriptors_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.Map o) { + getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -5853,7 +5947,7 @@ public void onComplete(java.util.Map o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getColumnDescriptors_result result = new getColumnDescriptors_result(); + getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -5885,25 +5979,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getColumnDescriptors_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getColumnDescriptors(args.tableName,resultHandler); + public void start(I iface, getTableNamesWithIsTableEnabled_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getTableNamesWithIsTableEnabled(resultHandler); } } - public static class getTableRegions extends org.apache.thrift.AsyncProcessFunction> { - public getTableRegions() { - super("getTableRegions"); + public static class getColumnDescriptors extends org.apache.thrift.AsyncProcessFunction> { + public getColumnDescriptors() { + super("getColumnDescriptors"); } - public getTableRegions_args getEmptyArgsInstance() { - return new getTableRegions_args(); + public getColumnDescriptors_args getEmptyArgsInstance() { + return new getColumnDescriptors_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - getTableRegions_result result = new getTableRegions_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.Map o) { + getColumnDescriptors_result result = new getColumnDescriptors_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -5918,7 +6012,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getTableRegions_result result = new getTableRegions_result(); + 
getColumnDescriptors_result result = new getColumnDescriptors_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -5950,25 +6044,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, getTableRegions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getTableRegions(args.tableName,resultHandler); + public void start(I iface, getColumnDescriptors_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getColumnDescriptors(args.tableName,resultHandler); } } - public static class createTable extends org.apache.thrift.AsyncProcessFunction { - public createTable() { - super("createTable"); + public static class getTableRegions extends org.apache.thrift.AsyncProcessFunction> { + public getTableRegions() { + super("getTableRegions"); } - public createTable_args getEmptyArgsInstance() { - return new createTable_args(); + public getTableRegions_args getEmptyArgsInstance() { + return new getTableRegions_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - createTable_result result = new createTable_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + getTableRegions_result result = new getTableRegions_result(); + result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -5982,19 +6077,11 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - createTable_result result = new createTable_result(); + getTableRegions_result result = new getTableRegions_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof IllegalArgument) { - result.ia = (IllegalArgument) e; - result.setIaIsSet(true); - msg = result; - } else if (e instanceof AlreadyExists) { - result.exist = (AlreadyExists) e; - result.setExistIsSet(true); - msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -6022,25 +6109,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, createTable_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.createTable(args.tableName, args.columnFamilies,resultHandler); + public void start(I iface, getTableRegions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getTableRegions(args.tableName,resultHandler); } } - public static class deleteTable extends org.apache.thrift.AsyncProcessFunction { - public deleteTable() { - super("deleteTable"); + public static class createTable extends org.apache.thrift.AsyncProcessFunction { + public createTable() { + 
super("createTable"); } - public deleteTable_args getEmptyArgsInstance() { - return new deleteTable_args(); + public createTable_args getEmptyArgsInstance() { + return new createTable_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - deleteTable_result result = new deleteTable_result(); + createTable_result result = new createTable_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -6054,140 +6141,18 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - deleteTable_result result = new deleteTable_result(); + createTable_result result = new createTable_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof org.apache.thrift.transport.TTransportException) { - _LOGGER.error("TTransportException inside handler", e); - fb.close(); - return; - } else if (e instanceof org.apache.thrift.TApplicationException) { - _LOGGER.error("TApplicationException inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TApplicationException)e; - } else { - _LOGGER.error("Exception inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - } catch (java.lang.Exception ex) { - _LOGGER.error("Exception writing to internal frame buffer", ex); - fb.close(); - } - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, deleteTable_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.deleteTable(args.tableName,resultHandler); - } - } - - public static class get extends org.apache.thrift.AsyncProcessFunction> { - public get() { - super("get"); - } - - public get_args getEmptyArgsInstance() { - return new get_args(); - } - - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - get_result result = new get_result(); - result.success = o; - try { - fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - } catch (org.apache.thrift.transport.TTransportException e) { - _LOGGER.error("TTransportException writing to internal frame buffer", e); - fb.close(); - } catch (java.lang.Exception e) { - _LOGGER.error("Exception writing to internal frame buffer", e); - onError(e); - } - } - public void onError(java.lang.Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TSerializable msg; - get_result result = new get_result(); - if (e instanceof IOError) { - result.io = (IOError) e; - result.setIoIsSet(true); + } else if (e instanceof 
IllegalArgument) { + result.ia = (IllegalArgument) e; + result.setIaIsSet(true); msg = result; - } else if (e instanceof org.apache.thrift.transport.TTransportException) { - _LOGGER.error("TTransportException inside handler", e); - fb.close(); - return; - } else if (e instanceof org.apache.thrift.TApplicationException) { - _LOGGER.error("TApplicationException inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TApplicationException)e; - } else { - _LOGGER.error("Exception inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - } catch (java.lang.Exception ex) { - _LOGGER.error("Exception writing to internal frame buffer", ex); - fb.close(); - } - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.get(args.tableName, args.row, args.column, args.attributes,resultHandler); - } - } - - public static class getVer extends org.apache.thrift.AsyncProcessFunction> { - public getVer() { - super("getVer"); - } - - public getVer_args getEmptyArgsInstance() { - return new getVer_args(); - } - - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - getVer_result result = new getVer_result(); - result.success = o; - try { - fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - } catch (org.apache.thrift.transport.TTransportException e) { - _LOGGER.error("TTransportException writing to internal frame buffer", e); - fb.close(); - } catch (java.lang.Exception e) { - _LOGGER.error("Exception writing to internal frame buffer", e); - onError(e); - } - } - public void onError(java.lang.Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TSerializable msg; - getVer_result result = new getVer_result(); - if (e instanceof IOError) { - result.io = (IOError) e; - result.setIoIsSet(true); + } else if (e instanceof AlreadyExists) { + result.exist = (AlreadyExists) e; + result.setExistIsSet(true); msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); @@ -6216,25 +6181,89 @@ protected boolean isOneway() { return false; } - public void start(I iface, getVer_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes,resultHandler); + public void start(I iface, createTable_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.createTable(args.tableName, args.columnFamilies,resultHandler); } } - public static class getVerTs extends org.apache.thrift.AsyncProcessFunction> { - public getVerTs() { - super("getVerTs"); + public static class deleteTable extends org.apache.thrift.AsyncProcessFunction { + public deleteTable() { + super("deleteTable"); } - public 
getVerTs_args getEmptyArgsInstance() { - return new getVerTs_args(); + public deleteTable_args getEmptyArgsInstance() { + return new deleteTable_args(); + } + + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + deleteTable_result result = new deleteTable_result(); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + deleteTable_result result = new deleteTable_result(); + if (e instanceof IOError) { + result.io = (IOError) e; + result.setIoIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, deleteTable_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.deleteTable(args.tableName,resultHandler); + } + } + + public static class get extends org.apache.thrift.AsyncProcessFunction> { + public get() { + super("get"); + } + + public get_args getEmptyArgsInstance() { + return new get_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getVerTs_result result = new getVerTs_result(); + get_result result = new get_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6249,7 +6278,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getVerTs_result result = new getVerTs_result(); + get_result result = new get_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6281,25 +6310,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getVerTs_args args, 
org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes,resultHandler); + public void start(I iface, get_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.get(args.tableName, args.row, args.column, args.attributes,resultHandler); } } - public static class getRow extends org.apache.thrift.AsyncProcessFunction> { - public getRow() { - super("getRow"); + public static class getVer extends org.apache.thrift.AsyncProcessFunction> { + public getVer() { + super("getVer"); } - public getRow_args getEmptyArgsInstance() { - return new getRow_args(); + public getVer_args getEmptyArgsInstance() { + return new getVer_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - getRow_result result = new getRow_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + getVer_result result = new getVer_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6314,7 +6343,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRow_result result = new getRow_result(); + getVer_result result = new getVer_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6346,25 +6375,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRow_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRow(args.tableName, args.row, args.attributes,resultHandler); + public void start(I iface, getVer_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes,resultHandler); } } - public static class getRowWithColumns extends org.apache.thrift.AsyncProcessFunction> { - public getRowWithColumns() { - super("getRowWithColumns"); + public static class getVerTs extends org.apache.thrift.AsyncProcessFunction> { + public getVerTs() { + super("getVerTs"); } - public getRowWithColumns_args getEmptyArgsInstance() { - return new getRowWithColumns_args(); + public getVerTs_args getEmptyArgsInstance() { + return new getVerTs_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - getRowWithColumns_result 
result = new getRowWithColumns_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + getVerTs_result result = new getVerTs_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6379,7 +6408,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowWithColumns_result result = new getRowWithColumns_result(); + getVerTs_result result = new getVerTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6411,25 +6440,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRowWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes,resultHandler); + public void start(I iface, getVerTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes,resultHandler); } } - public static class getRowTs extends org.apache.thrift.AsyncProcessFunction> { - public getRowTs() { - super("getRowTs"); + public static class getRow extends org.apache.thrift.AsyncProcessFunction> { + public getRow() { + super("getRow"); } - public getRowTs_args getEmptyArgsInstance() { - return new getRowTs_args(); + public getRow_args getEmptyArgsInstance() { + return new getRow_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRowTs_result result = new getRowTs_result(); + getRow_result result = new getRow_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6444,7 +6473,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowTs_result result = new getRowTs_result(); + getRow_result result = new getRow_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6476,25 +6505,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRowTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowTs(args.tableName, args.row, args.timestamp, args.attributes,resultHandler); + public void start(I iface, getRow_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRow(args.tableName, args.row, args.attributes,resultHandler); } } - public static class getRowWithColumnsTs extends org.apache.thrift.AsyncProcessFunction> { - public getRowWithColumnsTs() { - super("getRowWithColumnsTs"); + public static class getRowWithColumns extends org.apache.thrift.AsyncProcessFunction> { + public getRowWithColumns() { + super("getRowWithColumns"); } - public getRowWithColumnsTs_args getEmptyArgsInstance() { - return new 
getRowWithColumnsTs_args(); + public getRowWithColumns_args getEmptyArgsInstance() { + return new getRowWithColumns_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRowWithColumnsTs_result result = new getRowWithColumnsTs_result(); + getRowWithColumns_result result = new getRowWithColumns_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6509,7 +6538,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowWithColumnsTs_result result = new getRowWithColumnsTs_result(); + getRowWithColumns_result result = new getRowWithColumns_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6541,25 +6570,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRowWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes,resultHandler); + public void start(I iface, getRowWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes,resultHandler); } } - public static class getRows extends org.apache.thrift.AsyncProcessFunction> { - public getRows() { - super("getRows"); + public static class getRowTs extends org.apache.thrift.AsyncProcessFunction> { + public getRowTs() { + super("getRowTs"); } - public getRows_args getEmptyArgsInstance() { - return new getRows_args(); + public getRowTs_args getEmptyArgsInstance() { + return new getRowTs_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRows_result result = new getRows_result(); + getRowTs_result result = new getRowTs_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6574,7 +6603,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRows_result result = new getRows_result(); + getRowTs_result result = new getRowTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6606,25 +6635,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRows_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRows(args.tableName, args.rows, args.attributes,resultHandler); + public void start(I iface, getRowTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowTs(args.tableName, args.row, 
args.timestamp, args.attributes,resultHandler); } } - public static class getRowsWithColumns extends org.apache.thrift.AsyncProcessFunction> { - public getRowsWithColumns() { - super("getRowsWithColumns"); + public static class getRowWithColumnsTs extends org.apache.thrift.AsyncProcessFunction> { + public getRowWithColumnsTs() { + super("getRowWithColumnsTs"); } - public getRowsWithColumns_args getEmptyArgsInstance() { - return new getRowsWithColumns_args(); + public getRowWithColumnsTs_args getEmptyArgsInstance() { + return new getRowWithColumnsTs_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRowsWithColumns_result result = new getRowsWithColumns_result(); + getRowWithColumnsTs_result result = new getRowWithColumnsTs_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6639,7 +6668,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowsWithColumns_result result = new getRowsWithColumns_result(); + getRowWithColumnsTs_result result = new getRowWithColumnsTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6671,25 +6700,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRowsWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes,resultHandler); + public void start(I iface, getRowWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes,resultHandler); } } - public static class getRowsTs extends org.apache.thrift.AsyncProcessFunction> { - public getRowsTs() { - super("getRowsTs"); + public static class getRows extends org.apache.thrift.AsyncProcessFunction> { + public getRows() { + super("getRows"); } - public getRowsTs_args getEmptyArgsInstance() { - return new getRowsTs_args(); + public getRows_args getEmptyArgsInstance() { + return new getRows_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRowsTs_result result = new getRowsTs_result(); + getRows_result result = new getRows_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6704,7 +6733,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowsTs_result result = new getRowsTs_result(); + getRows_result result = new getRows_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6736,25 +6765,25 @@ protected 
boolean isOneway() { return false; } - public void start(I iface, getRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes,resultHandler); + public void start(I iface, getRows_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRows(args.tableName, args.rows, args.attributes,resultHandler); } } - public static class getRowsWithColumnsTs extends org.apache.thrift.AsyncProcessFunction> { - public getRowsWithColumnsTs() { - super("getRowsWithColumnsTs"); + public static class getRowsWithColumns extends org.apache.thrift.AsyncProcessFunction> { + public getRowsWithColumns() { + super("getRowsWithColumns"); } - public getRowsWithColumnsTs_args getEmptyArgsInstance() { - return new getRowsWithColumnsTs_args(); + public getRowsWithColumns_args getEmptyArgsInstance() { + return new getRowsWithColumns_args(); } public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback>() { public void onComplete(java.util.List o) { - getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result(); + getRowsWithColumns_result result = new getRowsWithColumns_result(); result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -6769,7 +6798,7 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result(); + getRowsWithColumns_result result = new getRowsWithColumns_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -6801,25 +6830,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, getRowsWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes,resultHandler); + public void start(I iface, getRowsWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes,resultHandler); } } - public static class mutateRow extends org.apache.thrift.AsyncProcessFunction { - public mutateRow() { - super("mutateRow"); + public static class getRowsTs extends org.apache.thrift.AsyncProcessFunction> { + public getRowsTs() { + super("getRowsTs"); } - public mutateRow_args getEmptyArgsInstance() { - return new mutateRow_args(); + public getRowsTs_args getEmptyArgsInstance() { + return new getRowsTs_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - 
mutateRow_result result = new mutateRow_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + getRowsTs_result result = new getRowsTs_result(); + result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -6833,15 +6863,11 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - mutateRow_result result = new mutateRow_result(); + getRowsTs_result result = new getRowsTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof IllegalArgument) { - result.ia = (IllegalArgument) e; - result.setIaIsSet(true); - msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -6869,25 +6895,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, mutateRow_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.mutateRow(args.tableName, args.row, args.mutations, args.attributes,resultHandler); + public void start(I iface, getRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes,resultHandler); } } - public static class mutateRowTs extends org.apache.thrift.AsyncProcessFunction { - public mutateRowTs() { - super("mutateRowTs"); + public static class getRowsWithColumnsTs extends org.apache.thrift.AsyncProcessFunction> { + public getRowsWithColumnsTs() { + super("getRowsWithColumnsTs"); } - public mutateRowTs_args getEmptyArgsInstance() { - return new mutateRowTs_args(); + public getRowsWithColumnsTs_args getEmptyArgsInstance() { + return new getRowsWithColumnsTs_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - mutateRowTs_result result = new mutateRowTs_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result(); + result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -6901,15 +6928,11 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - mutateRowTs_result result = new mutateRowTs_result(); + getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof IllegalArgument) { - result.ia = (IllegalArgument) e; - result.setIaIsSet(true); - msg = result; } else if (e 
instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -6937,25 +6960,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, mutateRowTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes,resultHandler); + public void start(I iface, getRowsWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes,resultHandler); } } - public static class mutateRows extends org.apache.thrift.AsyncProcessFunction { - public mutateRows() { - super("mutateRows"); + public static class mutateRow extends org.apache.thrift.AsyncProcessFunction { + public mutateRow() { + super("mutateRow"); } - public mutateRows_args getEmptyArgsInstance() { - return new mutateRows_args(); + public mutateRow_args getEmptyArgsInstance() { + return new mutateRow_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - mutateRows_result result = new mutateRows_result(); + mutateRow_result result = new mutateRow_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -6969,7 +6992,7 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - mutateRows_result result = new mutateRows_result(); + mutateRow_result result = new mutateRow_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7005,25 +7028,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, mutateRows_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.mutateRows(args.tableName, args.rowBatches, args.attributes,resultHandler); + public void start(I iface, mutateRow_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.mutateRow(args.tableName, args.row, args.mutations, args.attributes,resultHandler); } } - public static class mutateRowsTs extends org.apache.thrift.AsyncProcessFunction { - public mutateRowsTs() { - super("mutateRowsTs"); + public static class mutateRowTs extends org.apache.thrift.AsyncProcessFunction { + public mutateRowTs() { + super("mutateRowTs"); } - public mutateRowsTs_args getEmptyArgsInstance() { - return new mutateRowsTs_args(); + public mutateRowTs_args getEmptyArgsInstance() { + return new mutateRowTs_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - mutateRowsTs_result result = new mutateRowsTs_result(); + mutateRowTs_result result = new mutateRowTs_result(); try { 
fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7037,7 +7060,7 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - mutateRowsTs_result result = new mutateRowsTs_result(); + mutateRowTs_result result = new mutateRowTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7073,27 +7096,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, mutateRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes,resultHandler); + public void start(I iface, mutateRowTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes,resultHandler); } } - public static class atomicIncrement extends org.apache.thrift.AsyncProcessFunction { - public atomicIncrement() { - super("atomicIncrement"); + public static class mutateRows extends org.apache.thrift.AsyncProcessFunction { + public mutateRows() { + super("mutateRows"); } - public atomicIncrement_args getEmptyArgsInstance() { - return new atomicIncrement_args(); + public mutateRows_args getEmptyArgsInstance() { + return new mutateRows_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(java.lang.Long o) { - atomicIncrement_result result = new atomicIncrement_result(); - result.success = o; - result.setSuccessIsSet(true); + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + mutateRows_result result = new mutateRows_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7107,7 +7128,7 @@ public void onComplete(java.lang.Long o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - atomicIncrement_result result = new atomicIncrement_result(); + mutateRows_result result = new mutateRows_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7143,25 +7164,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, atomicIncrement_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.atomicIncrement(args.tableName, args.row, args.column, args.value,resultHandler); + public void start(I iface, mutateRows_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.mutateRows(args.tableName, args.rowBatches, args.attributes,resultHandler); } } - public static class deleteAll extends org.apache.thrift.AsyncProcessFunction { - public deleteAll() { 
- super("deleteAll"); + public static class mutateRowsTs extends org.apache.thrift.AsyncProcessFunction { + public mutateRowsTs() { + super("mutateRowsTs"); } - public deleteAll_args getEmptyArgsInstance() { - return new deleteAll_args(); + public mutateRowsTs_args getEmptyArgsInstance() { + return new mutateRowsTs_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - deleteAll_result result = new deleteAll_result(); + mutateRowsTs_result result = new mutateRowsTs_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7175,74 +7196,14 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - deleteAll_result result = new deleteAll_result(); + mutateRowsTs_result result = new mutateRowsTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof org.apache.thrift.transport.TTransportException) { - _LOGGER.error("TTransportException inside handler", e); - fb.close(); - return; - } else if (e instanceof org.apache.thrift.TApplicationException) { - _LOGGER.error("TApplicationException inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TApplicationException)e; - } else { - _LOGGER.error("Exception inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - } catch (java.lang.Exception ex) { - _LOGGER.error("Exception writing to internal frame buffer", ex); - fb.close(); - } - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, deleteAll_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.deleteAll(args.tableName, args.row, args.column, args.attributes,resultHandler); - } - } - - public static class deleteAllTs extends org.apache.thrift.AsyncProcessFunction { - public deleteAllTs() { - super("deleteAllTs"); - } - - public deleteAllTs_args getEmptyArgsInstance() { - return new deleteAllTs_args(); - } - - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - deleteAllTs_result result = new deleteAllTs_result(); - try { - fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - } catch (org.apache.thrift.transport.TTransportException e) { - _LOGGER.error("TTransportException writing to internal frame buffer", e); - fb.close(); - } catch (java.lang.Exception e) { - _LOGGER.error("Exception writing to internal frame buffer", e); - onError(e); - } - } - public void onError(java.lang.Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - 
org.apache.thrift.TSerializable msg; - deleteAllTs_result result = new deleteAllTs_result(); - if (e instanceof IOError) { - result.io = (IOError) e; - result.setIoIsSet(true); + } else if (e instanceof IllegalArgument) { + result.ia = (IllegalArgument) e; + result.setIaIsSet(true); msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); @@ -7271,25 +7232,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, deleteAllTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes,resultHandler); + public void start(I iface, mutateRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes,resultHandler); } } - public static class deleteAllRow extends org.apache.thrift.AsyncProcessFunction { - public deleteAllRow() { - super("deleteAllRow"); + public static class atomicIncrement extends org.apache.thrift.AsyncProcessFunction { + public atomicIncrement() { + super("atomicIncrement"); } - public deleteAllRow_args getEmptyArgsInstance() { - return new deleteAllRow_args(); + public atomicIncrement_args getEmptyArgsInstance() { + return new atomicIncrement_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - deleteAllRow_result result = new deleteAllRow_result(); + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(java.lang.Long o) { + atomicIncrement_result result = new atomicIncrement_result(); + result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7303,11 +7266,15 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - deleteAllRow_result result = new deleteAllRow_result(); + atomicIncrement_result result = new atomicIncrement_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; + } else if (e instanceof IllegalArgument) { + result.ia = (IllegalArgument) e; + result.setIaIsSet(true); + msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -7335,25 +7302,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, deleteAllRow_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.deleteAllRow(args.tableName, args.row, args.attributes,resultHandler); + public void start(I iface, atomicIncrement_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + 
iface.atomicIncrement(args.tableName, args.row, args.column, args.value,resultHandler); } } - public static class increment extends org.apache.thrift.AsyncProcessFunction { - public increment() { - super("increment"); + public static class deleteAll extends org.apache.thrift.AsyncProcessFunction { + public deleteAll() { + super("deleteAll"); } - public increment_args getEmptyArgsInstance() { - return new increment_args(); + public deleteAll_args getEmptyArgsInstance() { + return new deleteAll_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - increment_result result = new increment_result(); + deleteAll_result result = new deleteAll_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7367,7 +7334,7 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - increment_result result = new increment_result(); + deleteAll_result result = new deleteAll_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7399,25 +7366,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, increment_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.increment(args.increment,resultHandler); + public void start(I iface, deleteAll_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.deleteAll(args.tableName, args.row, args.column, args.attributes,resultHandler); } } - public static class incrementRows extends org.apache.thrift.AsyncProcessFunction { - public incrementRows() { - super("incrementRows"); + public static class deleteAllTs extends org.apache.thrift.AsyncProcessFunction { + public deleteAllTs() { + super("deleteAllTs"); } - public incrementRows_args getEmptyArgsInstance() { - return new incrementRows_args(); + public deleteAllTs_args getEmptyArgsInstance() { + return new deleteAllTs_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - incrementRows_result result = new incrementRows_result(); + deleteAllTs_result result = new deleteAllTs_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7431,7 +7398,7 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - incrementRows_result result = new incrementRows_result(); + deleteAllTs_result result = new deleteAllTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7463,25 +7430,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, incrementRows_args args, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.incrementRows(args.increments,resultHandler); + public void start(I iface, deleteAllTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes,resultHandler); } } - public static class deleteAllRowTs extends org.apache.thrift.AsyncProcessFunction { - public deleteAllRowTs() { - super("deleteAllRowTs"); + public static class deleteAllRow extends org.apache.thrift.AsyncProcessFunction { + public deleteAllRow() { + super("deleteAllRow"); } - public deleteAllRowTs_args getEmptyArgsInstance() { - return new deleteAllRowTs_args(); + public deleteAllRow_args getEmptyArgsInstance() { + return new deleteAllRow_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(Void o) { - deleteAllRowTs_result result = new deleteAllRowTs_result(); + deleteAllRow_result result = new deleteAllRow_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7495,7 +7462,7 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - deleteAllRowTs_result result = new deleteAllRowTs_result(); + deleteAllRow_result result = new deleteAllRow_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7527,27 +7494,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, deleteAllRowTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes,resultHandler); + public void start(I iface, deleteAllRow_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.deleteAllRow(args.tableName, args.row, args.attributes,resultHandler); } } - public static class scannerOpenWithScan extends org.apache.thrift.AsyncProcessFunction { - public scannerOpenWithScan() { - super("scannerOpenWithScan"); + public static class increment extends org.apache.thrift.AsyncProcessFunction { + public increment() { + super("increment"); } - public scannerOpenWithScan_args getEmptyArgsInstance() { - return new scannerOpenWithScan_args(); + public increment_args getEmptyArgsInstance() { + return new increment_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(java.lang.Integer o) { - scannerOpenWithScan_result result = new scannerOpenWithScan_result(); - result.success = o; - result.setSuccessIsSet(true); + return new 
org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + increment_result result = new increment_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7561,7 +7526,7 @@ public void onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpenWithScan_result result = new scannerOpenWithScan_result(); + increment_result result = new increment_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7593,27 +7558,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpenWithScan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpenWithScan(args.tableName, args.scan, args.attributes,resultHandler); + public void start(I iface, increment_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.increment(args.increment,resultHandler); } } - public static class scannerOpen extends org.apache.thrift.AsyncProcessFunction { - public scannerOpen() { - super("scannerOpen"); + public static class incrementRows extends org.apache.thrift.AsyncProcessFunction { + public incrementRows() { + super("incrementRows"); } - public scannerOpen_args getEmptyArgsInstance() { - return new scannerOpen_args(); + public incrementRows_args getEmptyArgsInstance() { + return new incrementRows_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(java.lang.Integer o) { - scannerOpen_result result = new scannerOpen_result(); - result.success = o; - result.setSuccessIsSet(true); + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + incrementRows_result result = new incrementRows_result(); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7627,7 +7590,7 @@ public void onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpen_result result = new scannerOpen_result(); + incrementRows_result result = new incrementRows_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7659,25 +7622,89 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpen_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes,resultHandler); + public void start(I iface, incrementRows_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.incrementRows(args.increments,resultHandler); } } - public static class 
scannerOpenWithStop extends org.apache.thrift.AsyncProcessFunction { - public scannerOpenWithStop() { - super("scannerOpenWithStop"); + public static class deleteAllRowTs extends org.apache.thrift.AsyncProcessFunction { + public deleteAllRowTs() { + super("deleteAllRowTs"); } - public scannerOpenWithStop_args getEmptyArgsInstance() { - return new scannerOpenWithStop_args(); + public deleteAllRowTs_args getEmptyArgsInstance() { + return new deleteAllRowTs_args(); + } + + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + deleteAllRowTs_result result = new deleteAllRowTs_result(); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + deleteAllRowTs_result result = new deleteAllRowTs_result(); + if (e instanceof IOError) { + result.io = (IOError) e; + result.setIoIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, deleteAllRowTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes,resultHandler); + } + } + + public static class scannerOpenWithScan extends org.apache.thrift.AsyncProcessFunction { + public scannerOpenWithScan() { + super("scannerOpenWithScan"); + } + + public scannerOpenWithScan_args getEmptyArgsInstance() { + return new scannerOpenWithScan_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(java.lang.Integer o) { - scannerOpenWithStop_result result = new scannerOpenWithStop_result(); + scannerOpenWithScan_result result = new scannerOpenWithScan_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -7693,7 +7720,7 @@ public void 
onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpenWithStop_result result = new scannerOpenWithStop_result(); + scannerOpenWithScan_result result = new scannerOpenWithScan_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7725,25 +7752,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpenWithStop_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes,resultHandler); + public void start(I iface, scannerOpenWithScan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.scannerOpenWithScan(args.tableName, args.scan, args.attributes,resultHandler); } } - public static class scannerOpenWithPrefix extends org.apache.thrift.AsyncProcessFunction { - public scannerOpenWithPrefix() { - super("scannerOpenWithPrefix"); + public static class scannerOpen extends org.apache.thrift.AsyncProcessFunction { + public scannerOpen() { + super("scannerOpen"); } - public scannerOpenWithPrefix_args getEmptyArgsInstance() { - return new scannerOpenWithPrefix_args(); + public scannerOpen_args getEmptyArgsInstance() { + return new scannerOpen_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(java.lang.Integer o) { - scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result(); + scannerOpen_result result = new scannerOpen_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -7759,7 +7786,7 @@ public void onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result(); + scannerOpen_result result = new scannerOpen_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7791,25 +7818,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpenWithPrefix_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes,resultHandler); + public void start(I iface, scannerOpen_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes,resultHandler); } } - public static class scannerOpenTs extends org.apache.thrift.AsyncProcessFunction { - public scannerOpenTs() { - super("scannerOpenTs"); + public static class scannerOpenWithStop extends org.apache.thrift.AsyncProcessFunction { + public scannerOpenWithStop() { + super("scannerOpenWithStop"); } - public scannerOpenTs_args getEmptyArgsInstance() { - return new scannerOpenTs_args(); + public scannerOpenWithStop_args getEmptyArgsInstance() { + return new scannerOpenWithStop_args(); } public org.apache.thrift.async.AsyncMethodCallback 
getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(java.lang.Integer o) { - scannerOpenTs_result result = new scannerOpenTs_result(); + scannerOpenWithStop_result result = new scannerOpenWithStop_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -7825,7 +7852,7 @@ public void onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpenTs_result result = new scannerOpenTs_result(); + scannerOpenWithStop_result result = new scannerOpenWithStop_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7857,25 +7884,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpenTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes,resultHandler); + public void start(I iface, scannerOpenWithStop_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes,resultHandler); } } - public static class scannerOpenWithStopTs extends org.apache.thrift.AsyncProcessFunction { - public scannerOpenWithStopTs() { - super("scannerOpenWithStopTs"); + public static class scannerOpenWithPrefix extends org.apache.thrift.AsyncProcessFunction { + public scannerOpenWithPrefix() { + super("scannerOpenWithPrefix"); } - public scannerOpenWithStopTs_args getEmptyArgsInstance() { - return new scannerOpenWithStopTs_args(); + public scannerOpenWithPrefix_args getEmptyArgsInstance() { + return new scannerOpenWithPrefix_args(); } public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new org.apache.thrift.async.AsyncMethodCallback() { public void onComplete(java.lang.Integer o) { - scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result(); + scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -7891,7 +7918,7 @@ public void onComplete(java.lang.Integer o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result(); + scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -7923,26 +7950,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerOpenWithStopTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes,resultHandler); + public void start(I iface, scannerOpenWithPrefix_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + 
iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes,resultHandler); } } - public static class scannerGet extends org.apache.thrift.AsyncProcessFunction> { - public scannerGet() { - super("scannerGet"); + public static class scannerOpenTs extends org.apache.thrift.AsyncProcessFunction { + public scannerOpenTs() { + super("scannerOpenTs"); } - public scannerGet_args getEmptyArgsInstance() { - return new scannerGet_args(); + public scannerOpenTs_args getEmptyArgsInstance() { + return new scannerOpenTs_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - scannerGet_result result = new scannerGet_result(); + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(java.lang.Integer o) { + scannerOpenTs_result result = new scannerOpenTs_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -7956,15 +7984,11 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerGet_result result = new scannerGet_result(); + scannerOpenTs_result result = new scannerOpenTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof IllegalArgument) { - result.ia = (IllegalArgument) e; - result.setIaIsSet(true); - msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -7992,26 +8016,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerGet_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.scannerGet(args.id,resultHandler); + public void start(I iface, scannerOpenTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes,resultHandler); } } - public static class scannerGetList extends org.apache.thrift.AsyncProcessFunction> { - public scannerGetList() { - super("scannerGetList"); + public static class scannerOpenWithStopTs extends org.apache.thrift.AsyncProcessFunction { + public scannerOpenWithStopTs() { + super("scannerOpenWithStopTs"); } - public scannerGetList_args getEmptyArgsInstance() { - return new scannerGetList_args(); + public scannerOpenWithStopTs_args getEmptyArgsInstance() { + return new scannerOpenWithStopTs_args(); } - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final 
org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { - public void onComplete(java.util.List o) { - scannerGetList_result result = new scannerGetList_result(); + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(java.lang.Integer o) { + scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -8025,15 +8050,11 @@ public void onComplete(java.util.List o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerGetList_result result = new scannerGetList_result(); + scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); msg = result; - } else if (e instanceof IllegalArgument) { - result.ia = (IllegalArgument) e; - result.setIaIsSet(true); - msg = result; } else if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); @@ -8061,25 +8082,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, scannerGetList_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { - iface.scannerGetList(args.id, args.nbRows,resultHandler); + public void start(I iface, scannerOpenWithStopTs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes,resultHandler); } } - public static class scannerClose extends org.apache.thrift.AsyncProcessFunction { - public scannerClose() { - super("scannerClose"); + public static class scannerGet extends org.apache.thrift.AsyncProcessFunction> { + public scannerGet() { + super("scannerGet"); } - public scannerClose_args getEmptyArgsInstance() { - return new scannerClose_args(); + public scannerGet_args getEmptyArgsInstance() { + return new scannerGet_args(); } - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(Void o) { - scannerClose_result result = new scannerClose_result(); + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + scannerGet_result result = new scannerGet_result(); + result.success = o; try { fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); } catch (org.apache.thrift.transport.TTransportException e) { @@ -8093,7 +8115,144 @@ public void onComplete(Void o) { public void onError(java.lang.Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TSerializable msg; - scannerClose_result result = new scannerClose_result(); + scannerGet_result result = new scannerGet_result(); + if (e 
instanceof IOError) { + result.io = (IOError) e; + result.setIoIsSet(true); + msg = result; + } else if (e instanceof IllegalArgument) { + result.ia = (IllegalArgument) e; + result.setIaIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, scannerGet_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.scannerGet(args.id,resultHandler); + } + } + + public static class scannerGetList extends org.apache.thrift.AsyncProcessFunction> { + public scannerGetList() { + super("scannerGetList"); + } + + public scannerGetList_args getEmptyArgsInstance() { + return new scannerGetList_args(); + } + + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback>() { + public void onComplete(java.util.List o) { + scannerGetList_result result = new scannerGetList_result(); + result.success = o; + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + scannerGetList_result result = new scannerGetList_result(); + if (e instanceof IOError) { + result.io = (IOError) e; + result.setIoIsSet(true); + msg = result; + } else if (e instanceof IllegalArgument) { + result.ia = (IllegalArgument) e; + result.setIaIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + 
_LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, scannerGetList_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.scannerGetList(args.id, args.nbRows,resultHandler); + } + } + + public static class scannerClose extends org.apache.thrift.AsyncProcessFunction { + public scannerClose() { + super("scannerClose"); + } + + public scannerClose_args getEmptyArgsInstance() { + return new scannerClose_args(); + } + + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + public void onComplete(Void o) { + scannerClose_result result = new scannerClose_result(); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + scannerClose_result result = new scannerClose_result(); if (e instanceof IOError) { result.io = (IOError) e; result.setIoIsSet(true); @@ -9294,7 +9453,764 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.io != null) { + oprot.writeFieldBegin(IO_FIELD_DESC); + struct.io.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class enableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public enableTable_resultTupleScheme getScheme() { + return new enableTable_resultTupleScheme(); + } + } + + private static class enableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetIo()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIo()) { + struct.io.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.io = new IOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return 
(org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + public static class disableTable_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_args"); + + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_argsTupleSchemeFactory(); + + /** + * name of the table + */ + public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + /** + * name of the table + */ + TABLE_NAME((short)1, "tableName"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_NAME + return TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Bytes"))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class, metaDataMap); + } + + public disableTable_args() { + } + + public disableTable_args( + java.nio.ByteBuffer tableName) + { + this(); + this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName); + } + + /** + * Performs a deep copy on other. + */ + public disableTable_args(disableTable_args other) { + if (other.isSetTableName()) { + this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName); + } + } + + public disableTable_args deepCopy() { + return new disableTable_args(this); + } + + @Override + public void clear() { + this.tableName = null; + } + + /** + * name of the table + */ + public byte[] getTableName() { + setTableName(org.apache.thrift.TBaseHelper.rightSize(tableName)); + return tableName == null ? null : tableName.array(); + } + + public java.nio.ByteBuffer bufferForTableName() { + return org.apache.thrift.TBaseHelper.copyBinary(tableName); + } + + /** + * name of the table + */ + public disableTable_args setTableName(byte[] tableName) { + this.tableName = tableName == null ? 
(java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(tableName.clone()); + return this; + } + + public disableTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) { + this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName); + return this; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + if (value instanceof byte[]) { + setTableName((byte[])value); + } else { + setTableName((java.nio.ByteBuffer)value); + } + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_NAME: + return getTableName(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case TABLE_NAME: + return isSetTableName(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof disableTable_args) + return this.equals((disableTable_args)that); + return false; + } + + public boolean equals(disableTable_args that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetTableName()) ? 
131071 : 524287); + if (isSetTableName()) + hashCode = hashCode * 8191 + tableName.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(disableTable_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetTableName(), other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("disableTable_args("); + boolean first = true; + + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.tableName, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class disableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public disableTable_argsStandardScheme getScheme() { + return new disableTable_argsStandardScheme(); + } + } + + private static class disableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readBinary(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, 
disableTable_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeBinary(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class disableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public disableTable_argsTupleScheme getScheme() { + return new disableTable_argsTupleScheme(); + } + } + + private static class disableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetTableName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetTableName()) { + oprot.writeBinary(struct.tableName); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.tableName = iprot.readBinary(); + struct.setTableNameIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + public static class disableTable_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_result"); + + private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_resultTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable IOError io; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + IO((short)1, "io"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // IO + return IO; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class, metaDataMap); + } + + public disableTable_result() { + } + + public disableTable_result( + IOError io) + { + this(); + this.io = io; + } + + /** + * Performs a deep copy on other. + */ + public disableTable_result(disableTable_result other) { + if (other.isSetIo()) { + this.io = new IOError(other.io); + } + } + + public disableTable_result deepCopy() { + return new disableTable_result(this); + } + + @Override + public void clear() { + this.io = null; + } + + @org.apache.thrift.annotation.Nullable + public IOError getIo() { + return this.io; + } + + public disableTable_result setIo(@org.apache.thrift.annotation.Nullable IOError io) { + this.io = io; + return this; + } + + public void unsetIo() { + this.io = null; + } + + /** Returns true if field io is set (has been assigned a value) and false otherwise */ + public boolean isSetIo() { + return this.io != null; + } + + public void setIoIsSet(boolean value) { + if (!value) { + this.io = null; + } + } + + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case IO: + if (value == null) { + unsetIo(); + } else { + setIo((IOError)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case IO: + return getIo(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case IO: + return isSetIo(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof disableTable_result) + return this.equals((disableTable_result)that); + return false; + } + + public boolean equals(disableTable_result that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_io = 
true && this.isSetIo(); + boolean that_present_io = true && that.isSetIo(); + if (this_present_io || that_present_io) { + if (!(this_present_io && that_present_io)) + return false; + if (!this.io.equals(that.io)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287); + if (isSetIo()) + hashCode = hashCode * 8191 + io.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(disableTable_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetIo(), other.isSetIo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.io, other.io); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("disableTable_result("); + boolean first = true; + + sb.append("io:"); + if (this.io == null) { + sb.append("null"); + } else { + sb.append(this.io); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class disableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public disableTable_resultStandardScheme getScheme() { + return new disableTable_resultStandardScheme(); + } + } + + private static class disableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // IO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.io = new IOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -9309,16 +10225,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result } - private static class enableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public enableTable_resultTupleScheme getScheme() { - return new enableTable_resultTupleScheme(); + private static class disableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public disableTable_resultTupleScheme getScheme() { + return new disableTable_resultTupleScheme(); } } - private static class enableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class disableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetIo()) { @@ -9331,7 +10247,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -9347,23 +10263,23 @@ private static S scheme(org.apache. 
} } - public static class disableTable_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_args"); + public static class isTableEnabled_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_args"); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_argsTupleSchemeFactory(); /** - * name of the table + * name of the table to check */ public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * name of the table + * name of the table to check */ TABLE_NAME((short)1, "tableName"); @@ -9430,13 +10346,13 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Bytes"))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_args.class, metaDataMap); } - public disableTable_args() { + public isTableEnabled_args() { } - public disableTable_args( + public isTableEnabled_args( java.nio.ByteBuffer tableName) { this(); @@ -9446,14 +10362,14 @@ public disableTable_args( /** * Performs a deep copy on other. */ - public disableTable_args(disableTable_args other) { + public isTableEnabled_args(isTableEnabled_args other) { if (other.isSetTableName()) { this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName); } } - public disableTable_args deepCopy() { - return new disableTable_args(this); + public isTableEnabled_args deepCopy() { + return new isTableEnabled_args(this); } @Override @@ -9462,7 +10378,7 @@ public void clear() { } /** - * name of the table + * name of the table to check */ public byte[] getTableName() { setTableName(org.apache.thrift.TBaseHelper.rightSize(tableName)); @@ -9474,14 +10390,14 @@ public java.nio.ByteBuffer bufferForTableName() { } /** - * name of the table + * name of the table to check */ - public disableTable_args setTableName(byte[] tableName) { + public isTableEnabled_args setTableName(byte[] tableName) { this.tableName = tableName == null ? 
(java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(tableName.clone()); return this; } - public disableTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) { + public isTableEnabled_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) { this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName); return this; } @@ -9543,12 +10459,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof disableTable_args) - return this.equals((disableTable_args)that); + if (that instanceof isTableEnabled_args) + return this.equals((isTableEnabled_args)that); return false; } - public boolean equals(disableTable_args that) { + public boolean equals(isTableEnabled_args that) { if (that == null) return false; if (this == that) @@ -9578,7 +10494,7 @@ public int hashCode() { } @Override - public int compareTo(disableTable_args other) { + public int compareTo(isTableEnabled_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -9613,7 +10529,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("disableTable_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("isTableEnabled_args("); boolean first = true; sb.append("tableName:"); @@ -9648,15 +10564,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class disableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public disableTable_argsStandardScheme getScheme() { - return new disableTable_argsStandardScheme(); + private static class isTableEnabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public isTableEnabled_argsStandardScheme getScheme() { + return new isTableEnabled_argsStandardScheme(); } } - private static class disableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class isTableEnabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -9685,7 +10601,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -9700,16 +10616,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args } - private static class disableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public disableTable_argsTupleScheme getScheme() { - return new disableTable_argsTupleScheme(); + private static class isTableEnabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public isTableEnabled_argsTupleScheme getScheme() { + return new 
isTableEnabled_argsTupleScheme(); } } - private static class disableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class isTableEnabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetTableName()) { @@ -9722,7 +10638,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args s } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -9737,18 +10653,21 @@ private static S scheme(org.apache. } } - public static class disableTable_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_result"); + public static class isTableEnabled_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_resultTupleSchemeFactory(); + public boolean success; // required public @org.apache.thrift.annotation.Nullable IOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), IO((short)1, "io"); private static final java.util.Map byName = new java.util.HashMap(); @@ -9765,6 +10684,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // IO return IO; default: @@ -9808,49 +10729,83 @@ public java.lang.String getFieldName() { } // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_result.class, metaDataMap); } - public disableTable_result() { + public isTableEnabled_result() { } - public disableTable_result( + public isTableEnabled_result( + boolean success, IOError io) { this(); + this.success = success; + setSuccessIsSet(true); this.io = io; } /** * Performs a deep copy on other. 
*/ - public disableTable_result(disableTable_result other) { + public isTableEnabled_result(isTableEnabled_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; if (other.isSetIo()) { this.io = new IOError(other.io); } } - public disableTable_result deepCopy() { - return new disableTable_result(this); + public isTableEnabled_result deepCopy() { + return new isTableEnabled_result(this); } @Override public void clear() { + setSuccessIsSet(false); + this.success = false; this.io = null; } + public boolean isSuccess() { + return this.success; + } + + public isTableEnabled_result setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + return this; + } + + public void unsetSuccess() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + @org.apache.thrift.annotation.Nullable public IOError getIo() { return this.io; } - public disableTable_result setIo(@org.apache.thrift.annotation.Nullable IOError io) { + public isTableEnabled_result setIo(@org.apache.thrift.annotation.Nullable IOError io) { this.io = io; return this; } @@ -9872,6 +10827,14 @@ public void setIoIsSet(boolean value) { public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((java.lang.Boolean)value); + } + break; + case IO: if (value == null) { unsetIo(); @@ -9886,6 +10849,9 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return isSuccess(); + case IO: return getIo(); @@ -9900,6 +10866,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case IO: return isSetIo(); } @@ -9908,17 +10876,26 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof disableTable_result) - return this.equals((disableTable_result)that); + if (that instanceof isTableEnabled_result) + return this.equals((isTableEnabled_result)that); return false; } - public boolean equals(disableTable_result that) { + public boolean equals(isTableEnabled_result that) { if (that == null) return false; if (this == that) return true; + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + boolean this_present_io = true && this.isSetIo(); boolean that_present_io = true && that.isSetIo(); if (this_present_io || that_present_io) { @@ -9935,6 +10912,8 @@ public boolean equals(disableTable_result that) { public int hashCode() { int hashCode = 1; + hashCode = hashCode * 8191 + ((success) ? 131071 : 524287); + hashCode = hashCode * 8191 + ((isSetIo()) ? 
131071 : 524287); if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode(); @@ -9943,13 +10922,23 @@ public int hashCode() { } @Override - public int compareTo(disableTable_result other) { + public int compareTo(isTableEnabled_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = java.lang.Boolean.compare(isSetIo(), other.isSetIo()); if (lastComparison != 0) { return lastComparison; @@ -9978,9 +10967,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("disableTable_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("isTableEnabled_result("); boolean first = true; + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); sb.append("io:"); if (this.io == null) { sb.append("null"); @@ -10007,21 +11000,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class disableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public disableTable_resultStandardScheme getScheme() { - return new disableTable_resultStandardScheme(); + private static class isTableEnabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public isTableEnabled_resultStandardScheme getScheme() { + return new isTableEnabled_resultStandardScheme(); } } - private static class disableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class isTableEnabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -10031,6 +11026,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // IO if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.io = new IOError(); @@ -10051,10 +11054,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct.validate(); } - 
public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } if (struct.io != null) { oprot.writeFieldBegin(IO_FIELD_DESC); struct.io.write(oprot); @@ -10066,32 +11074,42 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_resul } - private static class disableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public disableTable_resultTupleScheme getScheme() { - return new disableTable_resultTupleScheme(); + private static class isTableEnabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public isTableEnabled_resultTupleScheme getScheme() { + return new isTableEnabled_resultTupleScheme(); } } - private static class disableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class isTableEnabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetIo()) { + if (struct.isSetSuccess()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetIo()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } if (struct.isSetIo()) { struct.io.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.io = new IOError(); struct.io.read(iprot); struct.setIoIsSet(true); @@ -10104,25 +11122,19 @@ private static S scheme(org.apache. 
[compact_args (matched against the former isTableEnabled_args): the single binary argument is renamed from tableName to tableNameOrRegionName, its "name of the table to check" javadoc is dropped, and every accessor, _Fields constant, metaDataMap entry, equals/hashCode/compareTo/toString clause, and standard/tuple scheme reference is renamed accordingly. The wire shape is unchanged: one optional binary field at id 1.]
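The standard scheme referenced above is the self-describing wire form: every set field is framed with a field header carrying its name, type, and id, and the struct ends with a stop byte, so a reader can skip ids it does not recognize. A minimal sketch of that framing for a single binary field follows, assuming libthrift on the classpath; the struct name, field id, and buffer size mirror compact_args for illustration only.

import java.nio.ByteBuffer;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TField;
import org.apache.thrift.protocol.TProtocolUtil;
import org.apache.thrift.protocol.TStruct;
import org.apache.thrift.protocol.TType;
import org.apache.thrift.transport.TMemoryBuffer;

public class StandardSchemeSketch {
  public static void main(String[] args) throws Exception {
    TMemoryBuffer buf = new TMemoryBuffer(128);
    TBinaryProtocol oprot = new TBinaryProtocol(buf);

    // Writer: struct begin, one framed field, then the stop marker.
    oprot.writeStructBegin(new TStruct("compact_args"));
    oprot.writeFieldBegin(new TField("tableNameOrRegionName", TType.STRING, (short) 1));
    oprot.writeBinary(ByteBuffer.wrap("my_table".getBytes()));
    oprot.writeFieldEnd();
    oprot.writeFieldStop();
    oprot.writeStructEnd();

    // Reader: loop over field headers until the stop marker, skipping unknown fields.
    TBinaryProtocol iprot = new TBinaryProtocol(buf);
    iprot.readStructBegin();
    while (true) {
      TField f = iprot.readFieldBegin();
      if (f.type == TType.STOP) {
        break;
      }
      if (f.id == 1 && f.type == TType.STRING) {
        ByteBuffer value = iprot.readBinary();
        System.out.println("field 1 has " + value.remaining() + " bytes");
      } else {
        TProtocolUtil.skip(iprot, f.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
  }
}

The stop byte plus per-field ids are what let an older reader skip fields it does not know about, which is why the standard scheme stays readable across regenerations like this one.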
[compact_result (matched against the former isTableEnabled_result): the boolean success field and its __isset_bitfield bookkeeping are removed, leaving only the IOError io field at id 1; the standard scheme drops the SUCCESS case and the tuple scheme shrinks its presence bitset back from two bits to one.]
[majorCompact_args and majorCompact_result (matched against compact_args/compact_result): pure renames of the struct, scheme, and factory classes; both keep the same single fields (binary tableNameOrRegionName at id 1, IOError io at id 1) and identical serialization logic.]
[getTableNames_args (matched against the former majorCompact_args): the method takes no arguments, so the tableNameOrRegionName field, its field descriptor, accessors, equals/hashCode/compareTo/toString clauses, and all standard/tuple scheme read/write code are removed, leaving an empty _Fields enum and empty schemes.]
[getTableNames_result (matched against the former majorCompact_result): adds a list<binary> success field at id 0 alongside IOError io at id 1, with list metadata, a deep copy of the ByteBuffer elements, addToSuccess/getSuccess accessors, and matching equals/hashCode/compareTo/toString clauses. The standard scheme reads and writes success as a TList of binary table names; the tuple scheme widens its presence bitset to two flags and writes the list size followed by each name. The tuple-scheme read loop continues in the following hunk.]
_i57 < _list55.size; ++_i57) + { + _elem56 = iprot.readBinary(); + struct.success.add(_elem56); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.io = new IOError(); struct.io.read(iprot); struct.setIoIsSet(true); @@ -12453,12 +13389,12 @@ private static S scheme(org.apache. } } - public static class getTableNames_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNames_args"); + public static class getTableNamesWithIsTableEnabled_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_args"); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNames_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNames_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_argsTupleSchemeFactory(); /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -12522,20 +13458,20 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesWithIsTableEnabled_args.class, metaDataMap); } - public getTableNames_args() { + public getTableNamesWithIsTableEnabled_args() { } /** * Performs a deep copy on other. */ - public getTableNames_args(getTableNames_args other) { + public getTableNamesWithIsTableEnabled_args(getTableNamesWithIsTableEnabled_args other) { } - public getTableNames_args deepCopy() { - return new getTableNames_args(this); + public getTableNamesWithIsTableEnabled_args deepCopy() { + return new getTableNamesWithIsTableEnabled_args(this); } @Override @@ -12567,12 +13503,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof getTableNames_args) - return this.equals((getTableNames_args)that); + if (that instanceof getTableNamesWithIsTableEnabled_args) + return this.equals((getTableNamesWithIsTableEnabled_args)that); return false; } - public boolean equals(getTableNames_args that) { + public boolean equals(getTableNamesWithIsTableEnabled_args that) { if (that == null) return false; if (this == that) @@ -12589,7 +13525,7 @@ public int hashCode() { } @Override - public int compareTo(getTableNames_args other) { + public int compareTo(getTableNamesWithIsTableEnabled_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -12614,7 +13550,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNames_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_args("); boolean first = true; sb.append(")"); @@ -12642,15 +13578,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getTableNames_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getTableNames_argsStandardScheme getScheme() { - return new getTableNames_argsStandardScheme(); + private static class getTableNamesWithIsTableEnabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getTableNamesWithIsTableEnabled_argsStandardScheme getScheme() { + return new getTableNamesWithIsTableEnabled_argsStandardScheme(); } } - private static class getTableNames_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getTableNamesWithIsTableEnabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -12671,7 +13607,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -12681,21 +13617,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_args } - private static class getTableNames_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getTableNames_argsTupleScheme getScheme() { - return new getTableNames_argsTupleScheme(); + private static class getTableNamesWithIsTableEnabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getTableNamesWithIsTableEnabled_argsTupleScheme getScheme() { + return new getTableNamesWithIsTableEnabled_argsTupleScheme(); } } - private static class getTableNames_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getTableNamesWithIsTableEnabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } } @@ -12705,16 +13641,16 @@ private static S 
scheme(org.apache. } } - public static class getTableNames_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNames_result"); + public static class getTableNamesWithIsTableEnabled_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0); private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNames_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNames_resultTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_resultTupleSchemeFactory(); - public @org.apache.thrift.annotation.Nullable java.util.List success; // required + public @org.apache.thrift.annotation.Nullable java.util.Map success; // required public @org.apache.thrift.annotation.Nullable IOError io; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -12785,19 +13721,20 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text")))); + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)))); tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesWithIsTableEnabled_result.class, metaDataMap); } - public getTableNames_result() { + public getTableNamesWithIsTableEnabled_result() { } - public getTableNames_result( - java.util.List success, + public getTableNamesWithIsTableEnabled_result( + java.util.Map success, IOError io) { this(); @@ -12808,11 +13745,19 @@ public getTableNames_result( /** * Performs a deep copy on other. */ - public getTableNames_result(getTableNames_result other) { + public getTableNamesWithIsTableEnabled_result(getTableNamesWithIsTableEnabled_result other) { if (other.isSetSuccess()) { - java.util.List __this__success = new java.util.ArrayList(other.success.size()); - for (java.nio.ByteBuffer other_element : other.success) { - __this__success.add(org.apache.thrift.TBaseHelper.copyBinary(other_element)); + java.util.Map __this__success = new java.util.HashMap(other.success.size()); + for (java.util.Map.Entry other_element : other.success.entrySet()) { + + java.nio.ByteBuffer other_element_key = other_element.getKey(); + java.lang.Boolean other_element_value = other_element.getValue(); + + java.nio.ByteBuffer __this__success_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key); + + java.lang.Boolean __this__success_copy_value = other_element_value; + + __this__success.put(__this__success_copy_key, __this__success_copy_value); } this.success = __this__success; } @@ -12821,8 +13766,8 @@ public getTableNames_result(getTableNames_result other) { } } - public getTableNames_result deepCopy() { - return new getTableNames_result(this); + public getTableNamesWithIsTableEnabled_result deepCopy() { + return new getTableNamesWithIsTableEnabled_result(this); } @Override @@ -12835,24 +13780,19 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(java.nio.ByteBuffer elem) { + public void putToSuccess(java.nio.ByteBuffer key, boolean val) { if (this.success == null) { - this.success = new java.util.ArrayList(); + this.success = new java.util.HashMap(); } - this.success.add(elem); + this.success.put(key, val); } @org.apache.thrift.annotation.Nullable - public java.util.List getSuccess() { + public java.util.Map getSuccess() { return this.success; } - public getTableNames_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { + public getTableNamesWithIsTableEnabled_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.Map success) { this.success = success; return this; } @@ -12877,7 +13817,7 @@ public IOError getIo() { return this.io; } - public getTableNames_result setIo(@org.apache.thrift.annotation.Nullable IOError io) { + public getTableNamesWithIsTableEnabled_result setIo(@org.apache.thrift.annotation.Nullable IOError io) { this.io = io; return this; } @@ -12903,7 +13843,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetSuccess(); } else { - setSuccess((java.util.List)value); + setSuccess((java.util.Map)value); } break; @@ -12948,12 +13888,12 @@ public boolean isSet(_Fields field) { @Override public boolean equals(java.lang.Object that) { - if (that instanceof getTableNames_result) - return this.equals((getTableNames_result)that); + if (that instanceof getTableNamesWithIsTableEnabled_result) + return this.equals((getTableNamesWithIsTableEnabled_result)that); return false; } - public boolean equals(getTableNames_result that) { + public boolean equals(getTableNamesWithIsTableEnabled_result that) { if (that == null) return false; if (this == that) @@ -12996,7 +13936,7 @@ public int hashCode() { } @Override - public int compareTo(getTableNames_result other) { + public int compareTo(getTableNamesWithIsTableEnabled_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -13041,14 +13981,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNames_result("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_result("); boolean first = true; sb.append("success:"); if (this.success == null) { sb.append("null"); } else { - org.apache.thrift.TBaseHelper.toString(this.success, sb); + sb.append(this.success); } first = false; if (!first) sb.append(", "); @@ -13084,15 +14024,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getTableNames_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getTableNames_resultStandardScheme getScheme() { - return new getTableNames_resultStandardScheme(); + private static class getTableNamesWithIsTableEnabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getTableNamesWithIsTableEnabled_resultStandardScheme getScheme() { + return new getTableNamesWithIsTableEnabled_resultStandardScheme(); } } - private static class getTableNames_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class getTableNamesWithIsTableEnabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -13103,17 +14043,19 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_resul } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TList _list50 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list50.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem51; - for (int _i52 = 0; _i52 < _list50.size; ++_i52) + org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin(); + struct.success = new java.util.HashMap(2*_map58.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key59; + boolean _val60; + for (int _i61 = 0; _i61 < _map58.size; ++_i61) { - _elem51 = iprot.readBinary(); - struct.success.add(_elem51); + _key59 = iprot.readBinary(); + _val60 = iprot.readBool(); + struct.success.put(_key59, _val60); } - iprot.readListEnd(); + iprot.readMapEnd(); } struct.setSuccessIsSet(true); } else { @@ -13140,19 +14082,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_resul struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.nio.ByteBuffer _iter53 : struct.success) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.BOOL, struct.success.size())); + for (java.util.Map.Entry _iter62 : struct.success.entrySet()) { - oprot.writeBinary(_iter53); + oprot.writeBinary(_iter62.getKey()); + oprot.writeBool(_iter62.getValue()); } - oprot.writeListEnd(); + oprot.writeMapEnd(); } oprot.writeFieldEnd(); } @@ -13167,16 +14110,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_resu } - private static class getTableNames_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public getTableNames_resultTupleScheme getScheme() { - return new getTableNames_resultTupleScheme(); + private static class getTableNamesWithIsTableEnabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + public getTableNamesWithIsTableEnabled_resultTupleScheme getScheme() { + return new getTableNamesWithIsTableEnabled_resultTupleScheme(); } } - private static class getTableNames_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class getTableNamesWithIsTableEnabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetSuccess()) { @@ -13189,9 +14132,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.nio.ByteBuffer _iter54 : struct.success) + for (java.util.Map.Entry _iter63 : struct.success.entrySet()) { - oprot.writeBinary(_iter54); + oprot.writeBinary(_iter63.getKey()); + oprot.writeBool(_iter63.getValue()); } } } @@ -13201,18 +14145,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_resul } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list55 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list55.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem56; - for (int _i57 = 0; _i57 < _list55.size; ++_i57) + org.apache.thrift.protocol.TMap _map64 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL); + struct.success = new java.util.HashMap(2*_map64.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key65; + boolean _val66; + for (int _i67 = 0; _i67 < _map64.size; ++_i67) { - _elem56 = iprot.readBinary(); - struct.success.add(_elem56); + _key65 = iprot.readBinary(); + _val66 = iprot.readBool(); + struct.success.put(_key65, _val66); } } struct.setSuccessIsSet(true); @@ -14024,16 +14970,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin(); - struct.success = new java.util.HashMap(2*_map58.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key59; - @org.apache.thrift.annotation.Nullable ColumnDescriptor _val60; - for (int _i61 = 0; _i61 < _map58.size; ++_i61) + org.apache.thrift.protocol.TMap _map68 = iprot.readMapBegin(); + struct.success = new java.util.HashMap(2*_map68.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key69; + @org.apache.thrift.annotation.Nullable ColumnDescriptor _val70; + for (int _i71 = 0; _i71 < _map68.size; ++_i71) { - _key59 = iprot.readBinary(); - _val60 = new ColumnDescriptor(); - _val60.read(iprot); - struct.success.put(_key59, _val60); + _key69 = iprot.readBinary(); + _val70 = new ColumnDescriptor(); + _val70.read(iprot); + struct.success.put(_key69, _val70); } iprot.readMapEnd(); } @@ -14070,10 +15016,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getColumnDescripto oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (java.util.Map.Entry _iter62 : struct.success.entrySet()) + for (java.util.Map.Entry _iter72 : struct.success.entrySet()) { - oprot.writeBinary(_iter62.getKey()); - _iter62.getValue().write(oprot); + oprot.writeBinary(_iter72.getKey()); + _iter72.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -14112,10 +15058,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptor if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.util.Map.Entry _iter63 : struct.success.entrySet()) + for (java.util.Map.Entry _iter73 : struct.success.entrySet()) { - oprot.writeBinary(_iter63.getKey()); - _iter63.getValue().write(oprot); + oprot.writeBinary(_iter73.getKey()); + _iter73.getValue().write(oprot); } } } @@ -14130,16 +15076,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map64 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.HashMap(2*_map64.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key65; - @org.apache.thrift.annotation.Nullable ColumnDescriptor _val66; - for (int _i67 = 0; _i67 < _map64.size; ++_i67) + org.apache.thrift.protocol.TMap _map74 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.HashMap(2*_map74.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key75; + @org.apache.thrift.annotation.Nullable ColumnDescriptor _val76; + for (int _i77 = 0; _i77 < _map74.size; ++_i77) { - _key65 = iprot.readBinary(); - _val66 = new ColumnDescriptor(); - _val66.read(iprot); - struct.success.put(_key65, _val66); + _key75 = iprot.readBinary(); + _val76 = new ColumnDescriptor(); + _val76.read(iprot); + struct.success.put(_key75, _val76); } } struct.setSuccessIsSet(true); @@ -14947,14 +15893,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list68 = iprot.readListBegin(); - struct.success = new 
java.util.ArrayList(_list68.size); - @org.apache.thrift.annotation.Nullable TRegionInfo _elem69; - for (int _i70 = 0; _i70 < _list68.size; ++_i70) + org.apache.thrift.protocol.TList _list78 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list78.size); + @org.apache.thrift.annotation.Nullable TRegionInfo _elem79; + for (int _i80 = 0; _i80 < _list78.size; ++_i80) { - _elem69 = new TRegionInfo(); - _elem69.read(iprot); - struct.success.add(_elem69); + _elem79 = new TRegionInfo(); + _elem79.read(iprot); + struct.success.add(_elem79); } iprot.readListEnd(); } @@ -14991,9 +15937,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRegionInfo _iter71 : struct.success) + for (TRegionInfo _iter81 : struct.success) { - _iter71.write(oprot); + _iter81.write(oprot); } oprot.writeListEnd(); } @@ -15032,9 +15978,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRegionInfo _iter72 : struct.success) + for (TRegionInfo _iter82 : struct.success) { - _iter72.write(oprot); + _iter82.write(oprot); } } } @@ -15049,14 +15995,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_resu java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list73 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list73.size); - @org.apache.thrift.annotation.Nullable TRegionInfo _elem74; - for (int _i75 = 0; _i75 < _list73.size; ++_i75) + org.apache.thrift.protocol.TList _list83 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list83.size); + @org.apache.thrift.annotation.Nullable TRegionInfo _elem84; + for (int _i85 = 0; _i85 < _list83.size; ++_i85) { - _elem74 = new TRegionInfo(); - _elem74.read(iprot); - struct.success.add(_elem74); + _elem84 = new TRegionInfo(); + _elem84.read(iprot); + struct.success.add(_elem84); } } struct.setSuccessIsSet(true); @@ -15519,14 +16465,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st case 2: // COLUMN_FAMILIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list76 = iprot.readListBegin(); - struct.columnFamilies = new java.util.ArrayList(_list76.size); - @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem77; - for (int _i78 = 0; _i78 < _list76.size; ++_i78) + org.apache.thrift.protocol.TList _list86 = iprot.readListBegin(); + struct.columnFamilies = new java.util.ArrayList(_list86.size); + @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem87; + for (int _i88 = 0; _i88 < _list86.size; ++_i88) { - _elem77 = new ColumnDescriptor(); - _elem77.read(iprot); - struct.columnFamilies.add(_elem77); + _elem87 = new ColumnDescriptor(); + _elem87.read(iprot); + struct.columnFamilies.add(_elem87); } iprot.readListEnd(); } @@ -15559,9 +16505,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args s oprot.writeFieldBegin(COLUMN_FAMILIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columnFamilies.size())); - for (ColumnDescriptor _iter79 : struct.columnFamilies) + 
for (ColumnDescriptor _iter89 : struct.columnFamilies) { - _iter79.write(oprot); + _iter89.write(oprot); } oprot.writeListEnd(); } @@ -15598,9 +16544,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args st if (struct.isSetColumnFamilies()) { { oprot.writeI32(struct.columnFamilies.size()); - for (ColumnDescriptor _iter80 : struct.columnFamilies) + for (ColumnDescriptor _iter90 : struct.columnFamilies) { - _iter80.write(oprot); + _iter90.write(oprot); } } } @@ -15616,14 +16562,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args str } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list81 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.columnFamilies = new java.util.ArrayList(_list81.size); - @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem82; - for (int _i83 = 0; _i83 < _list81.size; ++_i83) + org.apache.thrift.protocol.TList _list91 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.columnFamilies = new java.util.ArrayList(_list91.size); + @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem92; + for (int _i93 = 0; _i93 < _list91.size; ++_i93) { - _elem82 = new ColumnDescriptor(); - _elem82.read(iprot); - struct.columnFamilies.add(_elem82); + _elem92 = new ColumnDescriptor(); + _elem92.read(iprot); + struct.columnFamilies.add(_elem92); } } struct.setColumnFamiliesIsSet(true); @@ -17653,15 +18599,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map84 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map84.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key85; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val86; - for (int _i87 = 0; _i87 < _map84.size; ++_i87) + org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map94.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key95; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val96; + for (int _i97 = 0; _i97 < _map94.size; ++_i97) { - _key85 = iprot.readBinary(); - _val86 = iprot.readBinary(); - struct.attributes.put(_key85, _val86); + _key95 = iprot.readBinary(); + _val96 = iprot.readBinary(); + struct.attributes.put(_key95, _val96); } iprot.readMapEnd(); } @@ -17704,10 +18650,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) t oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter88 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter98 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter88.getKey()); - oprot.writeBinary(_iter88.getValue()); + oprot.writeBinary(_iter98.getKey()); + oprot.writeBinary(_iter98.getValue()); } oprot.writeMapEnd(); } @@ -17756,10 +18702,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) th if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter89 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter99 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter89.getKey()); - oprot.writeBinary(_iter89.getValue()); + 
oprot.writeBinary(_iter99.getKey()); + oprot.writeBinary(_iter99.getValue()); } } } @@ -17783,15 +18729,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) thr } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map90 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map90.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key91; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val92; - for (int _i93 = 0; _i93 < _map90.size; ++_i93) + org.apache.thrift.protocol.TMap _map100 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map100.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key101; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val102; + for (int _i103 = 0; _i103 < _map100.size; ++_i103) { - _key91 = iprot.readBinary(); - _val92 = iprot.readBinary(); - struct.attributes.put(_key91, _val92); + _key101 = iprot.readBinary(); + _val102 = iprot.readBinary(); + struct.attributes.put(_key101, _val102); } } struct.setAttributesIsSet(true); @@ -18204,14 +19150,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct) case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list94 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list94.size); - @org.apache.thrift.annotation.Nullable TCell _elem95; - for (int _i96 = 0; _i96 < _list94.size; ++_i96) + org.apache.thrift.protocol.TList _list104 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list104.size); + @org.apache.thrift.annotation.Nullable TCell _elem105; + for (int _i106 = 0; _i106 < _list104.size; ++_i106) { - _elem95 = new TCell(); - _elem95.read(iprot); - struct.success.add(_elem95); + _elem105 = new TCell(); + _elem105.read(iprot); + struct.success.add(_elem105); } iprot.readListEnd(); } @@ -18248,9 +19194,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct) oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter97 : struct.success) + for (TCell _iter107 : struct.success) { - _iter97.write(oprot); + _iter107.write(oprot); } oprot.writeListEnd(); } @@ -18289,9 +19235,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct) if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TCell _iter98 : struct.success) + for (TCell _iter108 : struct.success) { - _iter98.write(oprot); + _iter108.write(oprot); } } } @@ -18306,14 +19252,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) t java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list99 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list99.size); - @org.apache.thrift.annotation.Nullable TCell _elem100; - for (int _i101 = 0; _i101 < _list99.size; ++_i101) + org.apache.thrift.protocol.TList _list109 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list109.size); + @org.apache.thrift.annotation.Nullable TCell _elem110; + for (int _i111 = 0; 
_i111 < _list109.size; ++_i111) { - _elem100 = new TCell(); - _elem100.read(iprot); - struct.success.add(_elem100); + _elem110 = new TCell(); + _elem110.read(iprot); + struct.success.add(_elem110); } } struct.setSuccessIsSet(true); @@ -19109,15 +20055,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct) case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map102.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key103; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val104; - for (int _i105 = 0; _i105 < _map102.size; ++_i105) + org.apache.thrift.protocol.TMap _map112 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map112.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key113; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val114; + for (int _i115 = 0; _i115 < _map112.size; ++_i115) { - _key103 = iprot.readBinary(); - _val104 = iprot.readBinary(); - struct.attributes.put(_key103, _val104); + _key113 = iprot.readBinary(); + _val114 = iprot.readBinary(); + struct.attributes.put(_key113, _val114); } iprot.readMapEnd(); } @@ -19163,10 +20109,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_args struct oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter106 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter116 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter106.getKey()); - oprot.writeBinary(_iter106.getValue()); + oprot.writeBinary(_iter116.getKey()); + oprot.writeBinary(_iter116.getValue()); } oprot.writeMapEnd(); } @@ -19221,10 +20167,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_args struct) if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter107 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter117 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter107.getKey()); - oprot.writeBinary(_iter107.getValue()); + oprot.writeBinary(_iter117.getKey()); + oprot.writeBinary(_iter117.getValue()); } } } @@ -19252,15 +20198,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVer_args struct) } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map108 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map108.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key109; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val110; - for (int _i111 = 0; _i111 < _map108.size; ++_i111) + org.apache.thrift.protocol.TMap _map118 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map118.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key119; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val120; + for (int _i121 = 0; _i121 < _map118.size; ++_i121) { - _key109 = iprot.readBinary(); - _val110 = iprot.readBinary(); - struct.attributes.put(_key109, _val110); + _key119 = iprot.readBinary(); + _val120 = iprot.readBinary(); + 
struct.attributes.put(_key119, _val120); } } struct.setAttributesIsSet(true); @@ -19673,14 +20619,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struc case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list112 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list112.size); - @org.apache.thrift.annotation.Nullable TCell _elem113; - for (int _i114 = 0; _i114 < _list112.size; ++_i114) + org.apache.thrift.protocol.TList _list122 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list122.size); + @org.apache.thrift.annotation.Nullable TCell _elem123; + for (int _i124 = 0; _i124 < _list122.size; ++_i124) { - _elem113 = new TCell(); - _elem113.read(iprot); - struct.success.add(_elem113); + _elem123 = new TCell(); + _elem123.read(iprot); + struct.success.add(_elem123); } iprot.readListEnd(); } @@ -19717,9 +20663,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_result stru oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter115 : struct.success) + for (TCell _iter125 : struct.success) { - _iter115.write(oprot); + _iter125.write(oprot); } oprot.writeListEnd(); } @@ -19758,9 +20704,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_result struc if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TCell _iter116 : struct.success) + for (TCell _iter126 : struct.success) { - _iter116.write(oprot); + _iter126.write(oprot); } } } @@ -19775,14 +20721,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVer_result struct java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list117 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list117.size); - @org.apache.thrift.annotation.Nullable TCell _elem118; - for (int _i119 = 0; _i119 < _list117.size; ++_i119) + org.apache.thrift.protocol.TList _list127 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list127.size); + @org.apache.thrift.annotation.Nullable TCell _elem128; + for (int _i129 = 0; _i129 < _list127.size; ++_i129) { - _elem118 = new TCell(); - _elem118.read(iprot); - struct.success.add(_elem118); + _elem128 = new TCell(); + _elem128.read(iprot); + struct.success.add(_elem128); } } struct.setSuccessIsSet(true); @@ -20673,15 +21619,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc case 6: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map120 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map120.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key121; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val122; - for (int _i123 = 0; _i123 < _map120.size; ++_i123) + org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map130.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key131; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val132; + for (int _i133 = 0; _i133 < _map130.size; ++_i133) { - _key121 = iprot.readBinary(); - _val122 = iprot.readBinary(); - 
struct.attributes.put(_key121, _val122); + _key131 = iprot.readBinary(); + _val132 = iprot.readBinary(); + struct.attributes.put(_key131, _val132); } iprot.readMapEnd(); } @@ -20730,10 +21676,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_args stru oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter124 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter134 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter124.getKey()); - oprot.writeBinary(_iter124.getValue()); + oprot.writeBinary(_iter134.getKey()); + oprot.writeBinary(_iter134.getValue()); } oprot.writeMapEnd(); } @@ -20794,10 +21740,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struc if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter125 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter135 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter125.getKey()); - oprot.writeBinary(_iter125.getValue()); + oprot.writeBinary(_iter135.getKey()); + oprot.writeBinary(_iter135.getValue()); } } } @@ -20829,15 +21775,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct } if (incoming.get(5)) { { - org.apache.thrift.protocol.TMap _map126 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map126.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key127; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val128; - for (int _i129 = 0; _i129 < _map126.size; ++_i129) + org.apache.thrift.protocol.TMap _map136 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map136.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key137; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val138; + for (int _i139 = 0; _i139 < _map136.size; ++_i139) { - _key127 = iprot.readBinary(); - _val128 = iprot.readBinary(); - struct.attributes.put(_key127, _val128); + _key137 = iprot.readBinary(); + _val138 = iprot.readBinary(); + struct.attributes.put(_key137, _val138); } } struct.setAttributesIsSet(true); @@ -21250,14 +22196,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result str case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list130 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list130.size); - @org.apache.thrift.annotation.Nullable TCell _elem131; - for (int _i132 = 0; _i132 < _list130.size; ++_i132) + org.apache.thrift.protocol.TList _list140 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list140.size); + @org.apache.thrift.annotation.Nullable TCell _elem141; + for (int _i142 = 0; _i142 < _list140.size; ++_i142) { - _elem131 = new TCell(); - _elem131.read(iprot); - struct.success.add(_elem131); + _elem141 = new TCell(); + _elem141.read(iprot); + struct.success.add(_elem141); } iprot.readListEnd(); } @@ -21294,9 +22240,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_result st oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter133 : struct.success) + for (TCell _iter143 : struct.success) { - _iter133.write(oprot); + _iter143.write(oprot); } oprot.writeListEnd(); } @@ -21335,9 +22281,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_result str if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TCell _iter134 : struct.success) + for (TCell _iter144 : struct.success) { - _iter134.write(oprot); + _iter144.write(oprot); } } } @@ -21352,14 +22298,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_result stru java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list135 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list135.size); - @org.apache.thrift.annotation.Nullable TCell _elem136; - for (int _i137 = 0; _i137 < _list135.size; ++_i137) + org.apache.thrift.protocol.TList _list145 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list145.size); + @org.apache.thrift.annotation.Nullable TCell _elem146; + for (int _i147 = 0; _i147 < _list145.size; ++_i147) { - _elem136 = new TCell(); - _elem136.read(iprot); - struct.success.add(_elem136); + _elem146 = new TCell(); + _elem146.read(iprot); + struct.success.add(_elem146); } } struct.setSuccessIsSet(true); @@ -21941,15 +22887,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct) case 3: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map138 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map138.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key139; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val140; - for (int _i141 = 0; _i141 < _map138.size; ++_i141) + org.apache.thrift.protocol.TMap _map148 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map148.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key149; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val150; + for (int _i151 = 0; _i151 < _map148.size; ++_i151) { - _key139 = iprot.readBinary(); - _val140 = iprot.readBinary(); - struct.attributes.put(_key139, _val140); + _key149 = iprot.readBinary(); + _val150 = iprot.readBinary(); + struct.attributes.put(_key149, _val150); } iprot.readMapEnd(); } @@ -21987,10 +22933,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_args struct oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter142 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter152 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter142.getKey()); - oprot.writeBinary(_iter142.getValue()); + oprot.writeBinary(_iter152.getKey()); + oprot.writeBinary(_iter152.getValue()); } oprot.writeMapEnd(); } @@ -22033,10 +22979,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_args struct) if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter143 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter153 : struct.attributes.entrySet()) { - 
oprot.writeBinary(_iter143.getKey()); - oprot.writeBinary(_iter143.getValue()); + oprot.writeBinary(_iter153.getKey()); + oprot.writeBinary(_iter153.getValue()); } } } @@ -22056,15 +23002,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRow_args struct) } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map144 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map144.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key145; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val146; - for (int _i147 = 0; _i147 < _map144.size; ++_i147) + org.apache.thrift.protocol.TMap _map154 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map154.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key155; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val156; + for (int _i157 = 0; _i157 < _map154.size; ++_i157) { - _key145 = iprot.readBinary(); - _val146 = iprot.readBinary(); - struct.attributes.put(_key145, _val146); + _key155 = iprot.readBinary(); + _val156 = iprot.readBinary(); + struct.attributes.put(_key155, _val156); } } struct.setAttributesIsSet(true); @@ -22477,14 +23423,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struc case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list148 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list148.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem149; - for (int _i150 = 0; _i150 < _list148.size; ++_i150) + org.apache.thrift.protocol.TList _list158 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list158.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem159; + for (int _i160 = 0; _i160 < _list158.size; ++_i160) { - _elem149 = new TRowResult(); - _elem149.read(iprot); - struct.success.add(_elem149); + _elem159 = new TRowResult(); + _elem159.read(iprot); + struct.success.add(_elem159); } iprot.readListEnd(); } @@ -22521,9 +23467,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_result stru oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter151 : struct.success) + for (TRowResult _iter161 : struct.success) { - _iter151.write(oprot); + _iter161.write(oprot); } oprot.writeListEnd(); } @@ -22562,9 +23508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_result struc if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter152 : struct.success) + for (TRowResult _iter162 : struct.success) { - _iter152.write(oprot); + _iter162.write(oprot); } } } @@ -22579,14 +23525,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRow_result struct java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list153 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list153.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem154; - for (int _i155 = 0; _i155 < _list153.size; ++_i155) + org.apache.thrift.protocol.TList _list163 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list163.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem164; + for (int _i165 = 0; _i165 < _list163.size; ++_i165) { - _elem154 = new TRowResult(); - _elem154.read(iprot); - struct.success.add(_elem154); + _elem164 = new TRowResult(); + _elem164.read(iprot); + struct.success.add(_elem164); } } struct.setSuccessIsSet(true); @@ -23283,13 +24229,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list156 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list156.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem157; - for (int _i158 = 0; _i158 < _list156.size; ++_i158) + org.apache.thrift.protocol.TList _list166 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list166.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem167; + for (int _i168 = 0; _i168 < _list166.size; ++_i168) { - _elem157 = iprot.readBinary(); - struct.columns.add(_elem157); + _elem167 = iprot.readBinary(); + struct.columns.add(_elem167); } iprot.readListEnd(); } @@ -23301,15 +24247,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map159 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map159.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key160; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val161; - for (int _i162 = 0; _i162 < _map159.size; ++_i162) + org.apache.thrift.protocol.TMap _map169 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map169.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key170; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val171; + for (int _i172 = 0; _i172 < _map169.size; ++_i172) { - _key160 = iprot.readBinary(); - _val161 = iprot.readBinary(); - struct.attributes.put(_key160, _val161); + _key170 = iprot.readBinary(); + _val171 = iprot.readBinary(); + struct.attributes.put(_key170, _val171); } iprot.readMapEnd(); } @@ -23347,9 +24293,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_ oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter163 : struct.columns) + for (java.nio.ByteBuffer _iter173 : struct.columns) { - oprot.writeBinary(_iter163); + oprot.writeBinary(_iter173); } oprot.writeListEnd(); } @@ -23359,10 +24305,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_ oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter164 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter174 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter164.getKey()); - oprot.writeBinary(_iter164.getValue()); + oprot.writeBinary(_iter174.getKey()); + oprot.writeBinary(_iter174.getValue()); } oprot.writeMapEnd(); } @@ -23408,19 +24354,19 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_a if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter165 : struct.columns) + for (java.nio.ByteBuffer _iter175 : struct.columns) { - oprot.writeBinary(_iter165); + oprot.writeBinary(_iter175); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter166 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter176 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter166.getKey()); - oprot.writeBinary(_iter166.getValue()); + oprot.writeBinary(_iter176.getKey()); + oprot.writeBinary(_iter176.getValue()); } } } @@ -23440,28 +24386,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list167 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list167.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem168; - for (int _i169 = 0; _i169 < _list167.size; ++_i169) + org.apache.thrift.protocol.TList _list177 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list177.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem178; + for (int _i179 = 0; _i179 < _list177.size; ++_i179) { - _elem168 = iprot.readBinary(); - struct.columns.add(_elem168); + _elem178 = iprot.readBinary(); + struct.columns.add(_elem178); } } struct.setColumnsIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map170 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map170.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key171; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val172; - for (int _i173 = 0; _i173 < _map170.size; ++_i173) + org.apache.thrift.protocol.TMap _map180 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map180.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key181; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val182; + for (int _i183 = 0; _i183 < _map180.size; ++_i183) { - _key171 = iprot.readBinary(); - _val172 = iprot.readBinary(); - struct.attributes.put(_key171, _val172); + _key181 = iprot.readBinary(); + _val182 = iprot.readBinary(); + struct.attributes.put(_key181, _val182); } } struct.setAttributesIsSet(true); @@ -23874,14 +24820,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list174 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list174.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem175; - for (int _i176 = 0; _i176 < _list174.size; ++_i176) + org.apache.thrift.protocol.TList _list184 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list184.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem185; + for (int _i186 = 0; _i186 < _list184.size; ++_i186) { - _elem175 = new TRowResult(); - _elem175.read(iprot); - struct.success.add(_elem175); + _elem185 = new TRowResult(); + _elem185.read(iprot); + struct.success.add(_elem185); } iprot.readListEnd(); } @@ 
-23918,9 +24864,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter177 : struct.success) + for (TRowResult _iter187 : struct.success) { - _iter177.write(oprot); + _iter187.write(oprot); } oprot.writeListEnd(); } @@ -23959,9 +24905,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter178 : struct.success) + for (TRowResult _iter188 : struct.success) { - _iter178.write(oprot); + _iter188.write(oprot); } } } @@ -23976,14 +24922,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_re java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list179 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list179.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem180; - for (int _i181 = 0; _i181 < _list179.size; ++_i181) + org.apache.thrift.protocol.TList _list189 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list189.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem190; + for (int _i191 = 0; _i191 < _list189.size; ++_i191) { - _elem180 = new TRowResult(); - _elem180.read(iprot); - struct.success.add(_elem180); + _elem190 = new TRowResult(); + _elem190.read(iprot); + struct.success.add(_elem190); } } struct.setSuccessIsSet(true); @@ -24664,15 +25610,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map182 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map182.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key183; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val184; - for (int _i185 = 0; _i185 < _map182.size; ++_i185) + org.apache.thrift.protocol.TMap _map192 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map192.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key193; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val194; + for (int _i195 = 0; _i195 < _map192.size; ++_i195) { - _key183 = iprot.readBinary(); - _val184 = iprot.readBinary(); - struct.attributes.put(_key183, _val184); + _key193 = iprot.readBinary(); + _val194 = iprot.readBinary(); + struct.attributes.put(_key193, _val194); } iprot.readMapEnd(); } @@ -24713,10 +25659,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_args stru oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter186 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter196 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter186.getKey()); - oprot.writeBinary(_iter186.getValue()); + oprot.writeBinary(_iter196.getKey()); + oprot.writeBinary(_iter196.getValue()); } oprot.writeMapEnd(); } @@ -24765,10 +25711,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struc if 
(struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter187 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter197 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter187.getKey()); - oprot.writeBinary(_iter187.getValue()); + oprot.writeBinary(_iter197.getKey()); + oprot.writeBinary(_iter197.getValue()); } } } @@ -24792,15 +25738,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map188 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map188.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key189; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val190; - for (int _i191 = 0; _i191 < _map188.size; ++_i191) + org.apache.thrift.protocol.TMap _map198 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map198.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key199; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val200; + for (int _i201 = 0; _i201 < _map198.size; ++_i201) { - _key189 = iprot.readBinary(); - _val190 = iprot.readBinary(); - struct.attributes.put(_key189, _val190); + _key199 = iprot.readBinary(); + _val200 = iprot.readBinary(); + struct.attributes.put(_key199, _val200); } } struct.setAttributesIsSet(true); @@ -25213,14 +26159,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result str case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list192 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list192.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem193; - for (int _i194 = 0; _i194 < _list192.size; ++_i194) + org.apache.thrift.protocol.TList _list202 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list202.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem203; + for (int _i204 = 0; _i204 < _list202.size; ++_i204) { - _elem193 = new TRowResult(); - _elem193.read(iprot); - struct.success.add(_elem193); + _elem203 = new TRowResult(); + _elem203.read(iprot); + struct.success.add(_elem203); } iprot.readListEnd(); } @@ -25257,9 +26203,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_result st oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter195 : struct.success) + for (TRowResult _iter205 : struct.success) { - _iter195.write(oprot); + _iter205.write(oprot); } oprot.writeListEnd(); } @@ -25298,9 +26244,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_result str if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter196 : struct.success) + for (TRowResult _iter206 : struct.success) { - _iter196.write(oprot); + _iter206.write(oprot); } } } @@ -25315,14 +26261,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_result stru java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list197 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new 
java.util.ArrayList(_list197.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem198; - for (int _i199 = 0; _i199 < _list197.size; ++_i199) + org.apache.thrift.protocol.TList _list207 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list207.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem208; + for (int _i209 = 0; _i209 < _list207.size; ++_i209) { - _elem198 = new TRowResult(); - _elem198.read(iprot); - struct.success.add(_elem198); + _elem208 = new TRowResult(); + _elem208.read(iprot); + struct.success.add(_elem208); } } struct.setSuccessIsSet(true); @@ -26098,13 +27044,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list200 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list200.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem201; - for (int _i202 = 0; _i202 < _list200.size; ++_i202) + org.apache.thrift.protocol.TList _list210 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list210.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem211; + for (int _i212 = 0; _i212 < _list210.size; ++_i212) { - _elem201 = iprot.readBinary(); - struct.columns.add(_elem201); + _elem211 = iprot.readBinary(); + struct.columns.add(_elem211); } iprot.readListEnd(); } @@ -26124,15 +27070,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map203 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map203.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key204; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val205; - for (int _i206 = 0; _i206 < _map203.size; ++_i206) + org.apache.thrift.protocol.TMap _map213 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map213.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key214; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val215; + for (int _i216 = 0; _i216 < _map213.size; ++_i216) { - _key204 = iprot.readBinary(); - _val205 = iprot.readBinary(); - struct.attributes.put(_key204, _val205); + _key214 = iprot.readBinary(); + _val215 = iprot.readBinary(); + struct.attributes.put(_key214, _val215); } iprot.readMapEnd(); } @@ -26170,9 +27116,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter207 : struct.columns) + for (java.nio.ByteBuffer _iter217 : struct.columns) { - oprot.writeBinary(_iter207); + oprot.writeBinary(_iter217); } oprot.writeListEnd(); } @@ -26185,10 +27131,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter208 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter218 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter208.getKey()); - 
oprot.writeBinary(_iter208.getValue()); + oprot.writeBinary(_iter218.getKey()); + oprot.writeBinary(_iter218.getValue()); } oprot.writeMapEnd(); } @@ -26237,9 +27183,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter209 : struct.columns) + for (java.nio.ByteBuffer _iter219 : struct.columns) { - oprot.writeBinary(_iter209); + oprot.writeBinary(_iter219); } } } @@ -26249,10 +27195,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter210 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter220 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter210.getKey()); - oprot.writeBinary(_iter210.getValue()); + oprot.writeBinary(_iter220.getKey()); + oprot.writeBinary(_iter220.getValue()); } } } @@ -26272,13 +27218,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list211 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list211.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem212; - for (int _i213 = 0; _i213 < _list211.size; ++_i213) + org.apache.thrift.protocol.TList _list221 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list221.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem222; + for (int _i223 = 0; _i223 < _list221.size; ++_i223) { - _elem212 = iprot.readBinary(); - struct.columns.add(_elem212); + _elem222 = iprot.readBinary(); + struct.columns.add(_elem222); } } struct.setColumnsIsSet(true); @@ -26289,15 +27235,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map214 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map214.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key215; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val216; - for (int _i217 = 0; _i217 < _map214.size; ++_i217) + org.apache.thrift.protocol.TMap _map224 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map224.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key225; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val226; + for (int _i227 = 0; _i227 < _map224.size; ++_i227) { - _key215 = iprot.readBinary(); - _val216 = iprot.readBinary(); - struct.attributes.put(_key215, _val216); + _key225 = iprot.readBinary(); + _val226 = iprot.readBinary(); + struct.attributes.put(_key225, _val226); } } struct.setAttributesIsSet(true); @@ -26710,14 +27656,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list218 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list218.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem219; - for (int _i220 = 0; _i220 < _list218.size; ++_i220) + org.apache.thrift.protocol.TList _list228 = 
iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list228.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem229; + for (int _i230 = 0; _i230 < _list228.size; ++_i230) { - _elem219 = new TRowResult(); - _elem219.read(iprot); - struct.success.add(_elem219); + _elem229 = new TRowResult(); + _elem229.read(iprot); + struct.success.add(_elem229); } iprot.readListEnd(); } @@ -26754,9 +27700,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter221 : struct.success) + for (TRowResult _iter231 : struct.success) { - _iter221.write(oprot); + _iter231.write(oprot); } oprot.writeListEnd(); } @@ -26795,9 +27741,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter222 : struct.success) + for (TRowResult _iter232 : struct.success) { - _iter222.write(oprot); + _iter232.write(oprot); } } } @@ -26812,14 +27758,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_ java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list223 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list223.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem224; - for (int _i225 = 0; _i225 < _list223.size; ++_i225) + org.apache.thrift.protocol.TList _list233 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list233.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem234; + for (int _i235 = 0; _i235 < _list233.size; ++_i235) { - _elem224 = new TRowResult(); - _elem224.read(iprot); - struct.success.add(_elem224); + _elem234 = new TRowResult(); + _elem234.read(iprot); + struct.success.add(_elem234); } } struct.setSuccessIsSet(true); @@ -27401,13 +28347,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct case 2: // ROWS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list226 = iprot.readListBegin(); - struct.rows = new java.util.ArrayList(_list226.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem227; - for (int _i228 = 0; _i228 < _list226.size; ++_i228) + org.apache.thrift.protocol.TList _list236 = iprot.readListBegin(); + struct.rows = new java.util.ArrayList(_list236.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem237; + for (int _i238 = 0; _i238 < _list236.size; ++_i238) { - _elem227 = iprot.readBinary(); - struct.rows.add(_elem227); + _elem237 = iprot.readBinary(); + struct.rows.add(_elem237); } iprot.readListEnd(); } @@ -27419,15 +28365,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct case 3: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map229 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map229.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key230; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val231; - for (int _i232 = 0; _i232 < _map229.size; ++_i232) + org.apache.thrift.protocol.TMap _map239 = iprot.readMapBegin(); + 
struct.attributes = new java.util.HashMap(2*_map239.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key240; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val241; + for (int _i242 = 0; _i242 < _map239.size; ++_i242) { - _key230 = iprot.readBinary(); - _val231 = iprot.readBinary(); - struct.attributes.put(_key230, _val231); + _key240 = iprot.readBinary(); + _val241 = iprot.readBinary(); + struct.attributes.put(_key240, _val241); } iprot.readMapEnd(); } @@ -27460,9 +28406,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struc oprot.writeFieldBegin(ROWS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size())); - for (java.nio.ByteBuffer _iter233 : struct.rows) + for (java.nio.ByteBuffer _iter243 : struct.rows) { - oprot.writeBinary(_iter233); + oprot.writeBinary(_iter243); } oprot.writeListEnd(); } @@ -27472,10 +28418,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struc oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter234 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter244 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter234.getKey()); - oprot.writeBinary(_iter234.getValue()); + oprot.writeBinary(_iter244.getKey()); + oprot.writeBinary(_iter244.getValue()); } oprot.writeMapEnd(); } @@ -27515,19 +28461,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct if (struct.isSetRows()) { { oprot.writeI32(struct.rows.size()); - for (java.nio.ByteBuffer _iter235 : struct.rows) + for (java.nio.ByteBuffer _iter245 : struct.rows) { - oprot.writeBinary(_iter235); + oprot.writeBinary(_iter245); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter236 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter246 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter236.getKey()); - oprot.writeBinary(_iter236.getValue()); + oprot.writeBinary(_iter246.getKey()); + oprot.writeBinary(_iter246.getValue()); } } } @@ -27543,28 +28489,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list237 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.rows = new java.util.ArrayList(_list237.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem238; - for (int _i239 = 0; _i239 < _list237.size; ++_i239) + org.apache.thrift.protocol.TList _list247 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.rows = new java.util.ArrayList(_list247.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem248; + for (int _i249 = 0; _i249 < _list247.size; ++_i249) { - _elem238 = iprot.readBinary(); - struct.rows.add(_elem238); + _elem248 = iprot.readBinary(); + struct.rows.add(_elem248); } } struct.setRowsIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map240 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map240.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key241; - @org.apache.thrift.annotation.Nullable 
java.nio.ByteBuffer _val242; - for (int _i243 = 0; _i243 < _map240.size; ++_i243) + org.apache.thrift.protocol.TMap _map250 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map250.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key251; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val252; + for (int _i253 = 0; _i253 < _map250.size; ++_i253) { - _key241 = iprot.readBinary(); - _val242 = iprot.readBinary(); - struct.attributes.put(_key241, _val242); + _key251 = iprot.readBinary(); + _val252 = iprot.readBinary(); + struct.attributes.put(_key251, _val252); } } struct.setAttributesIsSet(true); @@ -27977,14 +28923,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list244 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list244.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem245; - for (int _i246 = 0; _i246 < _list244.size; ++_i246) + org.apache.thrift.protocol.TList _list254 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list254.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem255; + for (int _i256 = 0; _i256 < _list254.size; ++_i256) { - _elem245 = new TRowResult(); - _elem245.read(iprot); - struct.success.add(_elem245); + _elem255 = new TRowResult(); + _elem255.read(iprot); + struct.success.add(_elem255); } iprot.readListEnd(); } @@ -28021,9 +28967,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter247 : struct.success) + for (TRowResult _iter257 : struct.success) { - _iter247.write(oprot); + _iter257.write(oprot); } oprot.writeListEnd(); } @@ -28062,9 +29008,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter248 : struct.success) + for (TRowResult _iter258 : struct.success) { - _iter248.write(oprot); + _iter258.write(oprot); } } } @@ -28079,14 +29025,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_result struc java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list249 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list249.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem250; - for (int _i251 = 0; _i251 < _list249.size; ++_i251) + org.apache.thrift.protocol.TList _list259 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list259.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem260; + for (int _i261 = 0; _i261 < _list259.size; ++_i261) { - _elem250 = new TRowResult(); - _elem250.read(iprot); - struct.success.add(_elem250); + _elem260 = new TRowResult(); + _elem260.read(iprot); + struct.success.add(_elem260); } } struct.setSuccessIsSet(true); @@ -28783,13 +29729,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_ case 2: // ROWS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list252 = iprot.readListBegin(); - struct.rows = new java.util.ArrayList(_list252.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem253; - for (int _i254 = 0; _i254 < _list252.size; ++_i254) + org.apache.thrift.protocol.TList _list262 = iprot.readListBegin(); + struct.rows = new java.util.ArrayList(_list262.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem263; + for (int _i264 = 0; _i264 < _list262.size; ++_i264) { - _elem253 = iprot.readBinary(); - struct.rows.add(_elem253); + _elem263 = iprot.readBinary(); + struct.rows.add(_elem263); } iprot.readListEnd(); } @@ -28801,13 +29747,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_ case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list255 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list255.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem256; - for (int _i257 = 0; _i257 < _list255.size; ++_i257) + org.apache.thrift.protocol.TList _list265 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list265.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem266; + for (int _i267 = 0; _i267 < _list265.size; ++_i267) { - _elem256 = iprot.readBinary(); - struct.columns.add(_elem256); + _elem266 = iprot.readBinary(); + struct.columns.add(_elem266); } iprot.readListEnd(); } @@ -28819,15 +29765,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_ case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map258 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map258.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key259; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val260; - for (int _i261 = 0; _i261 < _map258.size; ++_i261) + org.apache.thrift.protocol.TMap _map268 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map268.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key269; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val270; + for (int _i271 = 0; _i271 < _map268.size; ++_i271) { - _key259 = iprot.readBinary(); - _val260 = iprot.readBinary(); - struct.attributes.put(_key259, _val260); + _key269 = iprot.readBinary(); + _val270 = iprot.readBinary(); + struct.attributes.put(_key269, _val270); } iprot.readMapEnd(); } @@ -28860,9 +29806,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(ROWS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size())); - for (java.nio.ByteBuffer _iter262 : struct.rows) + for (java.nio.ByteBuffer _iter272 : struct.rows) { - oprot.writeBinary(_iter262); + oprot.writeBinary(_iter272); } oprot.writeListEnd(); } @@ -28872,9 +29818,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter263 : struct.columns) + for (java.nio.ByteBuffer _iter273 : struct.columns) { - oprot.writeBinary(_iter263); + oprot.writeBinary(_iter273); } oprot.writeListEnd(); } @@ -28884,10 +29830,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter264 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter274 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter264.getKey()); - oprot.writeBinary(_iter264.getValue()); + oprot.writeBinary(_iter274.getKey()); + oprot.writeBinary(_iter274.getValue()); } oprot.writeMapEnd(); } @@ -28930,28 +29876,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_ if (struct.isSetRows()) { { oprot.writeI32(struct.rows.size()); - for (java.nio.ByteBuffer _iter265 : struct.rows) + for (java.nio.ByteBuffer _iter275 : struct.rows) { - oprot.writeBinary(_iter265); + oprot.writeBinary(_iter275); } } } if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter266 : struct.columns) + for (java.nio.ByteBuffer _iter276 : struct.columns) { - oprot.writeBinary(_iter266); + oprot.writeBinary(_iter276); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter267 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter277 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter267.getKey()); - oprot.writeBinary(_iter267.getValue()); + oprot.writeBinary(_iter277.getKey()); + oprot.writeBinary(_iter277.getValue()); } } } @@ -28967,41 +29913,41 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_a } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list268 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.rows = new java.util.ArrayList(_list268.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem269; - for (int _i270 = 0; _i270 < _list268.size; ++_i270) + org.apache.thrift.protocol.TList _list278 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.rows = new java.util.ArrayList(_list278.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem279; + for (int _i280 = 0; _i280 < _list278.size; ++_i280) { - _elem269 = iprot.readBinary(); - struct.rows.add(_elem269); + _elem279 = iprot.readBinary(); + struct.rows.add(_elem279); } } struct.setRowsIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list271 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list271.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem272; - for (int _i273 = 0; _i273 < _list271.size; ++_i273) + org.apache.thrift.protocol.TList _list281 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list281.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem282; + for (int _i283 = 0; _i283 < _list281.size; ++_i283) { - _elem272 = iprot.readBinary(); - struct.columns.add(_elem272); + _elem282 = iprot.readBinary(); + struct.columns.add(_elem282); } } struct.setColumnsIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map274 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map274.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key275; - 
@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val276; - for (int _i277 = 0; _i277 < _map274.size; ++_i277) + org.apache.thrift.protocol.TMap _map284 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map284.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key285; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val286; + for (int _i287 = 0; _i287 < _map284.size; ++_i287) { - _key275 = iprot.readBinary(); - _val276 = iprot.readBinary(); - struct.attributes.put(_key275, _val276); + _key285 = iprot.readBinary(); + _val286 = iprot.readBinary(); + struct.attributes.put(_key285, _val286); } } struct.setAttributesIsSet(true); @@ -29414,14 +30360,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list278 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list278.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem279; - for (int _i280 = 0; _i280 < _list278.size; ++_i280) + org.apache.thrift.protocol.TList _list288 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list288.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem289; + for (int _i290 = 0; _i290 < _list288.size; ++_i290) { - _elem279 = new TRowResult(); - _elem279.read(iprot); - struct.success.add(_elem279); + _elem289 = new TRowResult(); + _elem289.read(iprot); + struct.success.add(_elem289); } iprot.readListEnd(); } @@ -29458,9 +30404,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter281 : struct.success) + for (TRowResult _iter291 : struct.success) { - _iter281.write(oprot); + _iter291.write(oprot); } oprot.writeListEnd(); } @@ -29499,9 +30445,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter282 : struct.success) + for (TRowResult _iter292 : struct.success) { - _iter282.write(oprot); + _iter292.write(oprot); } } } @@ -29516,14 +30462,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_r java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list283 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list283.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem284; - for (int _i285 = 0; _i285 < _list283.size; ++_i285) + org.apache.thrift.protocol.TList _list293 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list293.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem294; + for (int _i295 = 0; _i295 < _list293.size; ++_i295) { - _elem284 = new TRowResult(); - _elem284.read(iprot); - struct.success.add(_elem284); + _elem294 = new TRowResult(); + _elem294.read(iprot); + struct.success.add(_elem294); } } struct.setSuccessIsSet(true); @@ -30196,13 +31142,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru case 2: // ROWS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list286 = iprot.readListBegin(); - struct.rows = new java.util.ArrayList(_list286.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem287; - for (int _i288 = 0; _i288 < _list286.size; ++_i288) + org.apache.thrift.protocol.TList _list296 = iprot.readListBegin(); + struct.rows = new java.util.ArrayList(_list296.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem297; + for (int _i298 = 0; _i298 < _list296.size; ++_i298) { - _elem287 = iprot.readBinary(); - struct.rows.add(_elem287); + _elem297 = iprot.readBinary(); + struct.rows.add(_elem297); } iprot.readListEnd(); } @@ -30222,15 +31168,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map289 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map289.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key290; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val291; - for (int _i292 = 0; _i292 < _map289.size; ++_i292) + org.apache.thrift.protocol.TMap _map299 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map299.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key300; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val301; + for (int _i302 = 0; _i302 < _map299.size; ++_i302) { - _key290 = iprot.readBinary(); - _val291 = iprot.readBinary(); - struct.attributes.put(_key290, _val291); + _key300 = iprot.readBinary(); + _val301 = iprot.readBinary(); + struct.attributes.put(_key300, _val301); } iprot.readMapEnd(); } @@ -30263,9 +31209,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args str oprot.writeFieldBegin(ROWS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size())); - for (java.nio.ByteBuffer _iter293 : struct.rows) + for (java.nio.ByteBuffer _iter303 : struct.rows) { - oprot.writeBinary(_iter293); + oprot.writeBinary(_iter303); } oprot.writeListEnd(); } @@ -30278,10 +31224,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args str oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter294 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter304 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter294.getKey()); - oprot.writeBinary(_iter294.getValue()); + oprot.writeBinary(_iter304.getKey()); + oprot.writeBinary(_iter304.getValue()); } oprot.writeMapEnd(); } @@ -30324,9 +31270,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args stru if (struct.isSetRows()) { { oprot.writeI32(struct.rows.size()); - for (java.nio.ByteBuffer _iter295 : struct.rows) + for (java.nio.ByteBuffer _iter305 : struct.rows) { - oprot.writeBinary(_iter295); + oprot.writeBinary(_iter305); } } } @@ -30336,10 +31282,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args stru if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter296 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter306 : struct.attributes.entrySet()) { - 
oprot.writeBinary(_iter296.getKey()); - oprot.writeBinary(_iter296.getValue()); + oprot.writeBinary(_iter306.getKey()); + oprot.writeBinary(_iter306.getValue()); } } } @@ -30355,13 +31301,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struc } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list297 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.rows = new java.util.ArrayList(_list297.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem298; - for (int _i299 = 0; _i299 < _list297.size; ++_i299) + org.apache.thrift.protocol.TList _list307 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.rows = new java.util.ArrayList(_list307.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem308; + for (int _i309 = 0; _i309 < _list307.size; ++_i309) { - _elem298 = iprot.readBinary(); - struct.rows.add(_elem298); + _elem308 = iprot.readBinary(); + struct.rows.add(_elem308); } } struct.setRowsIsSet(true); @@ -30372,15 +31318,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struc } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map300 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map300.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key301; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val302; - for (int _i303 = 0; _i303 < _map300.size; ++_i303) + org.apache.thrift.protocol.TMap _map310 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map310.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key311; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val312; + for (int _i313 = 0; _i313 < _map310.size; ++_i313) { - _key301 = iprot.readBinary(); - _val302 = iprot.readBinary(); - struct.attributes.put(_key301, _val302); + _key311 = iprot.readBinary(); + _val312 = iprot.readBinary(); + struct.attributes.put(_key311, _val312); } } struct.setAttributesIsSet(true); @@ -30793,14 +31739,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result st case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list304 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list304.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem305; - for (int _i306 = 0; _i306 < _list304.size; ++_i306) + org.apache.thrift.protocol.TList _list314 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list314.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem315; + for (int _i316 = 0; _i316 < _list314.size; ++_i316) { - _elem305 = new TRowResult(); - _elem305.read(iprot); - struct.success.add(_elem305); + _elem315 = new TRowResult(); + _elem315.read(iprot); + struct.success.add(_elem315); } iprot.readListEnd(); } @@ -30837,9 +31783,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_result s oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter307 : struct.success) + for (TRowResult _iter317 : struct.success) { - _iter307.write(oprot); + _iter317.write(oprot); } oprot.writeListEnd(); } @@ 
-30878,9 +31824,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result st if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter308 : struct.success) + for (TRowResult _iter318 : struct.success) { - _iter308.write(oprot); + _iter318.write(oprot); } } } @@ -30895,14 +31841,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result str java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list309 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list309.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem310; - for (int _i311 = 0; _i311 < _list309.size; ++_i311) + org.apache.thrift.protocol.TList _list319 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list319.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem320; + for (int _i321 = 0; _i321 < _list319.size; ++_i321) { - _elem310 = new TRowResult(); - _elem310.read(iprot); - struct.success.add(_elem310); + _elem320 = new TRowResult(); + _elem320.read(iprot); + struct.success.add(_elem320); } } struct.setSuccessIsSet(true); @@ -31678,13 +32624,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT case 2: // ROWS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list312 = iprot.readListBegin(); - struct.rows = new java.util.ArrayList(_list312.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem313; - for (int _i314 = 0; _i314 < _list312.size; ++_i314) + org.apache.thrift.protocol.TList _list322 = iprot.readListBegin(); + struct.rows = new java.util.ArrayList(_list322.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem323; + for (int _i324 = 0; _i324 < _list322.size; ++_i324) { - _elem313 = iprot.readBinary(); - struct.rows.add(_elem313); + _elem323 = iprot.readBinary(); + struct.rows.add(_elem323); } iprot.readListEnd(); } @@ -31696,13 +32642,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list315 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list315.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem316; - for (int _i317 = 0; _i317 < _list315.size; ++_i317) + org.apache.thrift.protocol.TList _list325 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list325.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem326; + for (int _i327 = 0; _i327 < _list325.size; ++_i327) { - _elem316 = iprot.readBinary(); - struct.columns.add(_elem316); + _elem326 = iprot.readBinary(); + struct.columns.add(_elem326); } iprot.readListEnd(); } @@ -31722,15 +32668,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map318 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map318.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key319; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val320; - for (int _i321 = 0; _i321 < _map318.size; ++_i321) + org.apache.thrift.protocol.TMap _map328 = iprot.readMapBegin(); 
+ struct.attributes = new java.util.HashMap(2*_map328.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key329; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val330; + for (int _i331 = 0; _i331 < _map328.size; ++_i331) { - _key319 = iprot.readBinary(); - _val320 = iprot.readBinary(); - struct.attributes.put(_key319, _val320); + _key329 = iprot.readBinary(); + _val330 = iprot.readBinary(); + struct.attributes.put(_key329, _val330); } iprot.readMapEnd(); } @@ -31763,9 +32709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(ROWS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size())); - for (java.nio.ByteBuffer _iter322 : struct.rows) + for (java.nio.ByteBuffer _iter332 : struct.rows) { - oprot.writeBinary(_iter322); + oprot.writeBinary(_iter332); } oprot.writeListEnd(); } @@ -31775,9 +32721,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter323 : struct.columns) + for (java.nio.ByteBuffer _iter333 : struct.columns) { - oprot.writeBinary(_iter323); + oprot.writeBinary(_iter333); } oprot.writeListEnd(); } @@ -31790,10 +32736,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter324 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter334 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter324.getKey()); - oprot.writeBinary(_iter324.getValue()); + oprot.writeBinary(_iter334.getKey()); + oprot.writeBinary(_iter334.getValue()); } oprot.writeMapEnd(); } @@ -31839,18 +32785,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT if (struct.isSetRows()) { { oprot.writeI32(struct.rows.size()); - for (java.nio.ByteBuffer _iter325 : struct.rows) + for (java.nio.ByteBuffer _iter335 : struct.rows) { - oprot.writeBinary(_iter325); + oprot.writeBinary(_iter335); } } } if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter326 : struct.columns) + for (java.nio.ByteBuffer _iter336 : struct.columns) { - oprot.writeBinary(_iter326); + oprot.writeBinary(_iter336); } } } @@ -31860,10 +32806,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter327 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter337 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter327.getKey()); - oprot.writeBinary(_iter327.getValue()); + oprot.writeBinary(_iter337.getKey()); + oprot.writeBinary(_iter337.getValue()); } } } @@ -31879,26 +32825,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list328 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.rows = new java.util.ArrayList(_list328.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem329; - for (int _i330 = 0; _i330 < 
_list328.size; ++_i330) + org.apache.thrift.protocol.TList _list338 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.rows = new java.util.ArrayList(_list338.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem339; + for (int _i340 = 0; _i340 < _list338.size; ++_i340) { - _elem329 = iprot.readBinary(); - struct.rows.add(_elem329); + _elem339 = iprot.readBinary(); + struct.rows.add(_elem339); } } struct.setRowsIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list331 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list331.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem332; - for (int _i333 = 0; _i333 < _list331.size; ++_i333) + org.apache.thrift.protocol.TList _list341 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list341.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem342; + for (int _i343 = 0; _i343 < _list341.size; ++_i343) { - _elem332 = iprot.readBinary(); - struct.columns.add(_elem332); + _elem342 = iprot.readBinary(); + struct.columns.add(_elem342); } } struct.setColumnsIsSet(true); @@ -31909,15 +32855,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map334 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map334.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key335; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val336; - for (int _i337 = 0; _i337 < _map334.size; ++_i337) + org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map344.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key345; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val346; + for (int _i347 = 0; _i347 < _map344.size; ++_i347) { - _key335 = iprot.readBinary(); - _val336 = iprot.readBinary(); - struct.attributes.put(_key335, _val336); + _key345 = iprot.readBinary(); + _val346 = iprot.readBinary(); + struct.attributes.put(_key345, _val346); } } struct.setAttributesIsSet(true); @@ -32330,14 +33276,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list338 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list338.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem339; - for (int _i340 = 0; _i340 < _list338.size; ++_i340) + org.apache.thrift.protocol.TList _list348 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list348.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem349; + for (int _i350 = 0; _i350 < _list348.size; ++_i350) { - _elem339 = new TRowResult(); - _elem339.read(iprot); - struct.success.add(_elem339); + _elem349 = new TRowResult(); + _elem349.read(iprot); + struct.success.add(_elem349); } iprot.readListEnd(); } @@ -32374,9 +33320,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter341 : struct.success) + for (TRowResult _iter351 : struct.success) { - _iter341.write(oprot); + _iter351.write(oprot); } oprot.writeListEnd(); } @@ -32415,9 +33361,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter342 : struct.success) + for (TRowResult _iter352 : struct.success) { - _iter342.write(oprot); + _iter352.write(oprot); } } } @@ -32432,14 +33378,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list343 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list343.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem344; - for (int _i345 = 0; _i345 < _list343.size; ++_i345) + org.apache.thrift.protocol.TList _list353 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list353.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem354; + for (int _i355 = 0; _i355 < _list353.size; ++_i355) { - _elem344 = new TRowResult(); - _elem344.read(iprot); - struct.success.add(_elem344); + _elem354 = new TRowResult(); + _elem354.read(iprot); + struct.success.add(_elem354); } } struct.setSuccessIsSet(true); @@ -33136,14 +34082,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru case 3: // MUTATIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list346 = iprot.readListBegin(); - struct.mutations = new java.util.ArrayList(_list346.size); - @org.apache.thrift.annotation.Nullable Mutation _elem347; - for (int _i348 = 0; _i348 < _list346.size; ++_i348) + org.apache.thrift.protocol.TList _list356 = iprot.readListBegin(); + struct.mutations = new java.util.ArrayList(_list356.size); + @org.apache.thrift.annotation.Nullable Mutation _elem357; + for (int _i358 = 0; _i358 < _list356.size; ++_i358) { - _elem347 = new Mutation(); - _elem347.read(iprot); - struct.mutations.add(_elem347); + _elem357 = new Mutation(); + _elem357.read(iprot); + struct.mutations.add(_elem357); } iprot.readListEnd(); } @@ -33155,15 +34101,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map349 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map349.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key350; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val351; - for (int _i352 = 0; _i352 < _map349.size; ++_i352) + org.apache.thrift.protocol.TMap _map359 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map359.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key360; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val361; + for (int _i362 = 0; _i362 < _map359.size; ++_i362) { - _key350 = iprot.readBinary(); - _val351 = iprot.readBinary(); - struct.attributes.put(_key350, _val351); + _key360 = iprot.readBinary(); + _val361 = iprot.readBinary(); + struct.attributes.put(_key360, _val361); } iprot.readMapEnd(); } @@ -33201,9 +34147,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str oprot.writeFieldBegin(MUTATIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size())); - for (Mutation _iter353 : struct.mutations) + for (Mutation _iter363 : struct.mutations) { - _iter353.write(oprot); + _iter363.write(oprot); } oprot.writeListEnd(); } @@ -33213,10 +34159,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter354 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter364 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter354.getKey()); - oprot.writeBinary(_iter354.getValue()); + oprot.writeBinary(_iter364.getKey()); + oprot.writeBinary(_iter364.getValue()); } oprot.writeMapEnd(); } @@ -33262,19 +34208,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args stru if (struct.isSetMutations()) { { oprot.writeI32(struct.mutations.size()); - for (Mutation _iter355 : struct.mutations) + for (Mutation _iter365 : struct.mutations) { - _iter355.write(oprot); + _iter365.write(oprot); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter356 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter366 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter356.getKey()); - oprot.writeBinary(_iter356.getValue()); + oprot.writeBinary(_iter366.getKey()); + oprot.writeBinary(_iter366.getValue()); } } } @@ -33294,29 +34240,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struc } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list357 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.mutations = new java.util.ArrayList(_list357.size); - @org.apache.thrift.annotation.Nullable Mutation _elem358; - for (int _i359 = 0; _i359 < _list357.size; ++_i359) + org.apache.thrift.protocol.TList _list367 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.mutations = new java.util.ArrayList(_list367.size); + @org.apache.thrift.annotation.Nullable Mutation _elem368; + for (int _i369 = 0; _i369 < _list367.size; ++_i369) { - _elem358 = new Mutation(); - _elem358.read(iprot); - struct.mutations.add(_elem358); + _elem368 = new Mutation(); + _elem368.read(iprot); + struct.mutations.add(_elem368); } } struct.setMutationsIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map360 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map360.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key361; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val362; - for (int _i363 = 0; _i363 < _map360.size; ++_i363) + org.apache.thrift.protocol.TMap _map370 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map370.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key371; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val372; + for (int _i373 = 0; _i373 < _map370.size; ++_i373) { - _key361 = iprot.readBinary(); - _val362 
= iprot.readBinary(); - struct.attributes.put(_key361, _val362); + _key371 = iprot.readBinary(); + _val372 = iprot.readBinary(); + struct.attributes.put(_key371, _val372); } } struct.setAttributesIsSet(true); @@ -34573,14 +35519,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st case 3: // MUTATIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list364 = iprot.readListBegin(); - struct.mutations = new java.util.ArrayList(_list364.size); - @org.apache.thrift.annotation.Nullable Mutation _elem365; - for (int _i366 = 0; _i366 < _list364.size; ++_i366) + org.apache.thrift.protocol.TList _list374 = iprot.readListBegin(); + struct.mutations = new java.util.ArrayList(_list374.size); + @org.apache.thrift.annotation.Nullable Mutation _elem375; + for (int _i376 = 0; _i376 < _list374.size; ++_i376) { - _elem365 = new Mutation(); - _elem365.read(iprot); - struct.mutations.add(_elem365); + _elem375 = new Mutation(); + _elem375.read(iprot); + struct.mutations.add(_elem375); } iprot.readListEnd(); } @@ -34600,15 +35546,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map367 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map367.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key368; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val369; - for (int _i370 = 0; _i370 < _map367.size; ++_i370) + org.apache.thrift.protocol.TMap _map377 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map377.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key378; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val379; + for (int _i380 = 0; _i380 < _map377.size; ++_i380) { - _key368 = iprot.readBinary(); - _val369 = iprot.readBinary(); - struct.attributes.put(_key368, _val369); + _key378 = iprot.readBinary(); + _val379 = iprot.readBinary(); + struct.attributes.put(_key378, _val379); } iprot.readMapEnd(); } @@ -34646,9 +35592,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args s oprot.writeFieldBegin(MUTATIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size())); - for (Mutation _iter371 : struct.mutations) + for (Mutation _iter381 : struct.mutations) { - _iter371.write(oprot); + _iter381.write(oprot); } oprot.writeListEnd(); } @@ -34661,10 +35607,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args s oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter372 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter382 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter372.getKey()); - oprot.writeBinary(_iter372.getValue()); + oprot.writeBinary(_iter382.getKey()); + oprot.writeBinary(_iter382.getValue()); } oprot.writeMapEnd(); } @@ -34713,9 +35659,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args st if (struct.isSetMutations()) { { oprot.writeI32(struct.mutations.size()); - for (Mutation _iter373 : struct.mutations) + for (Mutation _iter383 : struct.mutations) { - 
_iter373.write(oprot); + _iter383.write(oprot); } } } @@ -34725,10 +35671,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args st if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter374 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter384 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter374.getKey()); - oprot.writeBinary(_iter374.getValue()); + oprot.writeBinary(_iter384.getKey()); + oprot.writeBinary(_iter384.getValue()); } } } @@ -34748,14 +35694,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list375 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.mutations = new java.util.ArrayList(_list375.size); - @org.apache.thrift.annotation.Nullable Mutation _elem376; - for (int _i377 = 0; _i377 < _list375.size; ++_i377) + org.apache.thrift.protocol.TList _list385 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.mutations = new java.util.ArrayList(_list385.size); + @org.apache.thrift.annotation.Nullable Mutation _elem386; + for (int _i387 = 0; _i387 < _list385.size; ++_i387) { - _elem376 = new Mutation(); - _elem376.read(iprot); - struct.mutations.add(_elem376); + _elem386 = new Mutation(); + _elem386.read(iprot); + struct.mutations.add(_elem386); } } struct.setMutationsIsSet(true); @@ -34766,15 +35712,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args str } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map378 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map378.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key379; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val380; - for (int _i381 = 0; _i381 < _map378.size; ++_i381) + org.apache.thrift.protocol.TMap _map388 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map388.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key389; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val390; + for (int _i391 = 0; _i391 < _map388.size; ++_i391) { - _key379 = iprot.readBinary(); - _val380 = iprot.readBinary(); - struct.attributes.put(_key379, _val380); + _key389 = iprot.readBinary(); + _val390 = iprot.readBinary(); + struct.attributes.put(_key389, _val390); } } struct.setAttributesIsSet(true); @@ -35825,14 +36771,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str case 2: // ROW_BATCHES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list382 = iprot.readListBegin(); - struct.rowBatches = new java.util.ArrayList(_list382.size); - @org.apache.thrift.annotation.Nullable BatchMutation _elem383; - for (int _i384 = 0; _i384 < _list382.size; ++_i384) + org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); + struct.rowBatches = new java.util.ArrayList(_list392.size); + @org.apache.thrift.annotation.Nullable BatchMutation _elem393; + for (int _i394 = 0; _i394 < _list392.size; ++_i394) { - _elem383 = new BatchMutation(); - _elem383.read(iprot); - struct.rowBatches.add(_elem383); + _elem393 = new BatchMutation(); + _elem393.read(iprot); + struct.rowBatches.add(_elem393); } iprot.readListEnd(); } 
@@ -35844,15 +36790,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str case 3: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map385 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map385.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key386; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val387; - for (int _i388 = 0; _i388 < _map385.size; ++_i388) + org.apache.thrift.protocol.TMap _map395 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map395.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key396; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val397; + for (int _i398 = 0; _i398 < _map395.size; ++_i398) { - _key386 = iprot.readBinary(); - _val387 = iprot.readBinary(); - struct.attributes.put(_key386, _val387); + _key396 = iprot.readBinary(); + _val397 = iprot.readBinary(); + struct.attributes.put(_key396, _val397); } iprot.readMapEnd(); } @@ -35885,9 +36831,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args st oprot.writeFieldBegin(ROW_BATCHES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size())); - for (BatchMutation _iter389 : struct.rowBatches) + for (BatchMutation _iter399 : struct.rowBatches) { - _iter389.write(oprot); + _iter399.write(oprot); } oprot.writeListEnd(); } @@ -35897,10 +36843,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args st oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter390 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter400 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter390.getKey()); - oprot.writeBinary(_iter390.getValue()); + oprot.writeBinary(_iter400.getKey()); + oprot.writeBinary(_iter400.getValue()); } oprot.writeMapEnd(); } @@ -35940,19 +36886,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args str if (struct.isSetRowBatches()) { { oprot.writeI32(struct.rowBatches.size()); - for (BatchMutation _iter391 : struct.rowBatches) + for (BatchMutation _iter401 : struct.rowBatches) { - _iter391.write(oprot); + _iter401.write(oprot); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter392 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter402 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter392.getKey()); - oprot.writeBinary(_iter392.getValue()); + oprot.writeBinary(_iter402.getKey()); + oprot.writeBinary(_iter402.getValue()); } } } @@ -35968,29 +36914,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args stru } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list393 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.rowBatches = new java.util.ArrayList(_list393.size); - @org.apache.thrift.annotation.Nullable BatchMutation _elem394; - for (int _i395 = 0; _i395 < _list393.size; ++_i395) + org.apache.thrift.protocol.TList _list403 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.rowBatches = new java.util.ArrayList(_list403.size); + @org.apache.thrift.annotation.Nullable 
BatchMutation _elem404; + for (int _i405 = 0; _i405 < _list403.size; ++_i405) { - _elem394 = new BatchMutation(); - _elem394.read(iprot); - struct.rowBatches.add(_elem394); + _elem404 = new BatchMutation(); + _elem404.read(iprot); + struct.rowBatches.add(_elem404); } } struct.setRowBatchesIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map396 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map396.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key397; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val398; - for (int _i399 = 0; _i399 < _map396.size; ++_i399) + org.apache.thrift.protocol.TMap _map406 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map406.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key407; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val408; + for (int _i409 = 0; _i409 < _map406.size; ++_i409) { - _key397 = iprot.readBinary(); - _val398 = iprot.readBinary(); - struct.attributes.put(_key397, _val398); + _key407 = iprot.readBinary(); + _val408 = iprot.readBinary(); + struct.attributes.put(_key407, _val408); } } struct.setAttributesIsSet(true); @@ -37132,14 +38078,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s case 2: // ROW_BATCHES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); - struct.rowBatches = new java.util.ArrayList(_list400.size); - @org.apache.thrift.annotation.Nullable BatchMutation _elem401; - for (int _i402 = 0; _i402 < _list400.size; ++_i402) + org.apache.thrift.protocol.TList _list410 = iprot.readListBegin(); + struct.rowBatches = new java.util.ArrayList(_list410.size); + @org.apache.thrift.annotation.Nullable BatchMutation _elem411; + for (int _i412 = 0; _i412 < _list410.size; ++_i412) { - _elem401 = new BatchMutation(); - _elem401.read(iprot); - struct.rowBatches.add(_elem401); + _elem411 = new BatchMutation(); + _elem411.read(iprot); + struct.rowBatches.add(_elem411); } iprot.readListEnd(); } @@ -37159,15 +38105,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map403 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map403.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key404; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val405; - for (int _i406 = 0; _i406 < _map403.size; ++_i406) + org.apache.thrift.protocol.TMap _map413 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map413.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key414; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val415; + for (int _i416 = 0; _i416 < _map413.size; ++_i416) { - _key404 = iprot.readBinary(); - _val405 = iprot.readBinary(); - struct.attributes.put(_key404, _val405); + _key414 = iprot.readBinary(); + _val415 = iprot.readBinary(); + struct.attributes.put(_key414, _val415); } iprot.readMapEnd(); } @@ -37200,9 +38146,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args oprot.writeFieldBegin(ROW_BATCHES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size())); - for (BatchMutation _iter407 : struct.rowBatches) + for (BatchMutation _iter417 : struct.rowBatches) { - _iter407.write(oprot); + _iter417.write(oprot); } oprot.writeListEnd(); } @@ -37215,10 +38161,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter408 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter418 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter408.getKey()); - oprot.writeBinary(_iter408.getValue()); + oprot.writeBinary(_iter418.getKey()); + oprot.writeBinary(_iter418.getValue()); } oprot.writeMapEnd(); } @@ -37261,9 +38207,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args s if (struct.isSetRowBatches()) { { oprot.writeI32(struct.rowBatches.size()); - for (BatchMutation _iter409 : struct.rowBatches) + for (BatchMutation _iter419 : struct.rowBatches) { - _iter409.write(oprot); + _iter419.write(oprot); } } } @@ -37273,10 +38219,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args s if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter410 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter420 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter410.getKey()); - oprot.writeBinary(_iter410.getValue()); + oprot.writeBinary(_iter420.getKey()); + oprot.writeBinary(_iter420.getValue()); } } } @@ -37292,14 +38238,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args st } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list411 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.rowBatches = new java.util.ArrayList(_list411.size); - @org.apache.thrift.annotation.Nullable BatchMutation _elem412; - for (int _i413 = 0; _i413 < _list411.size; ++_i413) + org.apache.thrift.protocol.TList _list421 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.rowBatches = new java.util.ArrayList(_list421.size); + @org.apache.thrift.annotation.Nullable BatchMutation _elem422; + for (int _i423 = 0; _i423 < _list421.size; ++_i423) { - _elem412 = new BatchMutation(); - _elem412.read(iprot); - struct.rowBatches.add(_elem412); + _elem422 = new BatchMutation(); + _elem422.read(iprot); + struct.rowBatches.add(_elem422); } } struct.setRowBatchesIsSet(true); @@ -37310,15 +38256,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args st } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map414 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map414.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key415; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val416; - for (int _i417 = 0; _i417 < _map414.size; ++_i417) + org.apache.thrift.protocol.TMap _map424 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map424.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key425; + @org.apache.thrift.annotation.Nullable 
java.nio.ByteBuffer _val426; + for (int _i427 = 0; _i427 < _map424.size; ++_i427) { - _key415 = iprot.readBinary(); - _val416 = iprot.readBinary(); - struct.attributes.put(_key415, _val416); + _key425 = iprot.readBinary(); + _val426 = iprot.readBinary(); + struct.attributes.put(_key425, _val426); } } struct.setAttributesIsSet(true); @@ -39822,15 +40768,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map418 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map418.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key419; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val420; - for (int _i421 = 0; _i421 < _map418.size; ++_i421) + org.apache.thrift.protocol.TMap _map428 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map428.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key429; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val430; + for (int _i431 = 0; _i431 < _map428.size; ++_i431) { - _key419 = iprot.readBinary(); - _val420 = iprot.readBinary(); - struct.attributes.put(_key419, _val420); + _key429 = iprot.readBinary(); + _val430 = iprot.readBinary(); + struct.attributes.put(_key429, _val430); } iprot.readMapEnd(); } @@ -39873,10 +40819,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_args str oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter422 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter432 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter422.getKey()); - oprot.writeBinary(_iter422.getValue()); + oprot.writeBinary(_iter432.getKey()); + oprot.writeBinary(_iter432.getValue()); } oprot.writeMapEnd(); } @@ -39925,10 +40871,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_args stru if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter423 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter433 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter423.getKey()); - oprot.writeBinary(_iter423.getValue()); + oprot.writeBinary(_iter433.getKey()); + oprot.writeBinary(_iter433.getValue()); } } } @@ -39952,15 +40898,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struc } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map424 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map424.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key425; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val426; - for (int _i427 = 0; _i427 < _map424.size; ++_i427) + org.apache.thrift.protocol.TMap _map434 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map434.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key435; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val436; + for (int _i437 = 0; _i437 < _map434.size; ++_i437) { - _key425 = iprot.readBinary(); - _val426 = iprot.readBinary(); - 
struct.attributes.put(_key425, _val426); + _key435 = iprot.readBinary(); + _val436 = iprot.readBinary(); + struct.attributes.put(_key435, _val436); } } struct.setAttributesIsSet(true); @@ -41118,15 +42064,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map428 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map428.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key429; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val430; - for (int _i431 = 0; _i431 < _map428.size; ++_i431) + org.apache.thrift.protocol.TMap _map438 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map438.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key439; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val440; + for (int _i441 = 0; _i441 < _map438.size; ++_i441) { - _key429 = iprot.readBinary(); - _val430 = iprot.readBinary(); - struct.attributes.put(_key429, _val430); + _key439 = iprot.readBinary(); + _val440 = iprot.readBinary(); + struct.attributes.put(_key439, _val440); } iprot.readMapEnd(); } @@ -41172,10 +42118,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_args s oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter432 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter442 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter432.getKey()); - oprot.writeBinary(_iter432.getValue()); + oprot.writeBinary(_iter442.getKey()); + oprot.writeBinary(_iter442.getValue()); } oprot.writeMapEnd(); } @@ -41230,10 +42176,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args st if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter433 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter443 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter433.getKey()); - oprot.writeBinary(_iter433.getValue()); + oprot.writeBinary(_iter443.getKey()); + oprot.writeBinary(_iter443.getValue()); } } } @@ -41261,15 +42207,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args str } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map434 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map434.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key435; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val436; - for (int _i437 = 0; _i437 < _map434.size; ++_i437) + org.apache.thrift.protocol.TMap _map444 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map444.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key445; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val446; + for (int _i447 = 0; _i447 < _map444.size; ++_i447) { - _key435 = iprot.readBinary(); - _val436 = iprot.readBinary(); - struct.attributes.put(_key435, _val436); + _key445 = iprot.readBinary(); + _val446 = iprot.readBinary(); + struct.attributes.put(_key445, _val446); } } 
struct.setAttributesIsSet(true); @@ -42213,15 +43159,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s case 3: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map438 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map438.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key439; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val440; - for (int _i441 = 0; _i441 < _map438.size; ++_i441) + org.apache.thrift.protocol.TMap _map448 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map448.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key449; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val450; + for (int _i451 = 0; _i451 < _map448.size; ++_i451) { - _key439 = iprot.readBinary(); - _val440 = iprot.readBinary(); - struct.attributes.put(_key439, _val440); + _key449 = iprot.readBinary(); + _val450 = iprot.readBinary(); + struct.attributes.put(_key449, _val450); } iprot.readMapEnd(); } @@ -42259,10 +43205,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_args oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter442 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter452 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter442.getKey()); - oprot.writeBinary(_iter442.getValue()); + oprot.writeBinary(_iter452.getKey()); + oprot.writeBinary(_iter452.getValue()); } oprot.writeMapEnd(); } @@ -42305,10 +43251,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args s if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter443 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter453 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter443.getKey()); - oprot.writeBinary(_iter443.getValue()); + oprot.writeBinary(_iter453.getKey()); + oprot.writeBinary(_iter453.getValue()); } } } @@ -42328,15 +43274,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map444 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map444.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key445; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val446; - for (int _i447 = 0; _i447 < _map444.size; ++_i447) + org.apache.thrift.protocol.TMap _map454 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map454.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key455; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val456; + for (int _i457 = 0; _i457 < _map454.size; ++_i457) { - _key445 = iprot.readBinary(); - _val446 = iprot.readBinary(); - struct.attributes.put(_key445, _val446); + _key455 = iprot.readBinary(); + _val456 = iprot.readBinary(); + struct.attributes.put(_key455, _val456); } } struct.setAttributesIsSet(true); @@ -43795,14 +44741,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args case 1: // 
INCREMENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list448 = iprot.readListBegin(); - struct.increments = new java.util.ArrayList(_list448.size); - @org.apache.thrift.annotation.Nullable TIncrement _elem449; - for (int _i450 = 0; _i450 < _list448.size; ++_i450) + org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); + struct.increments = new java.util.ArrayList(_list458.size); + @org.apache.thrift.annotation.Nullable TIncrement _elem459; + for (int _i460 = 0; _i460 < _list458.size; ++_i460) { - _elem449 = new TIncrement(); - _elem449.read(iprot); - struct.increments.add(_elem449); + _elem459 = new TIncrement(); + _elem459.read(iprot); + struct.increments.add(_elem459); } iprot.readListEnd(); } @@ -43830,9 +44776,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_args oprot.writeFieldBegin(INCREMENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.increments.size())); - for (TIncrement _iter451 : struct.increments) + for (TIncrement _iter461 : struct.increments) { - _iter451.write(oprot); + _iter461.write(oprot); } oprot.writeListEnd(); } @@ -43863,9 +44809,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_args if (struct.isSetIncrements()) { { oprot.writeI32(struct.increments.size()); - for (TIncrement _iter452 : struct.increments) + for (TIncrement _iter462 : struct.increments) { - _iter452.write(oprot); + _iter462.write(oprot); } } } @@ -43877,14 +44823,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_args s java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list453 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.increments = new java.util.ArrayList(_list453.size); - @org.apache.thrift.annotation.Nullable TIncrement _elem454; - for (int _i455 = 0; _i455 < _list453.size; ++_i455) + org.apache.thrift.protocol.TList _list463 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.increments = new java.util.ArrayList(_list463.size); + @org.apache.thrift.annotation.Nullable TIncrement _elem464; + for (int _i465 = 0; _i465 < _list463.size; ++_i465) { - _elem454 = new TIncrement(); - _elem454.read(iprot); - struct.increments.add(_elem454); + _elem464 = new TIncrement(); + _elem464.read(iprot); + struct.increments.add(_elem464); } } struct.setIncrementsIsSet(true); @@ -44927,15 +45873,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map456 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map456.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key457; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val458; - for (int _i459 = 0; _i459 < _map456.size; ++_i459) + org.apache.thrift.protocol.TMap _map466 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map466.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key467; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val468; + for (int _i469 = 0; _i469 < _map466.size; ++_i469) { - _key457 = iprot.readBinary(); - _val458 = iprot.readBinary(); - struct.attributes.put(_key457, _val458); + _key467 = iprot.readBinary(); + _val468 = iprot.readBinary(); + 
struct.attributes.put(_key467, _val468); } iprot.readMapEnd(); } @@ -44976,10 +45922,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_arg oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter460 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter470 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter460.getKey()); - oprot.writeBinary(_iter460.getValue()); + oprot.writeBinary(_iter470.getKey()); + oprot.writeBinary(_iter470.getValue()); } oprot.writeMapEnd(); } @@ -45028,10 +45974,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter461 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter471 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter461.getKey()); - oprot.writeBinary(_iter461.getValue()); + oprot.writeBinary(_iter471.getKey()); + oprot.writeBinary(_iter471.getValue()); } } } @@ -45055,15 +46001,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map462 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map462.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key463; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val464; - for (int _i465 = 0; _i465 < _map462.size; ++_i465) + org.apache.thrift.protocol.TMap _map472 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map472.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key473; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val474; + for (int _i475 = 0; _i475 < _map472.size; ++_i475) { - _key463 = iprot.readBinary(); - _val464 = iprot.readBinary(); - struct.attributes.put(_key463, _val464); + _key473 = iprot.readBinary(); + _val474 = iprot.readBinary(); + struct.attributes.put(_key473, _val474); } } struct.setAttributesIsSet(true); @@ -45998,15 +46944,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan case 3: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map466 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map466.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key467; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val468; - for (int _i469 = 0; _i469 < _map466.size; ++_i469) + org.apache.thrift.protocol.TMap _map476 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map476.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key477; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val478; + for (int _i479 = 0; _i479 < _map476.size; ++_i479) { - _key467 = iprot.readBinary(); - _val468 = iprot.readBinary(); - struct.attributes.put(_key467, _val468); + _key477 = iprot.readBinary(); + _val478 = iprot.readBinary(); + struct.attributes.put(_key477, _val478); } iprot.readMapEnd(); } @@ -46044,10 +46990,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
scannerOpenWithSca oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter470 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter480 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter470.getKey()); - oprot.writeBinary(_iter470.getValue()); + oprot.writeBinary(_iter480.getKey()); + oprot.writeBinary(_iter480.getValue()); } oprot.writeMapEnd(); } @@ -46090,10 +47036,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter471 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter481 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter471.getKey()); - oprot.writeBinary(_iter471.getValue()); + oprot.writeBinary(_iter481.getKey()); + oprot.writeBinary(_iter481.getValue()); } } } @@ -46114,15 +47060,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map472 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map472.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key473; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val474; - for (int _i475 = 0; _i475 < _map472.size; ++_i475) + org.apache.thrift.protocol.TMap _map482 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map482.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key483; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val484; + for (int _i485 = 0; _i485 < _map482.size; ++_i485) { - _key473 = iprot.readBinary(); - _val474 = iprot.readBinary(); - struct.attributes.put(_key473, _val474); + _key483 = iprot.readBinary(); + _val484 = iprot.readBinary(); + struct.attributes.put(_key483, _val484); } } struct.setAttributesIsSet(true); @@ -47295,13 +48241,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list476.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem477; - for (int _i478 = 0; _i478 < _list476.size; ++_i478) + org.apache.thrift.protocol.TList _list486 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list486.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem487; + for (int _i488 = 0; _i488 < _list486.size; ++_i488) { - _elem477 = iprot.readBinary(); - struct.columns.add(_elem477); + _elem487 = iprot.readBinary(); + struct.columns.add(_elem487); } iprot.readListEnd(); } @@ -47313,15 +48259,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map479 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map479.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key480; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer 
_val481; - for (int _i482 = 0; _i482 < _map479.size; ++_i482) + org.apache.thrift.protocol.TMap _map489 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map489.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key490; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val491; + for (int _i492 = 0; _i492 < _map489.size; ++_i492) { - _key480 = iprot.readBinary(); - _val481 = iprot.readBinary(); - struct.attributes.put(_key480, _val481); + _key490 = iprot.readBinary(); + _val491 = iprot.readBinary(); + struct.attributes.put(_key490, _val491); } iprot.readMapEnd(); } @@ -47359,9 +48305,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args s oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter483 : struct.columns) + for (java.nio.ByteBuffer _iter493 : struct.columns) { - oprot.writeBinary(_iter483); + oprot.writeBinary(_iter493); } oprot.writeListEnd(); } @@ -47371,10 +48317,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args s oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter484 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter494 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter484.getKey()); - oprot.writeBinary(_iter484.getValue()); + oprot.writeBinary(_iter494.getKey()); + oprot.writeBinary(_iter494.getValue()); } oprot.writeMapEnd(); } @@ -47420,19 +48366,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args st if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter485 : struct.columns) + for (java.nio.ByteBuffer _iter495 : struct.columns) { - oprot.writeBinary(_iter485); + oprot.writeBinary(_iter495); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter486 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter496 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter486.getKey()); - oprot.writeBinary(_iter486.getValue()); + oprot.writeBinary(_iter496.getKey()); + oprot.writeBinary(_iter496.getValue()); } } } @@ -47452,28 +48398,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list487 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list487.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem488; - for (int _i489 = 0; _i489 < _list487.size; ++_i489) + org.apache.thrift.protocol.TList _list497 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list497.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem498; + for (int _i499 = 0; _i499 < _list497.size; ++_i499) { - _elem488 = iprot.readBinary(); - struct.columns.add(_elem488); + _elem498 = iprot.readBinary(); + struct.columns.add(_elem498); } } struct.setColumnsIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map490 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - 
struct.attributes = new java.util.HashMap(2*_map490.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key491; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val492; - for (int _i493 = 0; _i493 < _map490.size; ++_i493) + org.apache.thrift.protocol.TMap _map500 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map500.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key501; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val502; + for (int _i503 = 0; _i503 < _map500.size; ++_i503) { - _key491 = iprot.readBinary(); - _val492 = iprot.readBinary(); - struct.attributes.put(_key491, _val492); + _key501 = iprot.readBinary(); + _val502 = iprot.readBinary(); + struct.attributes.put(_key501, _val502); } } struct.setAttributesIsSet(true); @@ -48765,13 +49711,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop case 4: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list494 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list494.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem495; - for (int _i496 = 0; _i496 < _list494.size; ++_i496) + org.apache.thrift.protocol.TList _list504 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list504.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem505; + for (int _i506 = 0; _i506 < _list504.size; ++_i506) { - _elem495 = iprot.readBinary(); - struct.columns.add(_elem495); + _elem505 = iprot.readBinary(); + struct.columns.add(_elem505); } iprot.readListEnd(); } @@ -48783,15 +49729,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map497 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map497.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key498; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val499; - for (int _i500 = 0; _i500 < _map497.size; ++_i500) + org.apache.thrift.protocol.TMap _map507 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map507.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key508; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val509; + for (int _i510 = 0; _i510 < _map507.size; ++_i510) { - _key498 = iprot.readBinary(); - _val499 = iprot.readBinary(); - struct.attributes.put(_key498, _val499); + _key508 = iprot.readBinary(); + _val509 = iprot.readBinary(); + struct.attributes.put(_key508, _val509); } iprot.readMapEnd(); } @@ -48834,9 +49780,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter501 : struct.columns) + for (java.nio.ByteBuffer _iter511 : struct.columns) { - oprot.writeBinary(_iter501); + oprot.writeBinary(_iter511); } oprot.writeListEnd(); } @@ -48846,10 +49792,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter502 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter512 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter502.getKey()); - oprot.writeBinary(_iter502.getValue()); + oprot.writeBinary(_iter512.getKey()); + oprot.writeBinary(_iter512.getValue()); } oprot.writeMapEnd(); } @@ -48901,19 +49847,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter503 : struct.columns) + for (java.nio.ByteBuffer _iter513 : struct.columns) { - oprot.writeBinary(_iter503); + oprot.writeBinary(_iter513); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter504 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter514 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter504.getKey()); - oprot.writeBinary(_iter504.getValue()); + oprot.writeBinary(_iter514.getKey()); + oprot.writeBinary(_iter514.getValue()); } } } @@ -48937,28 +49883,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_ } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list505 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list505.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem506; - for (int _i507 = 0; _i507 < _list505.size; ++_i507) + org.apache.thrift.protocol.TList _list515 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list515.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem516; + for (int _i517 = 0; _i517 < _list515.size; ++_i517) { - _elem506 = iprot.readBinary(); - struct.columns.add(_elem506); + _elem516 = iprot.readBinary(); + struct.columns.add(_elem516); } } struct.setColumnsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map508 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map508.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key509; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val510; - for (int _i511 = 0; _i511 < _map508.size; ++_i511) + org.apache.thrift.protocol.TMap _map518 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map518.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key519; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val520; + for (int _i521 = 0; _i521 < _map518.size; ++_i521) { - _key509 = iprot.readBinary(); - _val510 = iprot.readBinary(); - struct.attributes.put(_key509, _val510); + _key519 = iprot.readBinary(); + _val520 = iprot.readBinary(); + struct.attributes.put(_key519, _val520); } } struct.setAttributesIsSet(true); @@ -50119,13 +51065,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list512 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list512.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer 
_elem513; - for (int _i514 = 0; _i514 < _list512.size; ++_i514) + org.apache.thrift.protocol.TList _list522 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list522.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem523; + for (int _i524 = 0; _i524 < _list522.size; ++_i524) { - _elem513 = iprot.readBinary(); - struct.columns.add(_elem513); + _elem523 = iprot.readBinary(); + struct.columns.add(_elem523); } iprot.readListEnd(); } @@ -50137,15 +51083,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref case 4: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map515 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map515.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key516; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val517; - for (int _i518 = 0; _i518 < _map515.size; ++_i518) + org.apache.thrift.protocol.TMap _map525 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map525.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key526; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val527; + for (int _i528 = 0; _i528 < _map525.size; ++_i528) { - _key516 = iprot.readBinary(); - _val517 = iprot.readBinary(); - struct.attributes.put(_key516, _val517); + _key526 = iprot.readBinary(); + _val527 = iprot.readBinary(); + struct.attributes.put(_key526, _val527); } iprot.readMapEnd(); } @@ -50183,9 +51129,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter519 : struct.columns) + for (java.nio.ByteBuffer _iter529 : struct.columns) { - oprot.writeBinary(_iter519); + oprot.writeBinary(_iter529); } oprot.writeListEnd(); } @@ -50195,10 +51141,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter520 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter530 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter520.getKey()); - oprot.writeBinary(_iter520.getValue()); + oprot.writeBinary(_iter530.getKey()); + oprot.writeBinary(_iter530.getValue()); } oprot.writeMapEnd(); } @@ -50244,19 +51190,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPref if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter521 : struct.columns) + for (java.nio.ByteBuffer _iter531 : struct.columns) { - oprot.writeBinary(_iter521); + oprot.writeBinary(_iter531); } } } if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter522 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter532 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter522.getKey()); - oprot.writeBinary(_iter522.getValue()); + oprot.writeBinary(_iter532.getKey()); + oprot.writeBinary(_iter532.getValue()); } } } @@ -50276,28 +51222,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefi } if (incoming.get(2)) { { - 
org.apache.thrift.protocol.TList _list523 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list523.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem524; - for (int _i525 = 0; _i525 < _list523.size; ++_i525) + org.apache.thrift.protocol.TList _list533 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list533.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem534; + for (int _i535 = 0; _i535 < _list533.size; ++_i535) { - _elem524 = iprot.readBinary(); - struct.columns.add(_elem524); + _elem534 = iprot.readBinary(); + struct.columns.add(_elem534); } } struct.setColumnsIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map526 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map526.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key527; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val528; - for (int _i529 = 0; _i529 < _map526.size; ++_i529) + org.apache.thrift.protocol.TMap _map536 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map536.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key537; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val538; + for (int _i539 = 0; _i539 < _map536.size; ++_i539) { - _key527 = iprot.readBinary(); - _val528 = iprot.readBinary(); - struct.attributes.put(_key527, _val528); + _key537 = iprot.readBinary(); + _val538 = iprot.readBinary(); + struct.attributes.put(_key537, _val538); } } struct.setAttributesIsSet(true); @@ -51561,13 +52507,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args case 3: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list530 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list530.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem531; - for (int _i532 = 0; _i532 < _list530.size; ++_i532) + org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list540.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem541; + for (int _i542 = 0; _i542 < _list540.size; ++_i542) { - _elem531 = iprot.readBinary(); - struct.columns.add(_elem531); + _elem541 = iprot.readBinary(); + struct.columns.add(_elem541); } iprot.readListEnd(); } @@ -51587,15 +52533,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args case 5: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map533 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map533.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key534; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val535; - for (int _i536 = 0; _i536 < _map533.size; ++_i536) + org.apache.thrift.protocol.TMap _map543 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map543.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key544; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val545; + for (int _i546 = 0; _i546 < _map543.size; ++_i546) { - _key534 = iprot.readBinary(); - 
_val535 = iprot.readBinary(); - struct.attributes.put(_key534, _val535); + _key544 = iprot.readBinary(); + _val545 = iprot.readBinary(); + struct.attributes.put(_key544, _val545); } iprot.readMapEnd(); } @@ -51633,9 +52579,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter537 : struct.columns) + for (java.nio.ByteBuffer _iter547 : struct.columns) { - oprot.writeBinary(_iter537); + oprot.writeBinary(_iter547); } oprot.writeListEnd(); } @@ -51648,10 +52594,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter538 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter548 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter538.getKey()); - oprot.writeBinary(_iter538.getValue()); + oprot.writeBinary(_iter548.getKey()); + oprot.writeBinary(_iter548.getValue()); } oprot.writeMapEnd(); } @@ -51700,9 +52646,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter539 : struct.columns) + for (java.nio.ByteBuffer _iter549 : struct.columns) { - oprot.writeBinary(_iter539); + oprot.writeBinary(_iter549); } } } @@ -51712,10 +52658,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter540 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter550 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter540.getKey()); - oprot.writeBinary(_iter540.getValue()); + oprot.writeBinary(_iter550.getKey()); + oprot.writeBinary(_iter550.getValue()); } } } @@ -51735,13 +52681,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list541 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list541.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem542; - for (int _i543 = 0; _i543 < _list541.size; ++_i543) + org.apache.thrift.protocol.TList _list551 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list551.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem552; + for (int _i553 = 0; _i553 < _list551.size; ++_i553) { - _elem542 = iprot.readBinary(); - struct.columns.add(_elem542); + _elem552 = iprot.readBinary(); + struct.columns.add(_elem552); } } struct.setColumnsIsSet(true); @@ -51752,15 +52698,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args s } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map544 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map544.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key545; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val546; - for (int _i547 = 0; 
_i547 < _map544.size; ++_i547) + org.apache.thrift.protocol.TMap _map554 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map554.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key555; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val556; + for (int _i557 = 0; _i557 < _map554.size; ++_i557) { - _key545 = iprot.readBinary(); - _val546 = iprot.readBinary(); - struct.attributes.put(_key545, _val546); + _key555 = iprot.readBinary(); + _val556 = iprot.readBinary(); + struct.attributes.put(_key555, _val556); } } struct.setAttributesIsSet(true); @@ -53143,13 +54089,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop case 4: // COLUMNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); - struct.columns = new java.util.ArrayList(_list548.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem549; - for (int _i550 = 0; _i550 < _list548.size; ++_i550) + org.apache.thrift.protocol.TList _list558 = iprot.readListBegin(); + struct.columns = new java.util.ArrayList(_list558.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem559; + for (int _i560 = 0; _i560 < _list558.size; ++_i560) { - _elem549 = iprot.readBinary(); - struct.columns.add(_elem549); + _elem559 = iprot.readBinary(); + struct.columns.add(_elem559); } iprot.readListEnd(); } @@ -53169,15 +54115,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop case 6: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map551 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map551.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key552; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val553; - for (int _i554 = 0; _i554 < _map551.size; ++_i554) + org.apache.thrift.protocol.TMap _map561 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map561.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key562; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val563; + for (int _i564 = 0; _i564 < _map561.size; ++_i564) { - _key552 = iprot.readBinary(); - _val553 = iprot.readBinary(); - struct.attributes.put(_key552, _val553); + _key562 = iprot.readBinary(); + _val563 = iprot.readBinary(); + struct.attributes.put(_key562, _val563); } iprot.readMapEnd(); } @@ -53220,9 +54166,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto oprot.writeFieldBegin(COLUMNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size())); - for (java.nio.ByteBuffer _iter555 : struct.columns) + for (java.nio.ByteBuffer _iter565 : struct.columns) { - oprot.writeBinary(_iter555); + oprot.writeBinary(_iter565); } oprot.writeListEnd(); } @@ -53235,10 +54181,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter556 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter566 : struct.attributes.entrySet()) { - 
oprot.writeBinary(_iter556.getKey()); - oprot.writeBinary(_iter556.getValue()); + oprot.writeBinary(_iter566.getKey()); + oprot.writeBinary(_iter566.getValue()); } oprot.writeMapEnd(); } @@ -53293,9 +54239,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop if (struct.isSetColumns()) { { oprot.writeI32(struct.columns.size()); - for (java.nio.ByteBuffer _iter557 : struct.columns) + for (java.nio.ByteBuffer _iter567 : struct.columns) { - oprot.writeBinary(_iter557); + oprot.writeBinary(_iter567); } } } @@ -53305,10 +54251,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter558 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter568 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter558.getKey()); - oprot.writeBinary(_iter558.getValue()); + oprot.writeBinary(_iter568.getKey()); + oprot.writeBinary(_iter568.getValue()); } } } @@ -53332,13 +54278,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list559 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.columns = new java.util.ArrayList(_list559.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem560; - for (int _i561 = 0; _i561 < _list559.size; ++_i561) + org.apache.thrift.protocol.TList _list569 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.columns = new java.util.ArrayList(_list569.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem570; + for (int _i571 = 0; _i571 < _list569.size; ++_i571) { - _elem560 = iprot.readBinary(); - struct.columns.add(_elem560); + _elem570 = iprot.readBinary(); + struct.columns.add(_elem570); } } struct.setColumnsIsSet(true); @@ -53349,15 +54295,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT } if (incoming.get(5)) { { - org.apache.thrift.protocol.TMap _map562 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map562.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key563; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val564; - for (int _i565 = 0; _i565 < _map562.size; ++_i565) + org.apache.thrift.protocol.TMap _map572 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map572.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key573; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val574; + for (int _i575 = 0; _i575 < _map572.size; ++_i575) { - _key563 = iprot.readBinary(); - _val564 = iprot.readBinary(); - struct.attributes.put(_key563, _val564); + _key573 = iprot.readBinary(); + _val574 = iprot.readBinary(); + struct.attributes.put(_key573, _val574); } } struct.setAttributesIsSet(true); @@ -54693,14 +55639,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list566.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem567; - for (int _i568 = 0; _i568 < _list566.size; ++_i568) + 
org.apache.thrift.protocol.TList _list576 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list576.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem577; + for (int _i578 = 0; _i578 < _list576.size; ++_i578) { - _elem567 = new TRowResult(); - _elem567.read(iprot); - struct.success.add(_elem567); + _elem577 = new TRowResult(); + _elem577.read(iprot); + struct.success.add(_elem577); } iprot.readListEnd(); } @@ -54746,9 +55692,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter569 : struct.success) + for (TRowResult _iter579 : struct.success) { - _iter569.write(oprot); + _iter579.write(oprot); } oprot.writeListEnd(); } @@ -54795,9 +55741,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter570 : struct.success) + for (TRowResult _iter580 : struct.success) { - _iter570.write(oprot); + _iter580.write(oprot); } } } @@ -54815,14 +55761,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_result st java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list571 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list571.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem572; - for (int _i573 = 0; _i573 < _list571.size; ++_i573) + org.apache.thrift.protocol.TList _list581 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list581.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem582; + for (int _i583 = 0; _i583 < _list581.size; ++_i583) { - _elem572 = new TRowResult(); - _elem572.read(iprot); - struct.success.add(_elem572); + _elem582 = new TRowResult(); + _elem582.read(iprot); + struct.success.add(_elem582); } } struct.setSuccessIsSet(true); @@ -55807,14 +56753,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list574 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list574.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem575; - for (int _i576 = 0; _i576 < _list574.size; ++_i576) + org.apache.thrift.protocol.TList _list584 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list584.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem585; + for (int _i586 = 0; _i586 < _list584.size; ++_i586) { - _elem575 = new TRowResult(); - _elem575.read(iprot); - struct.success.add(_elem575); + _elem585 = new TRowResult(); + _elem585.read(iprot); + struct.success.add(_elem585); } iprot.readListEnd(); } @@ -55860,9 +56806,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TRowResult _iter577 : struct.success) + for (TRowResult _iter587 : struct.success) { - _iter577.write(oprot); + _iter587.write(oprot); } oprot.writeListEnd(); } @@ -55909,9 +56855,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TRowResult _iter578 : struct.success) + for (TRowResult _iter588 : struct.success) { - _iter578.write(oprot); + _iter588.write(oprot); } } } @@ -55929,14 +56875,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_resul java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list579 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list579.size); - @org.apache.thrift.annotation.Nullable TRowResult _elem580; - for (int _i581 = 0; _i581 < _list579.size; ++_i581) + org.apache.thrift.protocol.TList _list589 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list589.size); + @org.apache.thrift.annotation.Nullable TRowResult _elem590; + for (int _i591 = 0; _i591 < _list589.size; ++_i591) { - _elem580 = new TRowResult(); - _elem580.read(iprot); - struct.success.add(_elem580); + _elem590 = new TRowResult(); + _elem590.read(iprot); + struct.success.add(_elem590); } } struct.setSuccessIsSet(true); @@ -58454,14 +59400,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list582 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list582.size); - @org.apache.thrift.annotation.Nullable TCell _elem583; - for (int _i584 = 0; _i584 < _list582.size; ++_i584) + org.apache.thrift.protocol.TList _list592 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list592.size); + @org.apache.thrift.annotation.Nullable TCell _elem593; + for (int _i594 = 0; _i594 < _list592.size; ++_i594) { - _elem583 = new TCell(); - _elem583.read(iprot); - struct.success.add(_elem583); + _elem593 = new TCell(); + _elem593.read(iprot); + struct.success.add(_elem593); } iprot.readListEnd(); } @@ -58498,9 +59444,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_result stru oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter585 : struct.success) + for (TCell _iter595 : struct.success) { - _iter585.write(oprot); + _iter595.write(oprot); } oprot.writeListEnd(); } @@ -58539,9 +59485,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_result struc if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TCell _iter586 : struct.success) + for (TCell _iter596 : struct.success) { - _iter586.write(oprot); + _iter596.write(oprot); } } } @@ -58556,14 +59502,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list587 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list587.size); - @org.apache.thrift.annotation.Nullable TCell _elem588; - for (int _i589 = 0; _i589 < _list587.size; ++_i589) + org.apache.thrift.protocol.TList _list597 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list597.size); + @org.apache.thrift.annotation.Nullable TCell _elem598; + for (int _i599 = 
0; _i599 < _list597.size; ++_i599) { - _elem588 = new TCell(); - _elem588.read(iprot); - struct.success.add(_elem588); + _elem598 = new TCell(); + _elem598.read(iprot); + struct.success.add(_elem598); } } struct.setSuccessIsSet(true); @@ -59489,15 +60435,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st case 7: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map590 = iprot.readMapBegin(); - struct.attributes = new java.util.HashMap(2*_map590.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key591; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val592; - for (int _i593 = 0; _i593 < _map590.size; ++_i593) + org.apache.thrift.protocol.TMap _map600 = iprot.readMapBegin(); + struct.attributes = new java.util.HashMap(2*_map600.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key601; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val602; + for (int _i603 = 0; _i603 < _map600.size; ++_i603) { - _key591 = iprot.readBinary(); - _val592 = iprot.readBinary(); - struct.attributes.put(_key591, _val592); + _key601 = iprot.readBinary(); + _val602 = iprot.readBinary(); + struct.attributes.put(_key601, _val602); } iprot.readMapEnd(); } @@ -59550,10 +60496,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args s oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (java.util.Map.Entry _iter594 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter604 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter594.getKey()); - oprot.writeBinary(_iter594.getValue()); + oprot.writeBinary(_iter604.getKey()); + oprot.writeBinary(_iter604.getValue()); } oprot.writeMapEnd(); } @@ -59614,10 +60560,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args st if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (java.util.Map.Entry _iter595 : struct.attributes.entrySet()) + for (java.util.Map.Entry _iter605 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter595.getKey()); - oprot.writeBinary(_iter595.getValue()); + oprot.writeBinary(_iter605.getKey()); + oprot.writeBinary(_iter605.getValue()); } } } @@ -59650,15 +60596,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args str } if (incoming.get(5)) { { - org.apache.thrift.protocol.TMap _map596 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.attributes = new java.util.HashMap(2*_map596.size); - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key597; - @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val598; - for (int _i599 = 0; _i599 < _map596.size; ++_i599) + org.apache.thrift.protocol.TMap _map606 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.attributes = new java.util.HashMap(2*_map606.size); + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key607; + @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val608; + for (int _i609 = 0; _i609 < _map606.size; ++_i609) { - _key597 = iprot.readBinary(); - _val598 = iprot.readBinary(); - struct.attributes.put(_key597, _val598); + _key607 = iprot.readBinary(); + _val608 = 
iprot.readBinary(); + struct.attributes.put(_key607, _val608); } } struct.setAttributesIsSet(true); diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift index cfadcb29dcf9..8b204b3bbd7e 100644 --- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift +++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift @@ -261,6 +261,14 @@ service Hbase { list getTableNames() throws (1:IOError io) + /** + * List all the userspace tables and their enabled or disabled flags. + * + * @return list of tables with is enabled flags + */ + map getTableNamesWithIsTableEnabled() + throws (1:IOError io) + /** * List all the column families assoicated with a table. * diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index a6049af3e6b4..3063b068a34e 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -32,6 +32,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; @@ -725,6 +726,35 @@ public static void doTestCheckAndPut() throws Exception { } } + @Test + public void testGetTableNamesWithStatus() throws Exception{ + ThriftHBaseServiceHandler handler = + new ThriftHBaseServiceHandler(UTIL.getConfiguration(), + UserProvider.instantiate(UTIL.getConfiguration())); + + createTestTables(handler); + + assertEquals(2, handler.getTableNamesWithIsTableEnabled().size()); + assertEquals(2, countTablesByStatus(true, handler)); + handler.disableTable(tableBname); + assertEquals(1, countTablesByStatus(true, handler)); + assertEquals(1, countTablesByStatus(false, handler)); + assertEquals(2, handler.getTableNamesWithIsTableEnabled().size()); + handler.enableTable(tableBname); + assertEquals(2, countTablesByStatus(true, handler)); + + dropTestTables(handler); + } + + private static int countTablesByStatus(Boolean isEnabled, Hbase.Iface handler) throws Exception { + AtomicInteger counter = new AtomicInteger(0); + handler.getTableNamesWithIsTableEnabled().forEach( + (table, tableStatus) -> { + if (tableStatus.equals(isEnabled)) counter.getAndIncrement(); + }); + return counter.get(); + } + @Test public void testMetricsWithException() throws Exception { String rowkey = "row1"; @@ -732,50 +762,49 @@ public void testMetricsWithException() throws Exception { String col = "c"; // create a table which will throw exceptions for requests final TableName tableName = TableName.valueOf(name.getMethodName()); - - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .build(); - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName) + try { + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setCoprocessor(ErrorThrowingGetObserver.class.getName()) - .setColumnFamily(columnFamilyDescriptor) - .build(); + .setColumnFamily(columnFamilyDescriptor).build(); - Table table = 
UTIL.createTable(tableDescriptor, null); - long now = EnvironmentEdgeManager.currentTime(); - table.put(new Put(Bytes.toBytes(rowkey)) + Table table = UTIL.createTable(tableDescriptor, null); + long now = EnvironmentEdgeManager.currentTime(); + table.put(new Put(Bytes.toBytes(rowkey)) .addColumn(Bytes.toBytes(family), Bytes.toBytes(col), now, Bytes.toBytes("val1"))); - Configuration conf = UTIL.getConfiguration(); - ThriftMetrics metrics = getMetrics(conf); - ThriftHBaseServiceHandler hbaseHandler = - new ThriftHBaseServiceHandler(UTIL.getConfiguration(), - UserProvider.instantiate(UTIL.getConfiguration())); - Hbase.Iface handler = HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, conf); + Configuration conf = UTIL.getConfiguration(); + ThriftMetrics metrics = getMetrics(conf); + ThriftHBaseServiceHandler hbaseHandler = + new ThriftHBaseServiceHandler(UTIL.getConfiguration(), UserProvider.instantiate(UTIL.getConfiguration())); + Hbase.Iface handler = HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, conf); - ByteBuffer tTableName = asByteBuffer(tableName.getNameAsString()); + ByteBuffer tTableName = asByteBuffer(tableName.getNameAsString()); - // check metrics increment with a successful get - long preGetCounter = metricsHelper.checkCounterExists("getRow_num_ops", metrics.getSource()) ? + // check metrics increment with a successful get + long preGetCounter = metricsHelper.checkCounterExists("getRow_num_ops", metrics.getSource()) ? metricsHelper.getCounter("getRow_num_ops", metrics.getSource()) : 0; - List tRowResult = handler.getRow(tTableName, asByteBuffer(rowkey), null); - assertEquals(1, tRowResult.size()); - TRowResult tResult = tRowResult.get(0); + List tRowResult = handler.getRow(tTableName, asByteBuffer(rowkey), null); + assertEquals(1, tRowResult.size()); + TRowResult tResult = tRowResult.get(0); - TCell expectedColumnValue = new TCell(asByteBuffer("val1"), now); + TCell expectedColumnValue = new TCell(asByteBuffer("val1"), now); - assertArrayEquals(Bytes.toBytes(rowkey), tResult.getRow()); - Collection returnedColumnValues = tResult.getColumns().values(); - assertEquals(1, returnedColumnValues.size()); - assertEquals(expectedColumnValue, returnedColumnValues.iterator().next()); + assertArrayEquals(Bytes.toBytes(rowkey), tResult.getRow()); + Collection returnedColumnValues = tResult.getColumns().values(); + assertEquals(1, returnedColumnValues.size()); + assertEquals(expectedColumnValue, returnedColumnValues.iterator().next()); - metricsHelper.assertCounter("getRow_num_ops", preGetCounter + 1, metrics.getSource()); + metricsHelper.assertCounter("getRow_num_ops", preGetCounter + 1, metrics.getSource()); - // check metrics increment when the get throws each exception type - for (ErrorThrowingGetObserver.ErrorType type : ErrorThrowingGetObserver.ErrorType.values()) { - testExceptionType(handler, metrics, tTableName, rowkey, type); + // check metrics increment when the get throws each exception type + for (ErrorThrowingGetObserver.ErrorType type : ErrorThrowingGetObserver.ErrorType.values()) { + testExceptionType(handler, metrics, tTableName, rowkey, type); + } + } finally { + UTIL.deleteTable(tableName); } } diff --git a/pom.xml b/pom.xml index 6768756b6ea0..e2caf6ff6cc9 100755 --- a/pom.xml +++ b/pom.xml @@ -715,6 +715,12 @@ sunxin@apache.org +8 + + huangzhuoyue + Zhuoyue Huang + huangzhuoyue@apache.org + +8 + diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc index e8a7ef3866d0..6ad0083e0f46 100644 --- 
a/src/main/asciidoc/_chapters/community.adoc +++ b/src/main/asciidoc/_chapters/community.adoc @@ -87,8 +87,8 @@ NOTE: End-of-life releases are not included in this list. | Release | Release Manager -| 1.4 -| Andrew Purtell +| 1.7 +| Reid Chan | 2.3 | Nick Dimiduk diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 71b2ba3a4dff..ecfff5d9452b 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -289,20 +289,14 @@ Use the following legend to interpret this table: link:https://hadoop.apache.org/cve_list.html[CVEs] so we drop the support in newer minor releases * icon:exclamation-circle[role="yellow"] = Not tested, may/may-not function -[cols="1,4*^.^", options="header"] +[cols="1,2*^.^", options="header"] |=== -| | HBase-1.4.x | HBase-1.6.x | HBase-1.7.x | HBase-2.3.x -|Hadoop-2.7.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.7.1+ | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.8.[0-2] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.8.[3-4] | icon:exclamation-circle[role="yellow"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.8.5+ | icon:exclamation-circle[role="yellow"] | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.9.[0-1] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.9.2+ | icon:exclamation-circle[role="yellow"] | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-2.10.x | icon:exclamation-circle[role="yellow"] | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:check-circle[role="green"] -|Hadoop-3.1.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] -|Hadoop-3.1.1+ | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:check-circle[role="green"] -|Hadoop-3.2.x | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:check-circle[role="green"] +| | HBase-1.7.x | HBase-2.3.x +|Hadoop-2.10.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] +|Hadoop-3.1.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] +|Hadoop-3.1.1+ | icon:times-circle[role="red"] | icon:check-circle[role="green"] +|Hadoop-3.2.x | icon:times-circle[role="red"] | icon:check-circle[role="green"] +|Hadoop-3.3.x | icon:times-circle[role="red"] | icon:check-circle[role="green"] |=== .Hadoop 2.y.0 Releases diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc index 1064130fc86a..f20b88ec9bb4 100644 --- a/src/main/asciidoc/_chapters/upgrading.adoc +++ b/src/main/asciidoc/_chapters/upgrading.adoc @@ -147,7 +147,7 @@ HBase has a lot of API points, but for the compatibility matrix above, we differ Classes which are defined as `IA.Private` may be used as parameters or return values for interfaces which are declared `IA.LimitedPrivate`. 
Treat the `IA.Private` object as opaque; do not try to access its methods or fields directly. * InterfaceStability (link:https://yetus.apache.org/documentation/in-progress/javadocs/org/apache/yetus/audience/InterfaceStability.html[javadocs]): describes what types of interface changes are permitted. Possible values include: - Stable: the interface is fixed and is not expected to change - - Evolving: the interface may change in future minor verisons + - Evolving: the interface may change in future minor versions - Unstable: the interface may change at any time Please keep in mind the following interactions between the `InterfaceAudience` and `InterfaceStability` annotations within the HBase project: @@ -360,7 +360,7 @@ tooling for triage and analysis as long as they are pointed to the appropriate l [[upgrade2.2]] === Upgrade from 2.0 or 2.1 to 2.2+ -HBase 2.2+ uses a new Procedure form assiging/unassigning/moving Regions. It does not process HBase 2.1 and 2.0's Unassign/Assign Procedure types. Upgrade requires that we first drain the Master Procedure Store of old style Procedures before starting the new 2.2 Master. So you need to make sure that before you kill the old version (2.0 or 2.1) Master, there is no region in transition. And once the new version (2.2+) Master is up, you can rolling upgrade RegionServers one by one. +HBase 2.2+ uses a new Procedure form assigning/unassigning/moving Regions. It does not process HBase 2.1 and 2.0's Unassign/Assign Procedure types. Upgrade requires that we first drain the Master Procedure Store of old style Procedures before starting the new 2.2 Master. So you need to make sure that before you kill the old version (2.0 or 2.1) Master, there is no region in transition. And once the new version (2.2+) Master is up, you can rolling upgrade RegionServers one by one. And there is a more safer way if you are running 2.1.1+ or 2.0.3+ cluster. It need four steps to upgrade Master. @@ -374,7 +374,7 @@ Then you can rolling upgrade RegionServers one by one. See link:https://issues.a [[upgrade2.0]] === Upgrading from 1.x to 2.x -In this section we will first call out significant changes compared to the prior stable HBase release and then go over the upgrade process. Be sure to read the former with care so you avoid suprises. +In this section we will first call out significant changes compared to the prior stable HBase release and then go over the upgrade process. Be sure to read the former with care so you avoid surprises. ==== Changes of Note! @@ -451,7 +451,7 @@ The following configuration settings changed their default value. Where applicab * hbase.regionserver.hlog.blocksize defaults to 2x the HDFS default block size for the WAL dir. Previously it was equal to the HDFS default block size for the WAL dir. * hbase.client.start.log.errors.counter changed to 5. Previously it was 9. * hbase.ipc.server.callqueue.type changed to 'fifo'. In HBase versions 1.0 - 1.2 it was 'deadline'. In prior and later 1.x versions it already defaults to 'fifo'. -* hbase.hregion.memstore.chunkpool.maxsize is 1.0 by default. Previously it was 0.0. Effectively, this means previously we would not use a chunk pool when our memstore is onheap and now we will. See the section <> for more infromation about the MSLAB chunk pool. +* hbase.hregion.memstore.chunkpool.maxsize is 1.0 by default. Previously it was 0.0. Effectively, this means previously we would not use a chunk pool when our memstore is onheap and now we will. See the section <> for more information about the MSLAB chunk pool. 
* hbase.master.cleaner.interval is now set to 10 minutes. Previously it was 1 minute. * hbase.master.procedure.threads will now default to 1/4 of the number of available CPUs, but not less than 16 threads. Previously it would be number of threads equal to number of CPUs. * hbase.hstore.blockingStoreFiles is now 16. Previously it was 10. @@ -508,7 +508,7 @@ The following metrics have changed their meaning: The following metrics have been removed: -* Metrics related to the Distributed Log Replay feature are no longer present. They were previsouly found in the region server context under the name 'replay'. See the section <> for details. +* Metrics related to the Distributed Log Replay feature are no longer present. They were previously found in the region server context under the name 'replay'. See the section <> for details. The following metrics have been added: @@ -517,7 +517,7 @@ The following metrics have been added: [[upgrade2.0.logging]] .Changed logging HBase-2.0.0 now uses link:https://www.slf4j.org/[slf4j] as its logging frontend. -Prevously, we used link:http://logging.apache.org/log4j/1.2/[log4j (1.2)]. +Previously, we used link:http://logging.apache.org/log4j/1.2/[log4j (1.2)]. For most the transition should be seamless; slf4j does a good job interpreting _log4j.properties_ logging configuration files such that you should not notice any difference in your log system emissions. @@ -564,7 +564,7 @@ The following commands that were deprecated in 1.0 have been removed. Where appl Users upgrading from versions prior to HBase 1.4 should read the instructions in section <>. -Additionally, HBase 2.0 has changed how memstore memory is tracked for flushing decisions. Previously, both the data size and overhead for storage were used to calculate utilization against the flush threashold. Now, only data size is used to make these per-region decisions. Globally the addition of the storage overhead is used to make decisions about forced flushes. +Additionally, HBase 2.0 has changed how memstore memory is tracked for flushing decisions. Previously, both the data size and overhead for storage were used to calculate utilization against the flush threshold. Now, only data size is used to make these per-region decisions. Globally the addition of the storage overhead is used to make decisions about forced flushes. [[upgrade2.0.ui.splitmerge.by.row]] .Web UI for splitting and merging operate on row prefixes @@ -651,11 +651,11 @@ If you previously relied on client side tracing integrated with HBase operations After the Apache HTrace project moved to the Attic/retired, the traces in HBase are left broken and unmaintained since HBase 2.0. A new project link:https://issues.apache.org/jira/browse/HBASE-22120[HBASE-22120] will replace HTrace with OpenTelemetry. It will be shipped in 3.0.0 release. Please see the reference guide section <> for more details. -[[upgrade2.0.hfile.compatability]] -.HFile lose forward compatability +[[upgrade2.0.hfile.compatibility]] +.HFile lose forward compatibility HFiles generated by 2.0.0, 2.0.1, 2.1.0 are not forward compatible to 1.4.6-, 1.3.2.1-, 1.2.6.1-, -and other inactive releases. Why HFile lose compatability is hbase in new versions +and other inactive releases. Why HFile lose compatibility is hbase in new versions (2.0.0, 2.0.1, 2.1.0) use protobuf to serialize/deserialize TimeRangeTracker (TRT) while old versions use DataInput/DataOutput. 
To solve this, We have to put link:https://jira.apache.org/jira/browse/HBASE-21012[HBASE-21012] diff --git a/src/site/site.xml b/src/site/site.xml index 22490bd14ed7..dcdd8d503c21 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -86,7 +86,6 @@ - diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index b70cda04a79f..f0e49a654f4f 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -70,49 +70,49 @@ under the License. - 2.4.6 + 2.4.8 - 2021/09/13 + 2021/11/03 - 2.4.6 vs 2.4.5 + 2.4.8 vs 2.4.7 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc) - + stable release - 2.3.6 + 2.3.7 - 2021/03/31 + 2021/10/19 - 2.3.5 vs 2.3.6 + 2.3.6 vs 2.3.7 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc) - stable release + @@ -138,23 +138,23 @@ under the License. - 1.4.13 + 1.4.14 - 2020/02/29 + 2021/10/25 - 1.4.12 vs 1.4.13 + 1.4.13 vs 1.4.14 - Changes + Changes - Release Notes + Release Notes - src (sha asc)
- bin (sha asc) + src (sha512 asc)
+ bin (sha512 asc)
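
Reviewer note: for context on the `getTableNamesWithIsTableEnabled()` method added to `Hbase.thrift` and exercised by the new `testGetTableNamesWithStatus` test above, here is a minimal client-side sketch of how the endpoint could be called through the generated thrift1 bindings. This is illustrative only and not part of the patch: the host/port (`localhost:9090`), the unframed `TBinaryProtocol` transport, and the `ListTablesWithState` class name are assumptions about a default thrift1 server deployment.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public final class ListTablesWithState {
  public static void main(String[] args) throws Exception {
    // Assumption: a thrift1 server on localhost:9090 with default (unframed,
    // binary) transport settings; adjust for framed/compact deployments.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    try {
      Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
      // New in this change: a single RPC returning each userspace table name
      // mapped to its enabled/disabled flag.
      Map<ByteBuffer, Boolean> tables = client.getTableNamesWithIsTableEnabled();
      tables.forEach((name, enabled) ->
          // duplicate() so decoding does not consume the shared buffer
          System.out.println(
              StandardCharsets.UTF_8.decode(name.duplicate()) + " enabled=" + enabled));
    } finally {
      transport.close();
    }
  }
}
```

The returned map has the same shape the test's `countTablesByStatus` helper iterates over, so callers can distinguish enabled from disabled tables in one round trip instead of combining `getTableNames()` with per-table `isTableEnabled` calls.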